test_all.py

import os
import sys

import pytest
from bs4 import BeautifulSoup
from tinydb import where

sys.path.insert(0, os.getcwd())

from conf import CONFIG  # noqa

# use a dedicated content root for the tests, set *before* any path is
# derived from it, so the cleanup below never touches real content
CONFIG['content_root'] = 'test_root'

db_name = os.path.join(CONFIG['content_root'], 'blogit.db')
if os.path.exists(db_name):
    import shutil
    shutil.rmtree(CONFIG['content_root'])

if not os.path.exists(CONFIG['content_root']):
    os.mkdir(CONFIG['content_root'])

ARCHIVE_SIZE = 10

from blogit.blogit import (find_new_posts_and_pages, DataBase,  # noqa
                           Entry, Tag, _get_last_entries,
                           render_archive, update_index, build)
import blogit.blogit as m  # noqa

DB = DataBase(os.path.join(CONFIG['content_root'], 'blogit.db'))

# monkey patch the module-level DB handle to the local test DB
m.DB = DB
Tag.table = DB.tags
Tag.db = DB
Entry.db = DB

tags = ['foo', 'bar', 'baz', 'bug', 'buf']


def shift(lst, n):
    """Rotate lst to the right by n positions."""
    return lst[-n:] + lst[:-n]
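
# e.g. shift(tags, 1) == ['buf', 'foo', 'bar', 'baz', 'bug']
# and  shift(tags, 4) == ['bar', 'baz', 'bug', 'buf', 'foo']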


post = '''\
---
title: Blog post {number}
author: Famous author
published: 2015-01-{number}
tags: {tags}
public: yes
chronological: yes
kind: writing
summary: This is a summary of post {number}. Donec id elit non mi porta gravida at eget metus. Fusce dapibus
---
This is the body of post {number}. Donec id elit non mi porta gravida at eget metus. Fusce dapibus, tellus ac cursus commodo, tortor mauris condimentum nibh, ut fermentum massa justo sit amet risus. Etiam porta sem malesuada magna mollis euismod. Donec sed odio dui.
This is a snippet in bash
```bash
$ for i in `seq 1 10`; do
    echo $i
done
VAR="variable"
echo $VAR
# This is a very long long long long long long long long long long comment
```
This is a snippet in python
```python
def yay(top):
    for i in range(1, top+1):
        yield i
for i in yay(10):
    print(i)
```
'''

try:
    os.mkdir(CONFIG['content_root'])
except OSError:
    pass

shift_factors = [(x - 1) // 5 + 1 for x in range(1, 21)]
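# shift factors: 1 for posts 1-5, 2 for posts 6-10, 3 for 11-15, 4 for 16-20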

with open(os.path.join(CONFIG['content_root'], 'page.md'), 'w') as f:
    f.write("""\
---
title: example page
public: yes
kind: page
template: about.html
---
# some heading
content paragraph
## heading 2
some more content
""")


def write_file(i):
    with open(os.path.join(CONFIG['content_root'],
                           'post{0:03d}.md'.format(i)), 'w') as f:
        f.write(post.format(**{
            'number': i,
            'tags': ','.join(shift(tags, shift_factors[i - 1])[:-1])}))


for i in range(1, 21):
    write_file(i)
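# the content root now holds 20 posts (post001.md .. post020.md) plus one
# page; each post carries 4 of the 5 tags, rotated by its shift factor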


def test_find_new_posts_and_pages():
    entries = [e for e in find_new_posts_and_pages(DB)]
    assert len(entries)
    pages = [e[1] for e in entries if str(e[0]).endswith('page.md')]
    assert len(pages)
    assert len(DB.posts.all()) == 20
    new_entries = [e for e in find_new_posts_and_pages(DB)]
    # no new posts should be found
    assert len(DB.posts.all()) == 20
    assert len(new_entries) == 0
    # accessing .tags registers each post's tags in the DB
    [e[0].tags for e in entries]
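    # every post drops exactly one tag via shift(tags, f)[:-1]; only posts
    # 16-20 (shift factor 4) drop 'foo', so 'foo' covers posts 1 through 15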
    foo = DB.tags.search(where('name') == 'foo')
    assert foo[0]['post_ids'] == list(range(1, 16))


def test_tags():
    entries = [
        Entry.entry_from_db(os.path.join(CONFIG['content_root'],
                                         e.get('filename')), e.doc_id)
        for e in DB.posts.all()]

    tags = DB.tags.all()  # noqa

    t = entries[0].tags
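    # post 1 uses shift factor 1, so its tags are
    # shift(tags, 1)[:-1] == ['buf', 'foo', 'bar', 'baz']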
    assert len(t) == 4
    assert t[0].name == 'buf'

    new_tag = Tag('buggg')
    new_tag.posts = [100, 100]

    with pytest.raises(ValueError):
        new_tag.posts = "This should not work"

    with pytest.raises(ValueError):
        new_tag.posts = 1  # This should not either

    new_tag.posts = [100]
    with pytest.raises(ValueError):
        list(new_tag.entries)


def test_slug():
    t = Tag('foo:bar')
    assert t.slug == "foo-bar"
    t = Tag('foo:;bar,.,baz')
    assert t.slug == "foo-bar-baz"
  122. """
  123. def test_tag_posts():
  124. example = Tag('example')
  125. example.posts = [1,2,3]
  126. assert [1,2,3] == example.posts
  127. Filter = Query()
  128. t = DB.tags.get(Filter.post_ids == [1, 2, 3])
  129. assert t['post_ids'] == [1, 2, 3]
  130. example = Tag('example')
  131. example.posts = [4,5,6]
  132. rv = DB.tags.search(where('name') == 'example')
  133. assert rv[0]['post_ids'] == range(1, 7)
  134. def test_tag_entries():
  135. t = Tag('breaks')
  136. t.posts = [10000]
  137. with pytest.raises(ValueError):
  138. list(t.entries)
  139. tf = Tag(u'example')
  140. entries = list(tf.entries)
  141. assert len(entries)
  142. """


def test_tag_post_ids():
    template = """\
---
title: Blog post {}
author: Famous author
published: 2015-01-{}
tags: tag1, tag2
public: yes
chronological: yes
kind: writing
summary: This is a summary
---
"""
    assert len(DB.posts.all()) == 20
    with open(os.path.join(CONFIG['content_root'], 'e.md'), 'w') as f:
        f.write(template.format(25, 25))
    with open(os.path.join(CONFIG['content_root'], 'f.md'), 'w') as f:
        f.write(template.format(27, 27))
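
    # e.md and f.md become posts 21 and 22; merely accessing .tags is what
    # runs the tag bookkeeping for the two new entries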
    e1 = Entry(os.path.join(CONFIG['content_root'], 'e.md'))
    e1.tags
    e2 = Entry(os.path.join(CONFIG['content_root'], 'f.md'))
    e2.tags
    assert len(DB.posts.all()) == 22
    # assert e1.tags[0].posts == e2.tags[0].posts
    e1.render()
    [t.render() for t in e1.tags]
    assert len(DB.posts.all()) == 22


def test_tag_render():
    p = DB.posts.get(doc_id=1)
    entry = Entry.entry_from_db(
        os.path.join(CONFIG['content_root'], p.get('filename')), 1)

    tags = entry.tags
    assert list(map(str, tags)) == ['buf', 'foo', 'bar', 'baz']
    # the entries are wrongly sorted, need to look at that
    assert tags[0].render()
    assert len(list(tags[0].entries))

    assert len(DB.posts.all()) == 22


def test_get_last_entries():
    assert len(DB.posts.all()) == 22
    le, all_entries = _get_last_entries(DB, 10)
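    # the last 10 entries come back newest first: doc ids 22 down to 13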
    assert [e.id for e in le] == list(range(22, 12, -1))


def test_render_archive():
    entries = [Entry.entry_from_db(
        os.path.join(CONFIG['content_root'], e.get('filename')), e.doc_id)
        for e in DB.posts.all()]
    render_archive(entries[ARCHIVE_SIZE:])
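
    # 22 posts exist and the newest ARCHIVE_SIZE (10) stay on the index,
    # so the remaining 12 should show up in the archive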
    # pages should not be in the archive
    with open(os.path.join(CONFIG['output_to'],
                           'archive', 'index.html')) as html_index:
        soup = BeautifulSoup(html_index.read(), 'html.parser')
        assert len(soup.find_all(class_='post')) == 12


def test_render_index():
    le, all_entries = _get_last_entries(DB, 10)
    update_index(le)
    with open(os.path.join(CONFIG['output_to'], 'index.html')) as html_index:
        soup = BeautifulSoup(html_index.read(), 'html.parser')
        assert len(soup.find_all(class_='clearfix entry')) == 10


def test_build():
    DB._db.purge_tables()
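    # the database was just wiped, so build() has to pick up every file again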
    build(CONFIG)

    # check that the index really contains the last 10 entries
    with open(os.path.join(CONFIG['output_to'], 'index.html')) as html_index:
        soup = BeautifulSoup(html_index.read(), 'html.parser')
        assert len(soup.find_all(class_='clearfix entry')) == 10

    # pages should not be in the archive
    with open(os.path.join(CONFIG['output_to'],
                           'archive', 'index.html')) as html_index:
        soup = BeautifulSoup(html_index.read(), 'html.parser')
        assert len(soup.find_all(class_='post')) == 12

    # the 'foo' tag page lists its 15 posts newest first
    with open(os.path.join(CONFIG['output_to'],
                           'tags', 'foo', 'index.html')) as tag_foo:
        soup = BeautifulSoup(tag_foo.read(), 'html.parser')
        titles = [c.a.string for c in
                  soup.find_all(class_="clearfix entry")]
        for title, idx in zip(titles, range(15, 0, -1)):
            assert title.split()[-1] == str(idx)