test_all.py

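"""End-to-end tests for blogit: build a throw-away content tree, then
exercise post/page discovery, tagging, rendering and the full build."""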
import os
import sys

import pytest
from bs4 import BeautifulSoup
from tinydb import Query, where

sys.path.insert(0, os.getcwd())
from conf import CONFIG
# switch to a throw-away test root *before* any cleanup touches the disk,
# so the real content root is never removed
CONFIG['content_root'] = 'test_root'

db_name = os.path.join(CONFIG['content_root'], 'blogit.db')
if os.path.exists(db_name):
    import shutil
    shutil.rmtree(CONFIG['content_root'])
if not os.path.exists(CONFIG['content_root']):
    os.mkdir(CONFIG['content_root'])

ARCHIVE_SIZE = 10
from blogit.blogit import (find_new_posts_and_pages, DataBase,
                           Entry, Tag, _get_last_entries,
                           render_archive, update_index, build)
import blogit.blogit as m

DB = DataBase(os.path.join(CONFIG['content_root'], 'blogit.db'))

# monkey patch to local DB
m.DB = DB
Tag.table = DB.tags
Tag.db = DB
Entry.db = DB
tags = ['foo', 'bar', 'baz', 'bug', 'buf']
shift = lambda l, n: l[-n:] + l[:-n]
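# shift rotates a list right by n places, e.g.
# shift(['foo', 'bar', 'baz', 'bug', 'buf'], 1) == ['buf', 'foo', 'bar', 'baz', 'bug'];
# each post below gets a rotated slice of this tag pool.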
post = '''\
---
title: Blog post {number}
author: Famous author
published: 2015-01-{number}
tags: {tags}
public: yes
chronological: yes
kind: writing
summary: This is a summary of post {number}. Donec id elit non mi porta gravida at eget metus. Fusce dapibus
---

This is the body of post {number}. Donec id elit non mi porta gravida at eget metus. Fusce dapibus, tellus ac cursus commodo, tortor mauris condimentum nibh, ut fermentum massa justo sit amet risus. Etiam porta sem malesuada magna mollis euismod. Donec sed odio dui.

This is a snippet in bash

```bash
$ for i in `seq 1 10`; do
    echo $i
done

VAR="variable"
echo $VAR
# This is a very long long long long long long long long long long comment
```

This is a snippet in python

```python
def yay(top):
    for i in range(1, top+1):
        yield i

for i in yay(10):
    print(i)
```
'''
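# NOTE: {number} and {tags} are filled in by write_file() below; the fenced
# snippets are part of the post body, presumably to exercise code rendering.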
try:
    os.mkdir(CONFIG['content_root'])
except OSError:
    pass
shift_factors = [(x - 1) // 5 + 1 for x in range(1, 21)]
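# posts 1-5 get shift factor 1, posts 6-10 get 2, 11-15 get 3, 16-20 get 4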
with open(os.path.join(CONFIG['content_root'], 'page.md'), 'w') as f:
    f.write("""\
---
title: example page
public: yes
kind: page
template: about.html
---
# some heading

content paragraph

## heading 2

some more content
""")
def write_file(i):
    with open(os.path.join(CONFIG['content_root'],
                           'post{0:03d}.md'.format(i)), 'w') as f:
        f.write(post.format(number=i,
                            tags=','.join(shift(tags, shift_factors[i - 1])[:-1])))


for i in range(1, 21):
    write_file(i)
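# The test root now holds one page (page.md) and 20 posts
# (post001.md .. post020.md); each post carries four of the five tags,
# so e.g. 'foo' ends up on posts 1-15 (checked below).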
def test_find_new_posts_and_pages():
    entries = [e for e in find_new_posts_and_pages(DB)]
    assert len(entries)
    pages = [e[1] for e in entries if str(e[0]).endswith('page.md')]
    assert len(pages)
    assert len(DB.posts.all()) == 20
    new_entries = [e for e in find_new_posts_and_pages(DB)]
    # no new posts should be found
    assert len(DB.posts.all()) == 20
    assert len(new_entries) == 0
    # accessing .tags has the side effect of registering the tags in the DB
    [e[0].tags for e in entries]
    foo = DB.tags.search(where('name') == 'foo')
    assert foo[0]['post_ids'] == list(range(1, 16))
def test_tags():
    entries = [
        Entry.entry_from_db(os.path.join(CONFIG['content_root'],
                                         e.get('filename')), e.eid)
        for e in DB.posts.all()]
    tags = DB.tags.all()

    t = entries[0].tags
    assert len(t) == 4
    assert t[0].name == 'buf'

    new_tag = Tag('buggg')
    new_tag.posts = [100, 100]

    with pytest.raises(ValueError):
        new_tag.posts = "This should not work"

    with pytest.raises(ValueError):
        new_tag.posts = 1  # This should not work either

    # a post id that does not exist raises once the entries are resolved
    new_tag.posts = [100]
    with pytest.raises(ValueError):
        list(new_tag.entries)
def test_slug():
    t = Tag('foo:bar')
    assert t.slug == "foo-bar"
    t = Tag('foo:;bar,.,baz')
    assert t.slug == "foo-bar-baz"
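# The two tests below are currently disabled by wrapping them in a string
# literal; they are kept as documentation of the expected Tag.posts behaviour.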
  120. """
  121. def test_tag_posts():
  122. example = Tag('example')
  123. example.posts = [1,2,3]
  124. assert [1,2,3] == example.posts
  125. Filter = Query()
  126. t = DB.tags.get(Filter.post_ids == [1, 2, 3])
  127. assert t['post_ids'] == [1, 2, 3]
  128. example = Tag('example')
  129. example.posts = [4,5,6]
  130. rv = DB.tags.search(where('name') == 'example')
  131. assert rv[0]['post_ids'] == range(1, 7)
  132. def test_tag_entries():
  133. t = Tag('breaks')
  134. t.posts = [10000]
  135. with pytest.raises(ValueError):
  136. list(t.entries)
  137. tf = Tag(u'example')
  138. entries = list(tf.entries)
  139. assert len(entries)
  140. """
def test_tag_post_ids():
    # renamed from ``m`` so it does not shadow the module alias above
    post_template = """\
---
title: Blog post {}
author: Famous author
published: 2015-01-{}
tags: tag1, tag2
public: yes
chronological: yes
kind: writing
summary: This is a summary
---
"""
    assert len(DB.posts.all()) == 20
    with open(os.path.join(CONFIG['content_root'], 'e.md'), 'w') as f:
        f.write(post_template.format(25, 25))
    with open(os.path.join(CONFIG['content_root'], 'f.md'), 'w') as f:
        f.write(post_template.format(27, 27))
    e1 = Entry(os.path.join(CONFIG['content_root'], 'e.md'))
    e1.tags
    e2 = Entry(os.path.join(CONFIG['content_root'], 'f.md'))
    e2.tags
    assert len(DB.posts.all()) == 22
    # both entries share tag1 and tag2, so they must list the same post ids
    assert e1.tags[0].posts == e2.tags[0].posts
    e1.render()
    [t.render() for t in e1.tags]
    assert len(DB.posts.all()) == 22
def test_tag_render():
    p = DB.posts.get(eid=1)
    entry = Entry.entry_from_db(
        os.path.join(CONFIG['content_root'], p.get('filename')), 1)

    tags = entry.tags
    assert list(map(str, tags)) == ['buf', 'foo', 'bar', 'baz']
    # the entries are wrongly sorted, need to look at that
    assert tags[0].render()
    assert len(list(tags[0].entries))

    assert len(DB.posts.all()) == 22
def test_get_last_entries():
    assert len(DB.posts.all()) == 22
    le, all_entries = _get_last_entries(DB, 10)
    # newest first: posts 22 down to 13
    assert [e.id for e in le] == list(range(22, 12, -1))
def test_render_archive():
    entries = [Entry.entry_from_db(
        os.path.join(CONFIG['content_root'], e.get('filename')), e.eid)
        for e in DB.posts.all()]
    # 22 posts minus the first ARCHIVE_SIZE leaves 12 in the archive
    render_archive(entries[ARCHIVE_SIZE:])
    # pages should not be in the archive
    with open(os.path.join(CONFIG['output_to'], 'archive', 'index.html')) as html_index:
        soup = BeautifulSoup(html_index.read(), 'html.parser')
        assert len(soup.find_all(class_='post')) == 12
def test_render_index():
    le, all_entries = _get_last_entries(DB, 10)
    update_index(le)
    with open(os.path.join(CONFIG['output_to'], 'index.html')) as html_index:
        soup = BeautifulSoup(html_index.read(), 'html.parser')
        assert len(soup.find_all(class_='clearfix entry')) == 10
def test_build():
    DB._db.purge_tables()
    build(CONFIG)
    # check that the index really contains the last 10 entries
    with open(os.path.join(CONFIG['output_to'], 'index.html')) as html_index:
        soup = BeautifulSoup(html_index.read(), 'html.parser')
        assert len(soup.find_all(class_='clearfix entry')) == 10
    # pages should not be in the archive
    with open(os.path.join(CONFIG['output_to'], 'archive', 'index.html')) as html_index:
        soup = BeautifulSoup(html_index.read(), 'html.parser')
        assert len(soup.find_all(class_='post')) == 12
    # posts 15..1 carry the tag 'foo'; each title ends with its post number
    with open(os.path.join(CONFIG['output_to'], 'tags', 'foo', 'index.html')) as tag_foo:
        soup = BeautifulSoup(tag_foo.read(), 'html.parser')
        titles = [c.a.string for c in
                  soup.find_all(class_="clearfix entry")]
        for title, idx in zip(titles, range(15, 0, -1)):
            assert title.split()[-1] == str(idx)