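"""Integration tests for blogit.

A scratch content root is populated with one page and twenty generated
posts; then post/page discovery, tagging, slugs, rendering, the archive,
the index and the full build are checked against a local TinyDB database.
"""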
import os
import shutil
import sys

import pytest
from bs4 import BeautifulSoup
from tinydb import Query, where

sys.path.insert(0, os.getcwd())

from conf import CONFIG
# wipe the content root so every run starts from a fresh database
db_name = os.path.join(CONFIG['content_root'], 'blogit.db')
if os.path.exists(db_name):
    shutil.rmtree(CONFIG['content_root'])

if not os.path.exists(CONFIG['content_root']):
    os.mkdir(CONFIG['content_root'])

CONFIG['content_root'] = 'test_root'
ARCHIVE_SIZE = 10
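# The newest ARCHIVE_SIZE entries go on the index page; render_archive
# receives everything older than that (see test_render_archive below).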
from blogit.blogit import (find_new_posts_and_pages, DataBase,
                           Entry, Tag, _sort_entries, _get_last_entries,
                           render_archive, update_index, build)
import blogit.blogit as m

DB = DataBase(os.path.join(CONFIG['content_root'], 'blogit.db'))

# monkey patch the module to use the local test DB
m.DB = DB
Tag.table = DB.tags
Tag.db = DB
Entry.db = DB

tags = ['foo', 'bar', 'baz', 'bug', 'buf']


def shift(l, n):
    """Rotate the list l to the right by n positions."""
    return l[-n:] + l[:-n]
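# For example, shift(tags, 1) yields ['buf', 'foo', 'bar', 'baz', 'bug'];
# each post below drops the last element of its rotation, so post 1 ends
# up tagged ['buf', 'foo', 'bar', 'baz'] (which test_tags relies on).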
post = '''\
---
title: Blog post {number}
author: Famous author
published: 2015-01-{number}
tags: {tags}
public: yes
chronological: yes
kind: writing
summary: This is a summary of post {number}. Donec id elit non mi porta gravida at eget metus. Fusce dapibus
---

This is the body of post {number}. Donec id elit non mi porta gravida at eget metus. Fusce dapibus, tellus ac cursus commodo, tortor mauris condimentum nibh, ut fermentum massa justo sit amet risus. Etiam porta sem malesuada magna mollis euismod. Donec sed odio dui.

This is a snippet in bash

```bash
$ for i in `seq 1 10`; do
    echo $i
done

VAR="variable"
echo $VAR

# This is a very long long long long long long long long long long comment
```

This is a snippet in python

```python
def yay(top):
    for i in range(1, top+1):
        yield i

for i in yay(10):
    print(i)
```
'''
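# The fenced bash and python snippets above are fixture content; they
# presumably exercise the renderer's code-highlighting path when the
# posts are built.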
try:
    os.mkdir(CONFIG['content_root'])
except OSError:
    pass

shift_factors = [(x - 1) // 5 + 1 for x in range(1, 21)]
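# shift_factors groups the 20 posts into blocks of five: posts 1-5 get
# factor 1, 6-10 get 2, 11-15 get 3 and 16-20 get 4. With factor 4 the
# rotation pushes 'foo' into the dropped slot, which is why 'foo' ends up
# tagging exactly posts 1-15 (asserted in test_find_new_posts_and_pages).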
with open(os.path.join(CONFIG['content_root'], 'page.md'), 'w') as f:
    f.write("""\
---
title: example page
public: yes
kind: page
template: about.html
---
# some heading
content paragraph
## heading 2
some more content
""")
def write_file(i):
    """Write post i, tagged with a rotated subset of the tag list."""
    with open(os.path.join(CONFIG['content_root'],
                           'post{}.md'.format(i)), 'w') as f:
        f.write(post.format(number=i,
                            tags=','.join(shift(tags, shift_factors[i - 1])[:-1])))


for i in range(1, 21):
    write_file(i)
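# At this point the content root holds page.md plus post1.md ... post20.md;
# every test below runs against this shared, order-dependent fixture.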
def test_find_new_posts_and_pages():
    entries = [e for e in find_new_posts_and_pages(DB)]
    assert len(entries)
    pages = [e[1] for e in entries if str(e[0]).endswith('page.md')]
    assert len(pages)
    assert len(DB.posts.all()) == 20
    new_entries = [e for e in find_new_posts_and_pages(DB)]
    # no new posts should be found
    assert len(DB.posts.all()) == 20
    assert len(new_entries) == 0
    # accessing .tags persists each entry's tags in the database
    for e in entries:
        e[0].tags
    foo = DB.tags.search(where('name') == 'foo')
    assert foo[0]['post_ids'] == list(range(1, 16))
def test_tags():
    entries = [
        Entry.entry_from_db(os.path.join(CONFIG['content_root'],
                                         e.get('filename')), e.eid)
        for e in DB.posts.all()]
    tags = DB.tags.all()

    t = entries[0].tags
    assert len(t) == 4
    assert t[0].name == 'buf'

    new_tag = Tag('buggg')
    new_tag.posts = [100, 100]
    with pytest.raises(ValueError):
        new_tag.posts = "This should not work"
    with pytest.raises(ValueError):
        new_tag.posts = 1  # this should not work either
    new_tag.posts = [100]
    with pytest.raises(ValueError):
        list(new_tag.entries)
def test_slug():
    t = Tag('foo:bar')
    assert t.slug == "foo-bar"
    t = Tag('foo:;bar,.,baz')
    assert t.slug == "foo-bar-baz"
  121. """
  122. def test_tag_posts():
  123. example = Tag('example')
  124. example.posts = [1,2,3]
  125. assert [1,2,3] == example.posts
  126. Filter = Query()
  127. t = DB.tags.get(Filter.post_ids == [1, 2, 3])
  128. assert t['post_ids'] == [1, 2, 3]
  129. example = Tag('example')
  130. example.posts = [4,5,6]
  131. rv = DB.tags.search(where('name') == 'example')
  132. assert rv[0]['post_ids'] == range(1, 7)
  133. def test_tag_entries():
  134. t = Tag('breaks')
  135. t.posts = [10000]
  136. with pytest.raises(ValueError):
  137. list(t.entries)
  138. tf = Tag(u'example')
  139. entries = list(tf.entries)
  140. assert len(entries)
  141. """
def test_tag_post_ids():
    post_template = """\
---
title: Blog post {}
author: Famous author
published: 2015-01-{}
tags: tag1, tag2
public: yes
chronological: yes
kind: writing
summary: This is a summary
---
"""
    assert len(DB.posts.all()) == 20
    with open(os.path.join(CONFIG['content_root'], 'e.md'), 'w') as f:
        f.write(post_template.format(25, 25))
    with open(os.path.join(CONFIG['content_root'], 'f.md'), 'w') as f:
        f.write(post_template.format(27, 27))

    e1 = Entry(os.path.join(CONFIG['content_root'], 'e.md'))
    e1.tags  # reading .tags registers the new entry's tags
    e2 = Entry(os.path.join(CONFIG['content_root'], 'f.md'))
    e2.tags
    assert len(DB.posts.all()) == 22
    assert e1.tags[0].posts == e2.tags[0].posts
    e1.render()
    for t in e1.tags:
        t.render()

    ordered = _sort_entries([e2, e1])
    assert ordered == [e2, e1]
    assert len(DB.posts.all()) == 22
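# e.md and f.md bump the post count to 22; the remaining tests assume
# this accumulated state, so they must run after test_tag_post_ids.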
def test_tag_render():
    p = DB.posts.get(eid=1)
    entry = Entry.entry_from_db(
        os.path.join(CONFIG['content_root'], p.get('filename')), 1)
    # entry = Entry(os.path.join(CONFIG['content_root'], 'post1.md'))
    tags = entry.tags
    assert list(map(str, tags)) == ['buf', 'foo', 'bar', 'baz']
    # the entries are wrongly sorted, need to look at that
    assert tags[0].render()
    assert len(list(tags[0].entries))
    assert len(DB.posts.all()) == 22
def test_get_last_entries():
    assert len(DB.posts.all()) == 22
    le = _get_last_entries(DB, 10)
    assert [e.id for e in le] == list(range(22, 12, -1))
def test_render_archive():
    entries = [Entry.entry_from_db(
        os.path.join(CONFIG['content_root'], e.get('filename')), e.eid)
        for e in DB.posts.all()]
    render_archive(_sort_entries(entries, reversed=True)[ARCHIVE_SIZE:])
    # pages should not be in the archive; 22 posts minus the 10 newest
    # leaves 12 entries here
    with open(os.path.join(CONFIG['output_to'],
                           'archive', 'index.html')) as html_index:
        soup = BeautifulSoup(html_index.read(), 'html.parser')
        assert len(soup.find_all(class_='post')) == 12
def test_render_index():
    update_index(_get_last_entries(DB, 10))
    with open(os.path.join(CONFIG['output_to'], 'index.html')) as html_index:
        soup = BeautifulSoup(html_index.read(), 'html.parser')
        assert len(soup.find_all(class_='clearfix entry')) == 10
def test_build():
    DB._db.purge_tables()
    build(CONFIG)
    # check that the index really contains the last 10 entries
    with open(os.path.join(CONFIG['output_to'], 'index.html')) as html_index:
        soup = BeautifulSoup(html_index.read(), 'html.parser')
        assert len(soup.find_all(class_='clearfix entry')) == 10
    # pages should not be in the archive
    with open(os.path.join(CONFIG['output_to'],
                           'archive', 'index.html')) as html_index:
        soup = BeautifulSoup(html_index.read(), 'html.parser')
        assert len(soup.find_all(class_='post')) == 12
    # the tag page for 'foo' lists its posts newest first: 15 down to 1
    with open(os.path.join(CONFIG['output_to'],
                           'tags', 'foo', 'index.html')) as tag_foo:
        soup = BeautifulSoup(tag_foo.read(), 'html.parser')
        titles = [c.a.string for c in
                  soup.find_all(class_="clearfix entry")]
        for title, idx in zip(titles, range(15, 0, -1)):
            assert title.split()[-1] == str(idx)