# talerfrontends/blog/content.py
# Origin: https://gitlab.com/taler/merchant-frontends

# This file is part of GNU TALER.
# Copyright (C) 2014-2016 INRIA
#
# TALER is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free Software
# Foundation; either version 2.1, or (at your option) any later version.
#
# TALER is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along with
# GNU TALER; see the file COPYING. If not, see <http://www.gnu.org/licenses/>
#
# @author Florian Dold
"""
Define content and associated metadata that is served on the blog.
"""
import logging
import os
import re
from collections import OrderedDict, namedtuple

from bs4 import BeautifulSoup
from pkg_resources import resource_filename, resource_stream
  26. logger = logging.getLogger(__name__)
  27. Article = namedtuple("Article", "slug title teaser main_file extra_files")
  28. articles = OrderedDict()
  29. def add_article(slug, title, teaser, main_file, extra_files=[]):
  30. articles[slug] = Article(slug, title, teaser, main_file, extra_files)
  31. def get_image_file(image):
  32. f = resource_filename("talerfrontends", os.path.join("blog/data/", image))
  33. return os.path.abspath(f)
  34. def get_article_file(article):
  35. f = resource_filename("talerfrontends", article.main_file)
  36. return os.path.basename(f)
  37. def add_from_html(resource_name, teaser_paragraph=0, title=None):
  38. """
  39. Extract information from article html.
  40. """
  41. res = resource_stream("talerfrontends", resource_name)
  42. soup = BeautifulSoup(res, 'html.parser')
  43. if title is None:
  44. title_el = soup.find("h1", attrs={"class":["chapter", "unnumbered"]})
  45. if title_el is None:
  46. logger.warn("Can't extract title from '%s'", resource_name)
  47. title = resource_name
  48. else:
  49. title = title_el.get_text().strip()
  50. slug = title.replace(" ", "_")
  51. paragraphs = soup.find_all("p")
  52. teaser = soup.find("p", attrs={"id":["teaser"]})
  53. if teaser is None:
  54. teaser = str(paragraphs[teaser_paragraph])
  55. p = re.compile("^/essay/[^/]+/data/[^/]+$")
  56. imgs = soup.find_all("img")
  57. extra_files = []
  58. for img in imgs:
  59. # We require that any image whose access is regulated is src'd
  60. # as "<slug>/data/img.png". We also need to check if the <slug>
  61. # component actually matches the article's slug
  62. if p.match(img['src']):
  63. if img['src'].split(os.sep)[2] == slug:
  64. logger.info("extra file for %s is %s" % (slug, os.path.basename(img['src'])))
  65. extra_files.append(os.path.basename(img['src']))
  66. else:
  67. logger.warning("Image src and slug don't match: '%s' != '%s'" % (img['src'].split(os.sep)[2], slug))
  68. add_article(slug, title, teaser, resource_name, extra_files)
  69. add_from_html("blog/articles/scrap1_U.0.html", 0)
  70. add_from_html("blog/articles/scrap1_U.1.html", 0)
  71. add_from_html("blog/articles/scrap1_1.html", 1)
  72. add_from_html("blog/articles/scrap1_2.html")
  73. add_from_html("blog/articles/scrap1_3.html")
  74. add_from_html("blog/articles/scrap1_4.html")
  75. add_from_html("blog/articles/scrap1_5.html")
  76. add_from_html("blog/articles/scrap1_6.html")
  77. add_from_html("blog/articles/scrap1_7.html")
  78. add_from_html("blog/articles/scrap1_8.html")
  79. add_from_html("blog/articles/scrap1_9.html")
  80. add_from_html("blog/articles/scrap1_10.html")
  81. add_from_html("blog/articles/scrap1_11.html")
  82. add_from_html("blog/articles/scrap1_12.html")
  83. add_from_html("blog/articles/scrap1_13.html", 1)
  84. add_from_html("blog/articles/scrap1_14.html")
  85. add_from_html("blog/articles/scrap1_15.html")
  86. add_from_html("blog/articles/scrap1_16.html")
  87. add_from_html("blog/articles/scrap1_17.html")
  88. add_from_html("blog/articles/scrap1_18.html")
  89. add_from_html("blog/articles/scrap1_19.html")
  90. add_from_html("blog/articles/scrap1_20.html", 1)
  91. add_from_html("blog/articles/scrap1_21.html")
  92. add_from_html("blog/articles/scrap1_22.html")
  93. add_from_html("blog/articles/scrap1_23.html")
  94. add_from_html("blog/articles/scrap1_24.html")
  95. add_from_html("blog/articles/scrap1_25.html", 1)
  96. add_from_html("blog/articles/scrap1_26.html", 1)
  97. add_from_html("blog/articles/scrap1_27.html")
  98. add_from_html("blog/articles/scrap1_28.html", 1)
  99. add_from_html("blog/articles/scrap1_29.html")
  100. add_from_html("blog/articles/scrap1_30.html", 1)
  101. add_from_html("blog/articles/scrap1_31.html", 1)
  102. add_from_html("blog/articles/scrap1_32.html")
  103. add_from_html("blog/articles/scrap1_33.html")
  104. add_from_html("blog/articles/scrap1_34.html")
  105. add_from_html("blog/articles/scrap1_35.html")
  106. add_from_html("blog/articles/scrap1_36.html")
  107. add_from_html("blog/articles/scrap1_37.html")
  108. add_from_html("blog/articles/scrap1_38.html")
  109. add_from_html("blog/articles/scrap1_39.html")
  110. add_from_html("blog/articles/scrap1_40.html")
  111. add_from_html("blog/articles/scrap1_41.html")
  112. add_from_html("blog/articles/scrap1_42.html")
  113. add_from_html("blog/articles/scrap1_43.html", 2)
  114. add_from_html("blog/articles/scrap1_46.html", 1)
  115. add_from_html("blog/articles/scrap1_47.html")