Automated pipeline for parsing profiles of politically exposed persons (PEPs) into Wikidata


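The crawler is driven by a config.yaml with one entry per country, whose keys the methods below look up (domain, memberList with link/parent/child-name/child-link, and member with info-1). For orientation, here is a minimal sketch of that structure; the country key, URL, and XPath selectors are hypothetical placeholders, not the project's real configuration:

# minimal config.yaml sketch - country key, URL, and XPath values are hypothetical
nicaragua:
  domain: "https://www.example-parliament.ni"          # hypothetical domain
  memberList:
    link: "https://www.example-parliament.ni/members"  # hypothetical list-page URL
    parent: "//table//tr"                              # XPath yielding one node per member (hypothetical)
    child-name: "//td[1]//text()"                      # appended to the indexed parent XPath (hypothetical)
    child-link: "//td[1]//a/@href"                     # appended to the indexed parent XPath (hypothetical)
  member:
    info-1:
      parent: "//div[@class='member']"                 # hypothetical
      child-politicalParty: "//span//text()"           # hypothetical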
import ast
import os
import urllib.request

import lxml.html.soupparser
import yaml

class members_parliament_crawler(object):
    def __init__(self, config_file):
        # read the per-country crawling configuration from the yaml config file
        with open(config_file, "r") as stream:
            try:
                self.config = yaml.safe_load(stream)
            except yaml.YAMLError as exc:
                print(exc)

    # input list of countries in the form ['nicaragua', 'honduras', ..., 'mexico']
    def download_member_list_pages_of_countries(self, list_of_countries):
        # download only the html pages of the countries specified in the input
        for country in list_of_countries:
            if country not in self.config:
                continue
            try:
                member_list = self.config.get(country).get("memberList")
            except Exception as e:
                print(
                    "There is a problem with the entry memberList in the config.yaml - the original error message is:",
                    e,
                )
                continue
            try:
                member_list_link = member_list.get("link")
            except Exception as e:
                print(
                    "No link entry defined under memberList in the config.yaml - the original error message is:",
                    e,
                )
                continue
            # download the html page of the list of members
            response = urllib.request.urlopen(member_list_link)
            web_content = response.read().decode("UTF-8")
            # save interim results to files
            with open("crawlers/pages/" + country + "MemberList.html", "w+") as f:
                f.write(web_content)

    def parse_member_list_data2dictionary(self, list_of_countries):
        for country in list_of_countries:
            try:
                # use soupparser to handle broken html
                tree = lxml.html.soupparser.parse(
                    "crawlers/pages/" + country + "MemberList.html"
                )
                dictionary_member_list = {}
                country_conf = self.config.get(country)
                country_domain = country_conf.get("domain")
                country_conf_member_list = country_conf.get("memberList")
                country_conf_member_list_parent = country_conf_member_list.get("parent")
                country_conf_member_list_child_name = country_conf_member_list.get(
                    "child-name"
                )
                country_conf_member_list_child_link = country_conf_member_list.get(
                    "child-link"
                )
                # xpath indices are 1-based, so count from 1 up to the number of parents
                number_of_parents = len(tree.xpath(country_conf_member_list_parent))
                for n in range(1, number_of_parents + 1):
                    name = tree.xpath(
                        country_conf_member_list_parent
                        + "["
                        + str(n)
                        + "]"
                        + country_conf_member_list_child_name
                    )
                    link = tree.xpath(
                        country_conf_member_list_parent
                        + "["
                        + str(n)
                        + "]"
                        + country_conf_member_list_child_link
                    )
                    if len(name) > 0 and len(link) > 0:
                        dictionary_member_list[n] = {}
                        dictionary_member_list[n]["name"] = name[0]
                        # prepend the domain to relative links
                        if country_domain in link[0]:
                            dictionary_member_list[n]["link"] = link[0]
                        else:
                            dictionary_member_list[n]["link"] = country_domain + link[0]
            except Exception as e:
                print(
                    "parsing the html did not work. Possibly you first have to run download_member_list_pages_of_countries(). The original error message is:",
                    e,
                )
                continue
            # save interim results to files
            with open("crawlers/output/" + country + "MemberList.txt", "w+") as f:
                f.write(str(dictionary_member_list))

    def download_member_data_htmls(self, list_of_countries):
        for country in list_of_countries:
            with open("crawlers/output/" + country + "MemberList.txt") as f:
                text = f.read()
            # the interim file holds a python dict literal; parse it safely
            dictionary_member_list = ast.literal_eval(text)
            for member_id in dictionary_member_list:
                member_link = dictionary_member_list[member_id]["link"]
                # download the html page of the member
                response = urllib.request.urlopen(member_link)
                web_content = response.read().decode("UTF-8")
                # save interim results to files
                file_name = "crawlers/pages/" + country + "/" + str(member_id) + ".html"
                os.makedirs(os.path.dirname(file_name), exist_ok=True)
                with open(file_name, "w+") as f:
                    f.write(web_content)

    def parse_member_data2dictionary(self, list_of_countries):
        for country in list_of_countries:
            print("started to parse data of members of " + country + " ..")
            with open("crawlers/output/" + country + "MemberList.txt") as f:
                text = f.read()
            dictionary_member_list = ast.literal_eval(text)
            country_conf = self.config.get(country)
            country_conf_member = country_conf.get("member")
            country_conf_member_info1 = country_conf_member.get("info-1")
            country_conf_member_info1_parent = country_conf_member_info1.get("parent")
            country_conf_member_info1_child_political_party = (
                country_conf_member_info1.get("child-politicalParty")
            )
            for member_id in dictionary_member_list:
                print(
                    "started to parse data of member with name "
                    + dictionary_member_list[member_id]["name"]
                    + " .."
                )
                file_name = "crawlers/pages/" + country + "/" + str(member_id) + ".html"
                tree = lxml.html.soupparser.parse(file_name)
                political_party = tree.xpath(
                    country_conf_member_info1_parent
                    + country_conf_member_info1_child_political_party
                )
                if len(political_party) > 0:
                    dictionary_member_list[member_id]["political party"] = (
                        political_party[0]
                    )
            # save the enriched dictionary back to the interim file
            with open("crawlers/output/" + country + "MemberList.txt", "w+") as f:
                f.write(str(dictionary_member_list))
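
Taken together, the four methods form a staged pipeline: download the member-list pages, parse them into a dictionary, download each member's page, and enrich the dictionary with per-member data. A minimal sketch of a full run, assuming a config.yaml like the one above; the config path and country key are hypothetical, and the crawlers/pages and crawlers/output directories are assumed to exist:

# sketch of an end-to-end run - config path and country key are assumptions
if __name__ == "__main__":
    crawler = members_parliament_crawler("config.yaml")  # hypothetical path
    countries = ["nicaragua"]  # hypothetical country key from config.yaml
    crawler.download_member_list_pages_of_countries(countries)
    crawler.parse_member_list_data2dictionary(countries)
    crawler.download_member_data_htmls(countries)
    crawler.parse_member_data2dictionary(countries)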