
corrected link glueing for pdf links for loop

master
alpcentaur 8 months ago
commit cf3bb52684
5 changed files with 133 additions and 8 deletions

  1. main.py (+5 / -4)
  2. spiders/__pycache__/fdb_spider.cpython-311.pyc (BIN)
  3. spiders/config.yaml (+2 / -2)
  4. spiders/config.yaml.save (+110 / -0)
  5. spiders/fdb_spider.py (+16 / -2)

main.py (+5 / -4)

@@ -5,20 +5,21 @@ import sys
config = "spiders/config.yaml"
#list_of_fdbs = eval(sys.argv[1])
list_of_fdbs = ["giz","evergabe-online"]
#list_of_fdbs = ["giz","evergabe-online"]
list_of_fdbs = ["giz"]
# doing the crawling of government websites
spider = fdb_spider(config)
spider.download_entry_list_pages_of_funding_databases(list_of_fdbs)
#spider.download_entry_list_pages_of_funding_databases(list_of_fdbs)
#spider.find_config_parameter(list_of_fdbs)
spider.parse_entry_list_data2dictionary(list_of_fdbs)
#spider.parse_entry_list_data2dictionary(list_of_fdbs)
spider.download_entry_data_htmls(list_of_fdbs)
#spider.download_entry_data_htmls(list_of_fdbs)
spider.parse_entry_data2dictionary(list_of_fdbs)

spiders/__pycache__/fdb_spider.cpython-311.pyc (BIN)


spiders/config.yaml (+2 / -2)

@@ -57,8 +57,8 @@ giz:
link1: 'https://ausschreibungen.giz.de/Satellite/company/welcome.do?method=showTable&fromSearch=1&tableSortPROJECT_RESULT=2&tableSortAttributePROJECT_RESULT=publicationDate&selectedTablePagePROJECT_RESULT='
link2: ''
jsdomain: 'None'
iteration-var-list: "[1,2,3,4,5,6,7]"
#iteration-var-list: "[1,2]"
#iteration-var-list: "[1,2,3,4,5,6,7]"
iteration-var-list: "[1,2]"
#parent: "//html//body//div//main//div//div[@class='row']//div[@class='large-12']//a[@class='c-teaser']"
parent: "//html//body//div//div//table[contains(@class, 'csx-new-table')]//tbody//tr"
child-name: "//td[3]//text()"
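
How the spider consumes link1, link2 and iteration-var-list is not visible in this hunk; as a minimal sketch of the assumed pattern (page URL = link1 + page number + link2, after parsing the stringified list), it could look like the following. The consumption shown here is an assumption, not code from this diff:

import ast

# assumed consumption of the giz entry-list fields changed above; the exact
# use inside fdb_spider is not shown in this hunk
link1 = ('https://ausschreibungen.giz.de/Satellite/company/welcome.do'
         '?method=showTable&fromSearch=1&tableSortPROJECT_RESULT=2'
         '&tableSortAttributePROJECT_RESULT=publicationDate'
         '&selectedTablePagePROJECT_RESULT=')
link2 = ''
iteration_var_list = ast.literal_eval("[1,2]")  # the value set by this commit

page_urls = [link1 + str(page) + link2 for page in iteration_var_list]
for url in page_urls:
    print(url)

With "[1,2]" instead of "[1,2,3,4,5,6,7]" this yields two giz list pages rather than seven, so fewer entry-list pages are fetched.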

spiders/config.yaml.save (+110 / -0)

@@ -0,0 +1,110 @@
# Settings for the PEP crawler per country to crawl
# Follow the syntax and don't use tbody, as it gets added by the browser (when working out the xpath via the inspector)
# xpath syntax: https://www.w3schools.com/xml/xpath_syntax.asp
# lxml xpath syntax: https://www.geeksforgeeks.org/web-scraping-using-lxml-and-xpath-in-python/
foerderinfo.bund.de:
domain: 'http://foerderinfo.bund.de'
entry-list:
link1: 'https://www.foerderinfo.bund.de/SiteGlobals/Forms/foerderinfo/expertensuche/Servicesuche_Formular.html?gtp=33498_list%253D'
link2: '#searchResults'
iteration-var-list: '[1,2,3,4,5]'
#iteration-var-list: '[1,2]'
parent: "//html//body//div//main//div//div[@class='row']//section[@class='l-search-result-list']//div[@class='l-search-result-list__item']"
child-name: "//div[@class='c-search-result__text-wrapper']//span[@class='c-search-result__title']/text()"
jsdomain: 'None'
javascript-link: ""
child-link: "/a[@class='c-search-result']/@href"
child-info: "//"
child-period: "/"
child-sponsor: "/"
entry:
info-1:
parent: '//html//body//form//table'
#child-name: '//html//body//form//table//tr[1]//td[2]//span'
#child-sum: '//html//body//form//table//tr[2]//td[1]//span//img'
#child-deadline: '//html/body/form/table/tr[2]/td[3]/span + label.1'
foerderinfo.bund.de-bekanntmachungen:
domain: 'http://foerderinfo.bund.de'
entry-list:
link1: 'https://www.foerderinfo.bund.de/SiteGlobals/Forms/foerderinfo/bekanntmachungen/Bekanntmachungen_Formular.html?gtp=407348_list%253D'
link2: '#searchResults'
# here jsdomain has to be set to 'None' with this syntax, if the html pages of the entry lists do not depend on javascript actions
jsdomain: 'None'
iteration-var-list: '[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30]'
#iteration-var-list: '[1,2]'
#parent: "//html//body//div//main//div//div[@class='row']//div[@class='large-12']//a[@class='c-teaser']"
parent: "//html//body//div//main//div[@class='row']//a[contains(@class, 'c-teaser--announcement')]"
child-name: "//div[@class='c-teaser__text-wrapper']//span[@class='c-teaser__title']/text()"
javascript-link: ""
child-link: "/@href"
child-info: "//div[@class='c-teaser__text-wrapper']//div[@class='c-teaser__text']/p/text()"
#child-period: "//div[@class='c-teaser__text-wrapper']//small[@class='c-topline']/span[@class='c-topline__item']/text()"
child-period: "//div[@class='c-teaser__text-wrapper']//small//span/time/text()"
child-sponsor: "//div[@class='c-teaser__text-wrapper']//small[@class='c-topline']//span[@class='c-topline__item']/span[@class='c-topline__category']/text()"
entry:
general:
uniform: 'FALSE'
unitrue:
parent: '//html//body//form//table'
#child-name: '//html//body//form//table//tr[1]//td[2]//span'
#child-sum: '//html//body//form//table//tr[2]//td[1]//span//img'
#child-deadline: '//html/body/form/table/tr[2]/td[3]/span + label.1'
unifalse:
wordlist: "['Mobilität', 'Energie', 'Off-grid', 'regenerative Energien', 'Solar', 'Energienetze', 'Elektromobilität']"
giz:
domain: 'https://ausschreibungen.giz.de'
entry-list:
link1: 'https://ausschreibungen.giz.de/Satellite/company/welcome.do?method=showTable&fromSearch=1&tableSortPROJECT_RESULT=2&tableSortAttributePROJECT_RESULT=publicationDate&selectedTablePagePROJECT_RESULT='
link2: ''
jsdomain: 'None'
#iteration-var-list: "[1,2,3,4,5,6,7]"
iteration-var-list: "[1,2]"
#parent: "//html//body//div//main//div//div[@class='row']//div[@class='large-12']//a[@class='c-teaser']"
parent: "//html//body//div//div//table[contains(@class, 'csx-new-table')]//tbody//tr"
child-name: "//td[3]//text()"
child-link: "//a/@href"
javascript-link: "/td[6]/a"
child-info: "/td[4]/text()[1]"
child-period: "//td[2]/abbr/text()"
#child-period: "//div[@class='c-teaser__text-wrapper']//small//span/time/text()"
child-sponsor: "/tr/td[4]/text()"
entry:
general:
uniform: 'TRUE'
unitrue:
#parent: '//html//body//form//table'
text: '/html/body/div[2]/div[4]/div/div[5]/div/table/tbody/tr/td[5]/a/@href'
text: '/html/body/div[2]/div[4]/div/div[5]/div/table/tbody/tr/td[5]/a'
#child-sum: '//html//body//form//table//tr[2]//td[1]//span//img'
#child-deadline: '//html/body/form/table/tr[2]/td[3]/span + label.1'
unifalse:
wordlist: "['Mobilität', 'Energie', 'Off-grid', 'regenerative Energien', 'Solar', 'Energienetze', 'Elektromobilität']"
evergabe-online:
domain: 'https://www.evergabe-online.de/'
entry-list:
link1: 'https://www.evergabe-online.de/search.html?101-1.-searchPanel-results-searchResults-results-topToolbars-toolbars-1-span-navigator-navigation-'
link2: '-pageLink'
jsdomain: 'https://www.evergabe-online.de/search.html'
jslink1: '/html/body/div[8]/main/div[4]/div/div/div[2]/table/thead/tr[1]/td/div[2]/div/span['
jslink2: ']'
jsiteration-var-list: "[1,2, 3, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,6,7,8,9,10]"
#jsiteration-var-list: "[1,2, 3]"
iteration-var-list: "[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102]"
#iteration-var-list: "[1, 2, 3]"
parent: "/html/body/div[8]/main/div[4]/div/div/div[2]/table/tbody//tr"
child-name: "//td[1]/div/a/text()"
child-link: "//td[1]/div/a/@href"
javascript-link: ""
child-info: "//td[3]/div/text()"
child-period: "//td[5]/text()"
child-sponsor: "//td[2]/div/text()"
entry:
general:
uniform: 'FALSE'
unitrue:
#parent: '//html//body//form//table'
#text: '/html/body/div[2]/div[4]/div/div[5]/div/table/tbody/tr/td[5]/a/@href'
#child-sum: '//html//body//form//table//tr[2]//td[1]//span//img'
#child-deadline: '//html/body/form/table/tr[2]/td[3]/span + label.1'
unifalse:
wordlist: "['Mobilität', 'Energie', 'Off-grid', 'regenerative Energien', 'Solar', 'Energienetze', 'Elektromobilität']"
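
The unifalse wordlist entries above point to keyword-based filtering of entries whose detail pages are not uniform; how fdb_spider actually applies the list is not part of this diff, but a rough, assumed sketch of that idea is:

import ast

# wordlist exactly as stored above (a stringified Python list)
wordlist_raw = ("['Mobilität', 'Energie', 'Off-grid', 'regenerative Energien', "
                "'Solar', 'Energienetze', 'Elektromobilität']")
wordlist = ast.literal_eval(wordlist_raw)


def matches_wordlist(entry_text, words=wordlist):
    # keep an entry if any configured keyword occurs in its text (case-insensitive)
    text = entry_text.lower()
    return any(word.lower() in text for word in words)


print(matches_wordlist('Förderung für Elektromobilität in Kommunen'))    # True
print(matches_wordlist('Ausschreibung für Catering-Dienstleistungen'))   # False

ast.literal_eval is used in this sketch instead of eval so that only a literal list can come out of the config string.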

spiders/fdb_spider.py (+16 / -2)

@@ -718,6 +718,14 @@ class fdb_spider(object):
file_name = "spiders/pages/" + fdb + str(i) + "/" + str(entry_id) + ".pdf"
entry_link = dictionary_entry_list[entry_id]["link"]
print('that is the child: ' + child)
if 'http' in child:
try:
response = requests.get(child)
except Exception as e:
print(child + ' does not appear to be a valid pdf link to download, original message is ' + str(e))
if 'http' not in child:
if 'javascript' not in entry_link and 'js' not in entry_link and 'http' in entry_link:
try:
@@ -731,15 +739,21 @@
if entry_domain[-1] == '/':
pdf_link = entry_domain[:-1] + child[1:]
if entry_domain[-1] != '/':
#print('it got into OIOIOIOOIOI')
#print('before loop ', entry_domain)
cut_value = 0
# count the characters after the last '/' in entry_domain
for n in range(1, len(entry_domain) + 1):
if entry_domain[-n] != '/':
cut_value += 1
else:
break
# cut them off together with the slash itself, since child[1:] already starts with '/'
entry_domain = entry_domain[:-(cut_value + 1)]
#print('after loop ', entry_domain)
pdf_link = entry_domain + child[1:]
#print('the pdf link after recursive until slash: ', pdf_link)
if child[0] == '/':
if entry_domain[-1] == '/':
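
For context, the hunk above glues relative pdf links onto the domain of their entry page. A standalone sketch of the same gluing, with invented example URLs and a hypothetical helper name (the spider itself works on entry_domain inside its parse loop rather than through such a function, and urljoin is shown only as the standard-library equivalent), is:

from urllib.parse import urljoin


def glue_pdf_link(entry_link, child):
    # illustrative helper: resolve a pdf link found on an entry page
    if 'http' in child:
        return child  # already absolute, nothing to glue
    if child.startswith('./'):
        # cut everything after the last '/' of the entry page, then append
        # the relative part (child[1:] starts with '/')
        return entry_link.rsplit('/', 1)[0] + child[1:]
    if child.startswith('/'):
        # root-relative link: glue it onto scheme + host only
        return '/'.join(entry_link.split('/', 3)[:3]) + child
    # fall back to standard-library resolution for anything else
    return urljoin(entry_link, child)


print(glue_pdf_link('https://example.org/tenders/page.html', './docs/notice.pdf'))
# -> https://example.org/tenders/docs/notice.pdf
print(glue_pdf_link('https://example.org/tenders/page.html', '/docs/notice.pdf'))
# -> https://example.org/docs/notice.pdf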
