#!/usr/bin/env python
# -*- coding: utf-8 -*-
# =============================================================================
#  Version: 2.55 (March 23, 2016)
#  Author: Giuseppe Attardi (attardi@di.unipi.it), University of Pisa
#
#  Contributors:
#   Antonio Fuschetto (fuschett@aol.com)
#   Leonardo Souza (lsouza@amtera.com.br)
#   Juan Manuel Caicedo (juan@cavorite.com)
#   Humberto Pereira (begini@gmail.com)
#   Siegfried-A. Gevatter (siegfried@gevatter.com)
#   Pedro Assis (pedroh2306@gmail.com)
#   Wim Muskee (wimmuskee@gmail.com)
#   Radics Geza (radicsge@gmail.com)
#
# =============================================================================
#  Copyright (c) 2011-2016. Giuseppe Attardi (attardi@di.unipi.it).
# =============================================================================
#  This file is part of Tanl.
#
#  Tanl is free software; you can redistribute it and/or modify it
#  under the terms of the GNU General Public License, version 3,
#  as published by the Free Software Foundation.
#
#  Tanl is distributed in the hope that it will be useful,
#  but WITHOUT ANY WARRANTY; without even the implied warranty of
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#  GNU General Public License for more details.
#
#  You should have received a copy of the GNU General Public License
#  along with this program.  If not, see <http://www.gnu.org/licenses/>.
# =============================================================================

"""Wikipedia Extractor:
Extracts and cleans text from a Wikipedia database dump and stores output in a
number of files of similar size in a given directory.
Each file will contain several documents in the format:

    <doc id="" url="" title="">
        ...
    </doc>

Template expansion requires first preprocessing the whole dump and
collecting template definitions.
"""

import sys
import argparse
import bz2
import codecs
import cgi
import fileinput
import logging
import os.path
import re  # TODO: use the regex module when it becomes standard
import time
import urllib
from cStringIO import StringIO
from htmlentitydefs import name2codepoint
from itertools import izip, izip_longest
from multiprocessing import Queue, Process, Value, cpu_count
from timeit import default_timer

# ===========================================================================

# Program version
version = '2.55'

## PARAMS ####################################################################

##
# Defined in <siteinfo>
# 'Template' is included by default; others are added when loading an
# external template file.
knownNamespaces = set(['Template'])

##
# Keys for Template and Module namespaces
templateKeys = set(['10', '828'])

##
# The namespace used for template definitions
# It is the name associated with namespace key=10 in the siteinfo header.
templateNamespace = ''
templatePrefix = ''

##
# The namespace used for module definitions
# It is the name associated with namespace key=828 in the siteinfo header.
moduleNamespace = ''

##
# Recognize only these namespaces
# w: Internal links to the Wikipedia
# wiktionary: Wiki dictionary
# wikt: shortcut for Wiktionary
#
acceptedNamespaces = ['w', 'wiktionary', 'wikt']

##
# Drop these elements from article text
#
discardElements = [
    'gallery', 'timeline', 'noinclude', 'pre',
    'table', 'tr', 'td', 'th', 'caption', 'div',
    'form', 'input', 'select', 'option', 'textarea',
    'ul', 'li', 'ol', 'dl', 'dt', 'dd', 'menu', 'dir',
    'ref', 'references', 'img', 'imagemap', 'source', 'small',
    'sub', 'sup'
]

# This is obtained from <siteinfo>
urlbase = ''


def get_url(uid):
    return "%s?curid=%s" % (urlbase, uid)

# =========================================================================
#
# MediaWiki Markup Grammar
# https://www.mediawiki.org/wiki/Preprocessor_ABNF

# xml-char = %x9 / %xA / %xD / %x20-D7FF / %xE000-FFFD / %x10000-10FFFF
# sptab = SP / HTAB

# ; everything except ">" (%x3E)
# attr-char = %x9 / %xA / %xD / %x20-3D / %x3F-D7FF / %xE000-FFFD / %x10000-10FFFF

# literal = *xml-char
# title = wikitext-L3
# part-name = wikitext-L3
# part-value = wikitext-L3
# part = ( part-name "=" part-value ) / ( part-value )
# parts = [ title *( "|" part ) ]
# tplarg = "{{{" parts "}}}"
# template = "{{" parts "}}"
# link = "[[" wikitext-L3 "]]"

# comment = "<!--" literal "-->"
# unclosed-comment = "<!--" literal END
# ; the + in the line-eating-comment rule was absent between MW 1.12 and MW 1.22
# line-eating-comment = LF LINE-START *SP +( comment *SP ) LINE-END

# attr = *attr-char
# nowiki-element = "<nowiki" attr ( "/>" / ( ">" literal ( "</nowiki>" / END ) ) )

# wikitext-L2 = heading / wikitext-L3 / *wikitext-L2
# wikitext-L3 = literal / template / tplarg / link / comment /
#               line-eating-comment / unclosed-comment / xmlish-element /
#               *wikitext-L3

# ------------------------------------------------------------------------------

selfClosingTags = ('br', 'hr', 'nobr', 'ref', 'references', 'nowiki')

# These tags are dropped, keeping their content.
# handle 'a' separately, depending on keepLinks
ignoredTags = (
    'abbr', 'b', 'big', 'blockquote', 'center', 'cite', 'em',
    'font', 'h1', 'h2', 'h3', 'h4', 'hiero', 'i', 'kbd', 'nowiki',
    'p', 'plaintext', 's', 'span', 'strike', 'strong',
    'tt', 'u', 'var'
)

placeholder_tags = {'math': 'formula', 'code': 'codice'}


def normalizeTitle(title):
    """Normalize title"""
    # remove leading/trailing whitespace and underscores
    title = title.strip(' _')
    # replace sequences of whitespace and underscore chars with a single space
    title = re.sub(r'[\s_]+', ' ', title)

    m = re.match(r'([^:]*):(\s*)(\S(?:.*))', title)
    if m:
        prefix = m.group(1)
        if m.group(2):
            optionalWhitespace = ' '
        else:
            optionalWhitespace = ''
        rest = m.group(3)

        ns = normalizeNamespace(prefix)
        if ns in knownNamespaces:
            # If the prefix designates a known namespace, then it might be
            # followed by optional whitespace that should be removed to get
            # the canonical page name
            # (e.g., "Category: Births" should become "Category:Births").
            title = ns + ":" + ucfirst(rest)
        else:
            # If the part before the colon is not a known namespace, then we
            # must not remove the space after the colon (if any), e.g.,
            # "3001: The_Final_Odyssey" != "3001:The_Final_Odyssey".
            # However, to get the canonical page name we must contract multiple
            # spaces into one, because
            # "3001:   The_Final_Odyssey" != "3001: The_Final_Odyssey".
            title = ucfirst(prefix) + ":" + optionalWhitespace + ucfirst(rest)
    else:
        # no namespace, just capitalize first letter
        title = ucfirst(title)
    return title
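
# Illustrative behavior (sketch): underscores and runs of whitespace collapse
# to single spaces, and known namespace prefixes are canonicalized:
#   normalizeTitle('template: foo_bar')       -> 'Template:Foo bar'
#   normalizeTitle('3001: the_final_odyssey') -> '3001: The final odyssey'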


def unescape(text):
    """
    Removes HTML or XML character references and entities from a text string.

    :param text The HTML (or XML) source text.
    :return The plain text, as a Unicode string, if necessary.
    """

    def fixup(m):
        text = m.group(0)
        code = m.group(1)
        try:
            if text[1] == "#":  # character reference
                if text[2] == "x":
                    return unichr(int(code[1:], 16))
                else:
                    return unichr(int(code))
            else:  # named entity
                return unichr(name2codepoint[code])
        except:
            return text  # leave as is

    return re.sub("&#?(\w+);", fixup, text)
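
# Illustrative behavior (sketch):
#   unescape('&amp;')  -> u'&'   (named entity)
#   unescape('&#60;')  -> u'<'   (decimal character reference)
#   unescape('&#x3E;') -> u'>'   (hexadecimal character reference)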

# Match HTML comments
# The buggy template {{Template:T}} has a comment terminating with just "->"
comment = re.compile(r'<!--.*?-->', re.DOTALL)

# Match ignored tags
ignored_tag_patterns = []


def ignoreTag(tag):
    left = re.compile(r'<%s\b.*?>' % tag, re.IGNORECASE | re.DOTALL)  # both <ref> and <references>
    right = re.compile(r'</\s*%s>' % tag, re.IGNORECASE)
    ignored_tag_patterns.append((left, right))


for tag in ignoredTags:
    ignoreTag(tag)

# Match selfClosing HTML tags
selfClosing_tag_patterns = [
    re.compile(r'<\s*%s\b[^>]*/\s*>' % tag, re.DOTALL | re.IGNORECASE) for tag in selfClosingTags
]

# Match HTML placeholder tags
placeholder_tag_patterns = [
    (re.compile(r'<\s*%s(\s*| [^>]+?)>.*?<\s*/\s*%s\s*>' % (tag, tag), re.DOTALL | re.IGNORECASE),
     repl) for tag, repl in placeholder_tags.items()
]

# Match preformatted lines
preformatted = re.compile(r'^ .*?$')

# Match external links (space separates second optional parameter)
externalLink = re.compile(r'\[\w+[^ ]*? (.*?)]')
externalLinkNoAnchor = re.compile(r'\[\w+[&\]]*\]')

# Matches bold/italic
bold_italic = re.compile(r"'''''(.*?)'''''")
bold = re.compile(r"'''(.*?)'''")
italic_quote = re.compile(r"''\"([^\"]*?)\"''")
italic = re.compile(r"''(.*?)''")
quote_quote = re.compile(r'""([^"]*?)""')

# Matches space
spaces = re.compile(r' {2,}')

# Matches dots
dots = re.compile(r'\.{4,}')

# ======================================================================


class Template(list):
    """
    A Template is a list of TemplateText or TemplateArgs
    """

    @classmethod
    def parse(cls, body):
        tpl = Template()
        # we must handle nesting, s.a.
        # {{{1|{{PAGENAME}}}
        # {{{italics|{{{italic|}}}
        # {{#if:{{{{{#if:{{{nominee|}}}|nominee|candidate}}|}}}|
        #
        start = 0
        for s, e in findMatchingBraces(body, 3):
            tpl.append(TemplateText(body[start:s]))
            tpl.append(TemplateArg(body[s + 3:e - 3]))
            start = e
        tpl.append(TemplateText(body[start:]))  # leftover
        return tpl

    def subst(self, params, extractor, depth=0):
        # We perform parameter substitutions recursively.
        # We also limit the maximum number of iterations to avoid too long or
        # even endless loops (in case of malformed input).

        # :see: http://meta.wikimedia.org/wiki/Help:Expansion#Distinction_between_variables.2C_parser_functions.2C_and_templates
        #
        # Parameter values are assigned to parameters in two (?) passes.
        # Therefore a parameter name in a template can depend on the value of
        # another parameter of the same template, regardless of the order in
        # which they are specified in the template call, for example, using
        # Template:ppp containing "{{{{{{p}}}}}}", {{ppp|p=q|q=r}} and even
        # {{ppp|q=r|p=q}} gives r, but using Template:tvvv containing
        # "{{{{{{{{{p}}}}}}}}}", {{tvvv|p=q|q=r|r=s}} gives s.

        # logging.debug('subst tpl (%d, %d) %s', len(extractor.frame), depth, self)

        if depth > extractor.maxParameterRecursionLevels:
            extractor.recursion_exceeded_3_errs += 1
            return ''

        return ''.join([tpl.subst(params, extractor, depth) for tpl in self])

    def __str__(self):
        return ''.join([unicode(x) for x in self])


class TemplateText(unicode):
    """Fixed text of template"""

    def subst(self, params, extractor, depth):
        return self


class TemplateArg(object):
    """
    Parameter to a template.
    Has a name and a default value, both of which are Templates.
    """

    def __init__(self, parameter):
        """
        :param parameter: the parts of a tplarg.
        """
        # the parameter name itself might contain templates, e.g.:
        #   appointe{{#if:{{{appointer14|}}}|r|d}}14|
        #   4|{{{{{subst|}}}CURRENTYEAR}}

        # any parts in a tplarg after the first (the parameter default) are
        # ignored, and an equals sign in the first part is treated as plain text.
        # logging.debug('TemplateArg %s', parameter)

        parts = splitParts(parameter)
        self.name = Template.parse(parts[0])
        if len(parts) > 1:
            # This parameter has a default value
            self.default = Template.parse(parts[1])
        else:
            self.default = None

    def __str__(self):
        if self.default:
            return '{{{%s|%s}}}' % (self.name, self.default)
        else:
            return '{{{%s}}}' % self.name

    def subst(self, params, extractor, depth):
        """
        Substitute value for this argument from dict :param params:
        Use :param extractor: to evaluate expressions for name and default.
        Limit substitution to the maximum :param depth:.
        """
        # the parameter name itself might contain templates, e.g.:
        # appointe{{#if:{{{appointer14|}}}|r|d}}14|
        paramName = self.name.subst(params, extractor, depth + 1)
        paramName = extractor.expandTemplates(paramName)
        res = ''
        if paramName in params:
            res = params[paramName]  # use parameter value specified in template invocation
        elif self.default:  # use the default value
            defaultValue = self.default.subst(params, extractor, depth + 1)
            res = extractor.expandTemplates(defaultValue)
        # logging.debug('subst arg %d %s -> %s' % (depth, paramName, res))
        return res

# ======================================================================

substWords = 'subst:|safesubst:'


class Extractor(object):
    """
    An extraction task on an article.
    """
    ##
    # Whether to preserve links in output
    keepLinks = False

    ##
    # Whether to preserve section titles
    keepSections = True

    ##
    # Whether to preserve lists
    keeplists = False

    ##
    # Whether to output HTML instead of text
    toHTML = False

    ##
    # Whether to expand templates
    expand_templates = True

    def __init__(self, id, title, lines):
        """
        :param id: id of page.
        :param title: title of page.
        :param lines: a list of lines.
        """
        self.id = id
        self.title = title
        self.text = ''.join(lines)
        self.magicWords = MagicWords()
        self.frame = []
        self.recursion_exceeded_1_errs = 0  # template recursion within expandTemplates()
        self.recursion_exceeded_2_errs = 0  # template recursion within expandTemplate()
        self.recursion_exceeded_3_errs = 0  # parameter recursion
        self.template_title_errs = 0

    def extract(self, out):
        """
        :param out: a memory file.
        """
        logging.debug("%s\t%s", self.id, self.title)
        url = get_url(self.id)
        header = '<doc id="%s" url="%s" title="%s">\n' % (self.id, url, self.title)
        # Separate header from text with a newline.
        header += self.title + '\n\n'
        header = header.encode('utf-8')
        self.magicWords['pagename'] = self.title
        self.magicWords['fullpagename'] = self.title
        self.magicWords['currentyear'] = time.strftime('%Y')
        self.magicWords['currentmonth'] = time.strftime('%m')
        self.magicWords['currentday'] = time.strftime('%d')
        self.magicWords['currenthour'] = time.strftime('%H')
        self.magicWords['currenttime'] = time.strftime('%H:%M:%S')
        text = self.clean()
        footer = "\n</doc>\n"
        out.write(header)
        for line in compact(text):
            out.write(line.encode('utf-8'))
            out.write('\n')
        out.write(footer)
        errs = (self.template_title_errs,
                self.recursion_exceeded_1_errs,
                self.recursion_exceeded_2_errs,
                self.recursion_exceeded_3_errs)
        if any(errs):
            logging.warn("Template errors in article '%s' (%s): title(%d) recursion(%d, %d, %d)",
                         self.title, self.id, *errs)

    def clean(self):
        """
        Transforms wiki markup into plain text. If the command-line flag
        --escapedoc is set, the text is also HTML-escaped.
        @see https://www.mediawiki.org/wiki/Help:Formatting
        """
        text = self.text
        self.text = ''  # save memory

        if Extractor.expand_templates:
            # expand templates
            # See: http://www.mediawiki.org/wiki/Help:Templates
            text = self.expandTemplates(text)
        else:
            # Drop transclusions (template, parser functions)
            text = dropNested(text, r'{{', r'}}')

        # Drop tables
        text = dropNested(text, r'{\|', r'\|}')

        # replace external links
        text = replaceExternalLinks(text)

        # replace internal links
        text = replaceInternalLinks(text)

        # drop MagicWords behavioral switches
        text = magicWordsRE.sub('', text)

        # ############### Process HTML ###############

        # turn into HTML, except for the content of <syntaxhighlight>
        res = ''
        cur = 0
        for m in syntaxhighlight.finditer(text):
            res += unescape(text[cur:m.start()]) + m.group(1)
            cur = m.end()
        text = res + unescape(text[cur:])

        # Handle bold/italic/quote
        if self.toHTML:
            text = bold_italic.sub(r'<b>\1</b>', text)
            text = bold.sub(r'<b>\1</b>', text)
            text = italic.sub(r'<i>\1</i>', text)
        else:
            text = bold_italic.sub(r'\1', text)
            text = bold.sub(r'\1', text)
            text = italic_quote.sub(r'"\1"', text)
            text = italic.sub(r'"\1"', text)
            text = quote_quote.sub(r'"\1"', text)
        # residuals of unbalanced quotes
        text = text.replace("'''", '').replace("''", '"')

        # Collect spans
        spans = []

        # Drop HTML comments
        for m in comment.finditer(text):
            spans.append((m.start(), m.end()))

        # Drop self-closing tags
        for pattern in selfClosing_tag_patterns:
            for m in pattern.finditer(text):
                spans.append((m.start(), m.end()))

        # Drop ignored tags
        for left, right in ignored_tag_patterns:
            for m in left.finditer(text):
                spans.append((m.start(), m.end()))
            for m in right.finditer(text):
                spans.append((m.start(), m.end()))

        # Bulk remove all spans
        text = dropSpans(spans, text)

        # Drop discarded elements
        for tag in discardElements:
            text = dropNested(text, r'<\s*%s\b[^>/]*>' % tag, r'<\s*/\s*%s>' % tag)

        if not self.toHTML:
            # Turn into text what is left (&amp;nbsp;) and <syntaxhighlight>
            text = unescape(text)

        # Expand placeholders
        for pattern, placeholder in placeholder_tag_patterns:
            index = 1
            for match in pattern.finditer(text):
                text = text.replace(match.group(), '%s_%d' % (placeholder, index))
                index += 1

        text = text.replace('<<', u'«').replace('>>', u'»')

        #############################################

        # Cleanup text
        text = text.replace('\t', ' ')
        text = spaces.sub(' ', text)
        text = dots.sub('...', text)
        text = re.sub(u' ([,:\.\)\]»])', r'\1', text)
        text = re.sub(u'([\[\(«]) ', r'\1', text)
        text = re.sub(r'\n\W+?\n', '\n', text, flags=re.U)  # lines with only punctuations
        text = text.replace(',,', ',').replace(',.', '.')
        if escape_doc:
            text = cgi.escape(text)
        return text

    # ----------------------------------------------------------------------
    # Expand templates

    maxTemplateRecursionLevels = 30
    maxParameterRecursionLevels = 10

    # check for template beginning
    reOpen = re.compile('(?<!{){{(?!{)', re.DOTALL)

    def expandTemplates(self, wikitext):
        """
        :param wikitext: the text to be expanded.

        Templates are frequently nested. Occasionally, parsing mistakes may
        cause template insertion to enter an infinite loop, for instance when
        trying to instantiate Template:Country

        {{country_{{{1}}}|{{{2}}}|{{{2}}}|size={{{size|}}}|name={{{name|}}}}}

        which is repeatedly trying to insert template 'country_', which is
        again resolved to Template:Country. The straightforward solution of
        keeping track of templates that were already inserted for the current
        article would not work, because the same template may legally be used
        more than once, with different parameters in different parts of the
        article. Therefore, we limit the number of iterations of nested
        template inclusion.
        """
        # Test template expansion at:
        # https://en.wikipedia.org/wiki/Special:ExpandTemplates

        res = ''
        if len(self.frame) >= self.maxTemplateRecursionLevels:
            self.recursion_exceeded_1_errs += 1
            return res

        # logging.debug('<expandTemplates ' + str(len(self.frame)))

        cur = 0
        # look for matching {{...}}
        for s, e in findMatchingBraces(wikitext, 2):
            res += wikitext[cur:s] + self.expandTemplate(wikitext[s + 2:e - 2])
            cur = e
        # leftover
        res += wikitext[cur:]
        # logging.debug('   expandTemplates> %d %s', len(self.frame), res)
        return res

    def templateParams(self, parameters):
        """
        Build a dictionary with positional or name key to expanded parameters.
        :param parameters: the parts[1:] of a template, i.e. all except the title.
        """
        templateParams = {}

        if not parameters:
            return templateParams
        logging.debug('<templateParams: %s', '|'.join(parameters))

        # Parameters can be either named or unnamed. In the latter case, their
        # name is defined by their ordinal position (1, 2, 3, ...).

        unnamedParameterCounter = 0

        # It's legal for unnamed parameters to be skipped, in which case they
        # will get default values (if available) during actual instantiation.
        # That is {{template_name|a||c}} means parameter 1 gets
        # the value 'a', parameter 2 value is not defined, and parameter 3 gets
        # the value 'c'. This case is correctly handled by function 'split',
        # and does not require any special handling.
        for param in parameters:
            # Spaces before or after a parameter value are normally ignored,
            # UNLESS the parameter contains a link (to prevent possible gluing
            # the link to the following text after template substitution)

            # Parameter values may contain "=" symbols, hence the parameter
            # name extends up to the first such symbol.

            # It is legal for a parameter to be specified several times, in
            # which case the last assignment takes precedence. Example:
            # "{{t|a|b|c|2=B}}" is equivalent to "{{t|a|B|c}}".
            # Therefore, we don't check if the parameter has been assigned a
            # value before, because anyway the last assignment should override
            # any previous ones.
            # FIXME: Don't use DOTALL here since parameters may be tags with
            # attributes, e.g. <div class="templatequotecite">
            # Parameters may span several lines, like:
            # {{Reflist|colwidth=30em|refs=
            # &lt;ref name=&quot;Goode&quot;&gt;Title&lt;/ref&gt;

            # The '=' might occur within an HTML attribute:
            #   "&lt;ref name=value"
            # but we stop at the first one.
            m = re.match(' *([^= ]*?) *=(.*)', param, re.DOTALL)
            if m:
                # This is a named parameter.  This case also handles parameter
                # assignments like "2=xxx", where the number of an unnamed
                # parameter ("2") is specified explicitly - this is handled
                # transparently.

                parameterName = m.group(1).strip()
                parameterValue = m.group(2)

                if ']]' not in parameterValue:  # if the value does not contain a link, trim whitespace
                    parameterValue = parameterValue.strip()
                templateParams[parameterName] = parameterValue
            else:
                # this is an unnamed parameter
                unnamedParameterCounter += 1

                if ']]' not in param:  # if the value does not contain a link, trim whitespace
                    param = param.strip()
                templateParams[str(unnamedParameterCounter)] = param
        logging.debug('   templateParams> %s', '|'.join(templateParams.values()))
        return templateParams
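
    # Illustrative mapping (sketch): unnamed parts get ordinal keys, named
    # parts keep their names:
    #   templateParams(['a', '', 'c', 'name=v']) ->
    #       {'1': 'a', '2': '', '3': 'c', 'name': 'v'}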

    def expandTemplate(self, body):
        """Expands template invocation.
        :param body: the parts of a template.

        :see http://meta.wikimedia.org/wiki/Help:Expansion for an explanation
        of the process.

        See in particular: Expansion of names and values
        http://meta.wikimedia.org/wiki/Help:Expansion#Expansion_of_names_and_values

        For most parser functions all names and values are expanded,
        regardless of what is relevant for the result. The branching functions
        (#if, #ifeq, #iferror, #ifexist, #ifexpr, #switch) are exceptions.

        All names in a template call are expanded, and the titles of the
        tplargs in the template body, after which it is determined which
        values must be expanded, and for which tplargs in the template body
        the first part (default).

        In the case of a tplarg, any parts beyond the first are never
        expanded. The possible name and the value of the first part is
        expanded if the title does not match a name in the template call.

        :see code for braceSubstitution at
        https://doc.wikimedia.org/mediawiki-core/master/php/html/Parser_8php_source.html#3397:
        """
        # template        = "{{" parts "}}"

        # Templates and tplargs are decomposed in the same way, with pipes as
        # separator, even though eventually any parts in a tplarg after the first
        # (the parameter default) are ignored, and an equals sign in the first
        # part is treated as plain text.
        # Pipes inside inner templates and tplargs, or inside double rectangular
        # brackets within the template or tplargs are not taken into account in
        # this decomposition.
        # The first part is called title, the other parts are simply called parts.

        # If a part has one or more equals signs in it, the first equals sign
        # determines the division into name = value. Equals signs inside inner
        # templates and tplargs, or inside double rectangular brackets within the
        # part are not taken into account in this decomposition. Parts without
        # equals sign are indexed 1, 2, .., given as attribute in the <name> tag.

        if len(self.frame) >= self.maxTemplateRecursionLevels:
            self.recursion_exceeded_2_errs += 1
            # logging.debug('   INVOCATION> %d %s', len(self.frame), body)
            return ''

        logging.debug('INVOCATION %d %s', len(self.frame), body)

        parts = splitParts(body)
        # title is the portion before the first |
        logging.debug('TITLE %s', parts[0].strip())
        title = self.expandTemplates(parts[0].strip())

        # SUBST
        # Apply the template tag to parameters without
        # substituting into them, e.g.
        # {{subst:t|a{{{p|q}}}b}} gives the wikitext start-a{{{p|q}}}b-end
        # @see https://www.mediawiki.org/wiki/Manual:Substitution#Partial_substitution
        subst = False
        if re.match(substWords, title, re.IGNORECASE):
            title = re.sub(substWords, '', title, 1, re.IGNORECASE)
            subst = True

        if title.lower() in self.magicWords.values:
            return self.magicWords[title.lower()]

        # Parser functions
        # The first argument is everything after the first colon.
        # It has been evaluated above.
        colon = title.find(':')
        if colon > 1:
            funct = title[:colon]
            parts[0] = title[colon + 1:].strip()  # side-effect (parts[0] not used later)
            # arguments after first are not evaluated
            ret = callParserFunction(funct, parts, self.frame)
            return self.expandTemplates(ret)

        title = fullyQualifiedTemplateTitle(title)
        if not title:
            self.template_title_errs += 1
            return ''

        redirected = redirects.get(title)
        if redirected:
            title = redirected

        # get the template
        if title in templateCache:
            template = templateCache[title]
        elif title in templates:
            template = Template.parse(templates[title])
            # add it to cache
            templateCache[title] = template
            del templates[title]
        else:
            # The page being included could not be identified
            return ''

        logging.debug('TEMPLATE %s: %s', title, template)

        # tplarg          = "{{{" parts "}}}"
        # parts           = [ title *( "|" part ) ]
        # part            = ( part-name "=" part-value ) / ( part-value )
        # part-name       = wikitext-L3
        # part-value      = wikitext-L3
        # wikitext-L3     = literal / template / tplarg / link / comment /
        #                   line-eating-comment / unclosed-comment /
        #                   xmlish-element / *wikitext-L3

        # A tplarg may contain other parameters as well as templates, e.g.:
        #   {{{text|{{{quote|{{{1|{{error|Error: No text given}}}}}}}}}}}
        # hence no simple RE like this would work:
        #   '{{{((?:(?!{{{).)*?)}}}'
        # We must use full CF parsing.

        # the parameter name itself might be computed, e.g.:
        #   {{{appointe{{#if:{{{appointer14|}}}|r|d}}14|}}}

        # Because of the multiple uses of double-brace and triple-brace
        # syntax, expressions can sometimes be ambiguous.
        # Precedence rules specified here:
        # http://www.mediawiki.org/wiki/Preprocessor_ABNF#Ideal_precedence
        # resolve ambiguities like this:
        #   {{{{ }}}} -> { {{{ }}} }
        #   {{{{{ }}}}} -> {{ {{{ }}} }}
        #
        # :see: https://en.wikipedia.org/wiki/Help:Template#Handling_parameters

        params = parts[1:]

        if not subst:
            # Evaluate parameters, since they may contain templates, including
            # the symbol "=".
            # Example: {{#ifexpr: {{{1}}} = 1 }}
            params = [self.expandTemplates(p) for p in params]

        # build a dict of name-values for the parameter values
        params = self.templateParams(params)

        # Perform parameter substitution
        # extend frame before subst, since there may be recursion in default
        # parameter value, e.g. {{OTRS|celebrative|date=April 2015}} in article
        # 21637542 in enwiki.
        self.frame.append((title, params))
        instantiated = template.subst(params, self)
        logging.debug('instantiated %d %s', len(self.frame), instantiated)
        value = self.expandTemplates(instantiated)
        self.frame.pop()
        logging.debug('   INVOCATION> %s %d %s', title, len(self.frame), value)
        return value

# ----------------------------------------------------------------------
# parameter handling


def splitParts(paramsList):
    """
    :param paramsList: the parts of a template or tplarg.

    Split template parameters at the separator "|".

    Template parameters often contain URLs, internal links, text or even
    template expressions, since we evaluate templates outside in.
    This is required for cases like:
      {{#if: {{{1}}} | {{lc:{{{1}}} | "parameter missing"}}
    Parameters are separated by "|" symbols. However, we
    cannot simply split the string on "|" symbols, since these
    also appear inside templates and internal links, e.g.

     {{if:|
      |{{#if:the president|
           |{{#if:|
               [[Category:Hatnote templates|A{{PAGENAME}}]]
            }}
       }}
     }}

    We split parts at the "|" symbols that are not inside any pair
    {{{...}}}, {{...}}, [[...]], {|...|}.
    """
    # Must consider '[' as normal in expansion of Template:EMedicine2:
    # #ifeq: ped|article|[http://emedicine.medscape.com/article/180-overview|[http://www.emedicine.com/ped/topic180.htm#{{#if: |section~}}
    # as part of:
    # {{#ifeq: ped|article|[http://emedicine.medscape.com/article/180-overview|[http://www.emedicine.com/ped/topic180.htm#{{#if: |section~}}}} ped/180{{#if: |~}}]

    # should handle both tpl arg like:
    #    4|{{{{{subst|}}}CURRENTYEAR}}
    # and tpl parameters like:
    #    ||[[Category:People|{{#if:A|A|{{PAGENAME}}}}]]

    sep = '|'
    parameters = []
    cur = 0
    for s, e in findMatchingBraces(paramsList):
        par = paramsList[cur:s].split(sep)
        if par:
            if parameters:
                # portion before | belongs to previous parameter
                parameters[-1] += par[0]
                if len(par) > 1:
                    # rest are new parameters
                    parameters.extend(par[1:])
            else:
                parameters = par
        elif not parameters:
            parameters = ['']  # create first param
        # add span to last previous parameter
        parameters[-1] += paramsList[s:e]
        cur = e
    # leftover
    par = paramsList[cur:].split(sep)
    if par:
        if parameters:
            # portion before | belongs to previous parameter
            parameters[-1] += par[0]
            if len(par) > 1:
                # rest are new parameters
                parameters.extend(par[1:])
        else:
            parameters = par

    # logging.debug('splitParts %s %s\nparams: %s', sep, paramsList, str(parameters))
    return parameters
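
# Illustrative split (sketch): pipes nested inside [[...]] or {{...}} do not
# separate parameters:
#   splitParts('a|b=c|[[d|e]]|{{f|g}}') -> ['a', 'b=c', '[[d|e]]', '{{f|g}}']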


def findMatchingBraces(text, ldelim=0):
    """
    :param ldelim: number of braces to match. 0 means match [[]], {{}} and {{{}}}.
    """
    # Parsing is done with respect to pairs of double braces {{..}} delimiting
    # a template, and pairs of triple braces {{{..}}} delimiting a tplarg.
    # If double opening braces are followed by triple closing braces or
    # conversely, this is taken as delimiting a template, with one left-over
    # brace outside it, taken as plain text. For any pattern of braces this
    # defines a set of templates and tplargs such that any two are either
    # separate or nested (not overlapping).

    # Unmatched double rectangular closing brackets can be in a template or
    # tplarg, but unmatched double rectangular opening brackets cannot.
    # Unmatched double or triple closing braces inside a pair of
    # double rectangular brackets are treated as plain text.
    # Other formulation: in ambiguity between template or tplarg on one hand,
    # and a link on the other hand, the structure with the rightmost opening
    # takes precedence, even if this is the opening of a link without any
    # closing, so not producing an actual link.

    # In the case of more than three opening braces the last three are assumed
    # to belong to a tplarg, unless there is no matching triple of closing
    # braces, in which case the last two opening braces are assumed to
    # belong to a template.

    # We must skip individual { like in:
    #   {{#ifeq: {{padleft:|1|}} | { | | &nbsp;}}
    # We must resolve ambiguities like this:
    #   {{{{ }}}} -> { {{{ }}} }
    #   {{{{{ }}}}} -> {{ {{{ }}} }}
    #   {{#if:{{{{{#if:{{{nominee|}}}|nominee|candidate}}|}}}|...}}

    # Handle:
    #   {{{{{|safesubst:}}}#Invoke:String|replace|{{{1|{{{{{|safesubst:}}}PAGENAME}}}}}|%s+%([^%(]-%)$||plain=false}}
    # as well as expressions with stray }:
    #   {{{link|{{ucfirst:{{{1}}}}}} interchange}}}

    if ldelim:  # 2-3
        reOpen = re.compile('[{]{%d,}' % ldelim)  # at least ldelim
        reNext = re.compile('[{]{2,}|}{2,}')  # at least 2
    else:
        reOpen = re.compile('{{2,}|\[{2,}')
        reNext = re.compile('{{2,}|}{2,}|\[{2,}|]{2,}')  # at least 2

    cur = 0
    while True:
        m1 = reOpen.search(text, cur)
        if not m1:
            return
        lmatch = m1.end() - m1.start()
        if m1.group()[0] == '{':
            stack = [lmatch]  # stack of opening braces lengths
        else:
            stack = [-lmatch]  # negative means [
        end = m1.end()
        while True:
            m2 = reNext.search(text, end)
            if not m2:
                return  # unbalanced
            end = m2.end()
            brac = m2.group()[0]
            lmatch = m2.end() - m2.start()

            if brac == '{':
                stack.append(lmatch)
            elif brac == '}':
                while stack:
                    openCount = stack.pop()  # opening span
                    if openCount == 0:  # illegal unmatched [[
                        continue
                    if lmatch >= openCount:
                        lmatch -= openCount
                        if lmatch <= 1:  # either close or stray }
                            break
                    else:
                        # put back unmatched
                        stack.append(openCount - lmatch)
                        break
                if not stack:
                    yield m1.start(), end - lmatch
                    cur = end
                    break
                elif len(stack) == 1 and 0 < stack[0] < ldelim:
                    # ambiguous {{{{{ }}} }}
                    yield m1.start() + stack[0], end
                    cur = end
                    break
            elif brac == '[':  # [[
                stack.append(-lmatch)
            else:  # ]]
                while stack and stack[-1] < 0:  # matching [[
                    openCount = -stack.pop()
                    if lmatch >= openCount:
                        lmatch -= openCount
                        if lmatch <= 1:  # either close or stray ]
                            break
                    else:
                        # put back unmatched (negative)
                        stack.append(lmatch - openCount)
                        break
                if not stack:
                    yield m1.start(), end - lmatch
                    cur = end
                    break
                # unmatched ]] are discarded
                cur = end
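
# Illustrative spans (sketch): the (start, end) pairs include the braces
# themselves, so callers slice off the delimiters:
#   list(findMatchingBraces('x{{a}}y{{{b}}}z', 2)) -> [(1, 6), (7, 14)]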


def findBalanced(text, openDelim=['[['], closeDelim=[']]']):
    """
    Assuming that text contains a properly balanced expression using
    :param openDelim: as opening delimiters and
    :param closeDelim: as closing delimiters.
    :return: an iterator producing pairs (start, end) of start and end
    positions in text containing a balanced expression.
    """
    openPat = '|'.join([re.escape(x) for x in openDelim])
    # pattern for delimiters expected after each opening delimiter
    afterPat = {o: re.compile(openPat + '|' + c, re.DOTALL) for o, c in izip(openDelim, closeDelim)}
    stack = []
    start = 0
    cur = 0
    # end = len(text)
    startSet = False
    startPat = re.compile(openPat)
    nextPat = startPat
    while True:
        next = nextPat.search(text, cur)
        if not next:
            return
        if not startSet:
            start = next.start()
            startSet = True
        delim = next.group(0)
        if delim in openDelim:
            stack.append(delim)
            nextPat = afterPat[delim]
        else:
            opening = stack.pop()
            # assert opening == openDelim[closeDelim.index(next.group(0))]
            if stack:
                nextPat = afterPat[stack[-1]]
            else:
                yield start, next.end()
                nextPat = startPat
                start = next.end()
                startSet = False
        cur = next.end()
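
# Illustrative spans (sketch): nested delimiters yield one outer span:
#   list(findBalanced('a[[b[[c]]d]]e')) -> [(1, 12)]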

# ----------------------------------------------------------------------
# Modules

# Only minimal support
# FIXME: import Lua modules.


def if_empty(*rest):
    """
    This implements If_empty from English Wikipedia module:

       <title>Module:If empty</title>
       <ns>828</ns>
       <text>local p = {}

    function p.main(frame)
        local args = require('Module:Arguments').getArgs(frame, {wrappers = 'Template:If empty', removeBlanks = false})

        -- For backwards compatibility reasons, the first 8 parameters can be unset instead of being blank,
        -- even though there's really no legitimate use case for this. At some point, this will be removed.
        local lowestNil = math.huge
        for i = 8,1,-1 do
            if args[i] == nil then
                args[i] = ''
                lowestNil = i
            end
        end

        for k,v in ipairs(args) do
            if v ~= '' then
                if lowestNil &lt; k then
                    -- If any uses of this template depend on the behavior above, add them to a tracking category.
                    -- This is a rather fragile, convoluted, hacky way to do it, but it ensures that this module's
                    -- output won't be modified by it.
                    frame:extensionTag('ref', '[[Category:Instances of Template:If_empty missing arguments]]', {group = 'TrackingCategory'})
                    frame:extensionTag('references', '', {group = 'TrackingCategory'})
                end
                return v
            end
        end
    end

    return p   </text>
    """
    for arg in rest:
        if arg:
            return arg
    return ''
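
# Illustrative behavior (sketch): the first non-empty argument wins:
#   if_empty('', None, 'x', 'y') -> 'x'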

modules = {
    'convert': {
        'convert': lambda x, u, *rest: x + ' ' + u,  # no conversion
    },
    'If empty': {
        'main': if_empty
    }
}

# ----------------------------------------------------------------------
# variables


class MagicWords(object):
    """
    One copy in each Extractor.

    @see https://doc.wikimedia.org/mediawiki-core/master/php/MagicWord_8php_source.html
    """
    names = [
        '!',
        'currentmonth',
        'currentmonth1',
        'currentmonthname',
        'currentmonthnamegen',
        'currentmonthabbrev',
        'currentday',
        'currentday2',
        'currentdayname',
        'currentyear',
        'currenttime',
        'currenthour',
        'localmonth',
        'localmonth1',
        'localmonthname',
        'localmonthnamegen',
        'localmonthabbrev',
        'localday',
        'localday2',
        'localdayname',
        'localyear',
        'localtime',
        'localhour',
        'numberofarticles',
        'numberoffiles',
        'numberofedits',
        'articlepath',
        'pageid',
        'sitename',
        'server',
        'servername',
        'scriptpath',
        'stylepath',
        'pagename',
        'pagenamee',
        'fullpagename',
        'fullpagenamee',
        'namespace',
        'namespacee',
        'namespacenumber',
        'currentweek',
        'currentdow',
        'localweek',
        'localdow',
        'revisionid',
        'revisionday',
        'revisionday2',
        'revisionmonth',
        'revisionmonth1',
        'revisionyear',
        'revisiontimestamp',
        'revisionuser',
        'revisionsize',
        'subpagename',
        'subpagenamee',
        'talkspace',
        'talkspacee',
        'subjectspace',
        'subjectspacee',
        'talkpagename',
        'talkpagenamee',
        'subjectpagename',
        'subjectpagenamee',
        'numberofusers',
        'numberofactiveusers',
        'numberofpages',
        'currentversion',
        'rootpagename',
        'rootpagenamee',
        'basepagename',
        'basepagenamee',
        'currenttimestamp',
        'localtimestamp',
        'directionmark',
        'contentlanguage',
        'numberofadmins',
        'cascadingsources',
    ]

    def __init__(self):
        self.values = {'!': '|'}

    def __getitem__(self, name):
        return self.values.get(name)

    def __setitem__(self, name, value):
        self.values[name] = value

    switches = (
        '__NOTOC__',
        '__FORCETOC__',
        '__TOC__',
        '__NEWSECTIONLINK__',
        '__NONEWSECTIONLINK__',
        '__NOGALLERY__',
        '__HIDDENCAT__',
        '__NOCONTENTCONVERT__',
        '__NOCC__',
        '__NOTITLECONVERT__',
        '__NOTC__',
        '__START__',
        '__END__',
        '__INDEX__',
        '__NOINDEX__',
        '__STATICREDIRECT__',
        '__DISAMBIG__'
    )


magicWordsRE = re.compile('|'.join(MagicWords.switches))
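
# Illustrative use (sketch): behavioral switches are simply stripped:
#   magicWordsRE.sub('', 'Intro __NOTOC__ text') -> 'Intro  text'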

# ----------------------------------------------------------------------
# parser functions utilities


def ucfirst(string):
    """:return: a string with just its first character uppercase
    We can't use title() since it converts all words.
    """
    if string:
        if len(string) > 1:
            return string[0].upper() + string[1:]
        else:
            return string.upper()
    else:
        return ''


def lcfirst(string):
    """:return: a string with its first character lowercase"""
    if string:
        if len(string) > 1:
            return string[0].lower() + string[1:]
        else:
            return string.lower()
    else:
        return ''


def fullyQualifiedTemplateTitle(templateTitle):
    """
    Determine the namespace of the page being included through the template
    mechanism
    """
    if templateTitle.startswith(':'):
        # Leading colon by itself implies main namespace, so strip this colon
        return ucfirst(templateTitle[1:])
    else:
        m = re.match('([^:]*)(:.*)', templateTitle)
        if m:
            # colon found but not in the first position - check if it
            # designates a known namespace
            prefix = normalizeNamespace(m.group(1))
            if prefix in knownNamespaces:
                return prefix + ucfirst(m.group(2))
    # The title of the page being included is NOT in the main namespace and
    # lacks any other explicit designation of the namespace - therefore, it
    # is resolved to the Template namespace (that's the default for the
    # template inclusion mechanism).

    # This is a defense against pages whose title only contains UTF-8 chars
    # that are reduced to an empty string. Right now I can think of one such
    # case - <C2><A0> which represents the non-breaking space.
    # In this particular case, this page is a redirect to [[Non-breaking
    # space]], but having in the system a redirect page with an empty title
    # causes numerous problems, so we'll live happier without it.
    if templateTitle:
        return templatePrefix + ucfirst(templateTitle)
    else:
        return ''  # caller may log as error


def normalizeNamespace(ns):
    return ucfirst(ns)

# ----------------------------------------------------------------------
# Parser functions
# see http://www.mediawiki.org/wiki/Help:Extension:ParserFunctions
# https://github.com/Wikia/app/blob/dev/extensions/ParserFunctions/ParserFunctions_body.php


class Infix:
    """Infix operators.
    The calling sequence for the infix is:
      x |op| y
    """

    def __init__(self, function):
        self.function = function

    def __ror__(self, other):
        return Infix(lambda x, self=self, other=other: self.function(other, x))

    def __or__(self, other):
        return self.function(other)

    def __rlshift__(self, other):
        return Infix(lambda x, self=self, other=other: self.function(other, x))

    def __rshift__(self, other):
        return self.function(other)

    def __call__(self, value1, value2):
        return self.function(value1, value2)


ROUND = Infix(lambda x, y: round(x, y))
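
# Illustrative infix use (sketch), as produced by sharp_expr() below:
#   5.1234 |ROUND| 2 -> 5.12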


def sharp_expr(expr):
    try:
        expr = re.sub('=', '==', expr)
        expr = re.sub('mod', '%', expr)
        # raw strings: '\b' in a plain string is a backspace, not a word boundary
        expr = re.sub(r'\bdiv\b', '/', expr)
        expr = re.sub(r'\bround\b', '|ROUND|', expr)
        return unicode(eval(expr))
    except:
        return '<span class="error"></span>'


def sharp_if(testValue, valueIfTrue, valueIfFalse=None, *args):
    # In theory, we should evaluate the first argument here,
    # but it was evaluated while evaluating part[0] in expandTemplate().
    if testValue.strip():
        # The {{#if:}} function is an if-then-else construct.
        # The applied condition is: "The condition string is non-empty".
        valueIfTrue = valueIfTrue.strip()
        if valueIfTrue:
            return valueIfTrue
    elif valueIfFalse:
        return valueIfFalse.strip()
    return ""


def sharp_ifeq(lvalue, rvalue, valueIfTrue, valueIfFalse=None, *args):
    rvalue = rvalue.strip()
    if rvalue:
        # lvalue is always defined
        if lvalue.strip() == rvalue:
            # The {{#ifeq:}} function is an if-then-else construct. The
            # applied condition is "is rvalue equal to lvalue". Note that this
            # does only string comparison while MediaWiki implementation also
            # supports numerical comparisons.
            if valueIfTrue:
                return valueIfTrue.strip()
        else:
            if valueIfFalse:
                return valueIfFalse.strip()
    return ""


def sharp_iferror(test, then='', Else=None, *args):
    if re.match('<(?:strong|span|p|div)\s(?:[^\s>]*\s+)*?class="(?:[^"\s>]*\s+)*?error(?:\s[^">]*)?"', test):
        return then
    elif Else is None:
        return test.strip()
    else:
        return Else.strip()


def sharp_switch(primary, *params):
    # FIXME: we don't support numeric expressions in primary

    # {{#switch: comparison string
    #  | case1 = result1
    #  | case2
    #  | case4 = result2
    #  | 1 | case5 = result3
    #  | #default = result4
    # }}

    primary = primary.strip()
    found = False  # for fall through cases
    default = None
    rvalue = None
    lvalue = ''
    for param in params:
        # handle cases like:
        #  #default = [http://www.perseus.tufts.edu/hopper/text?doc=Perseus...]
        pair = param.split('=', 1)
        lvalue = pair[0].strip()
        rvalue = None
        if len(pair) > 1:
            # got "="
            rvalue = pair[1].strip()
            # check for any of multiple values pipe separated
            if found or primary in [v.strip() for v in lvalue.split('|')]:
                # Found a match, return now
                return rvalue
            elif lvalue == '#default':
                default = rvalue
            rvalue = None  # avoid defaulting to last case
        elif lvalue == primary:
            # If the value matches, set a flag and continue
            found = True
    # Default case
    # Check if the last item had no = sign, thus specifying the default case
    if rvalue is not None:
        return lvalue
    elif default is not None:
        return default
    return ''
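
# Illustrative dispatch (sketch):
#   sharp_switch('b', 'a=1', 'b=2', '#default=9') -> '2'
#   sharp_switch('z', 'a=1', 'b=2', '#default=9') -> '9'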

# Extension Scribunto


def sharp_invoke(module, function, frame):
    functions = modules.get(module)
    if functions:
        funct = functions.get(function)
        if funct:
            # find parameters in frame whose title is the one of the original
            # template invocation
            templateTitle = fullyQualifiedTemplateTitle(module)
            if not templateTitle:
                logging.warn("Template with empty title")
            pair = next((x for x in frame if x[0] == templateTitle), None)
            if pair:
                params = pair[1]
                # extract positional args
                params = [params.get(str(i + 1)) for i in range(len(params))]
                return funct(*params)
            else:
                return funct()
    return ''


parserFunctions = {
    '#expr': sharp_expr,
    '#if': sharp_if,
    '#ifeq': sharp_ifeq,
    '#iferror': sharp_iferror,
    '#ifexpr': lambda *args: '',  # not supported
    '#ifexist': lambda *args: '',  # not supported
    '#rel2abs': lambda *args: '',  # not supported
    '#switch': sharp_switch,
    '#language': lambda *args: '',  # not supported
    '#time': lambda *args: '',  # not supported
    '#timel': lambda *args: '',  # not supported
    '#titleparts': lambda *args: '',  # not supported
    # This function is used in some pages to construct links
    # http://meta.wikimedia.org/wiki/Help:URL
    'urlencode': lambda string, *rest: urllib.quote(string.encode('utf-8')),
    'lc': lambda string, *rest: string.lower() if string else '',
    'lcfirst': lambda string, *rest: lcfirst(string),
    'uc': lambda string, *rest: string.upper() if string else '',
    'ucfirst': lambda string, *rest: ucfirst(string),
    'int': lambda string, *rest: str(int(string)),
}


def callParserFunction(functionName, args, frame):
    """
    Parser functions have similar syntax as templates, except that
    the first argument is everything after the first colon.
    :return: the result of the invocation, None in case of failure.

    http://meta.wikimedia.org/wiki/Help:ParserFunctions
    """
    try:
        if functionName == '#invoke':
            # special handling of frame
            arg0, arg1 = args[0].strip(), args[1].strip()
            ret = sharp_invoke(arg0, arg1, frame)
            # logging.debug('#invoke> %s %s %s', arg0, arg1, ret)
            return ret
        if functionName in parserFunctions:
            ret = parserFunctions[functionName](*args)
            # logging.debug('parserFunction> %s %s', functionName, ret)
            return ret
    except:
        return ""  # FIXME: fix errors
    return ""

# ----------------------------------------------------------------------
# Expand using WikiMedia API
# import json
#
# def expandTemplates(text):
#     """Expand templates invoking MediaWiki API"""
#     text = urllib.quote(text.encode('utf-8'))
#     base = urlbase[:urlbase.rfind('/')]
#     url = base + "/w/api.php?action=expandtemplates&format=json&text=" + text
#     exp = json.loads(urllib.urlopen(url).read())
#     return exp['expandtemplates']['*']

# ----------------------------------------------------------------------
# Extract Template definition

reNoinclude = re.compile(r'<noinclude>(?:.*?)</noinclude>', re.DOTALL)
reIncludeonly = re.compile(r'<includeonly>|</includeonly>', re.DOTALL)

# These are built before spawning processes, hence they are shared.
templates = {}
redirects = {}
# cache of parser templates
# FIXME: sharing this with a Manager slows down.
templateCache = {}


def define_template(title, page):
    """
    Adds a template defined in the :param page:.
    @see https://en.wikipedia.org/wiki/Help:Template#Noinclude.2C_includeonly.2C_and_onlyinclude
    """
    global templates
    global redirects

    # title = normalizeTitle(title)

    # check for redirects
    m = re.match('#REDIRECT.*?\[\[([^\]]*)]]', page[0], re.IGNORECASE)
    if m:
        redirects[title] = m.group(1)  # normalizeTitle(m.group(1))
        return

    text = unescape(''.join(page))

    # We're storing template text for future inclusion, therefore,
    # remove all <noinclude> text and keep all <includeonly> text
    # (but eliminate <includeonly> tags per se).
    # However, if <onlyinclude> ... </onlyinclude> parts are present,
    # then only keep them and discard the rest of the template body.
    # This is because using <onlyinclude> on a text fragment is
    # equivalent to enclosing it in <includeonly> tags **AND**
    # enclosing all the rest of the template body in <noinclude> tags.

    # remove comments
    text = comment.sub('', text)

    # eliminate <noinclude> fragments
    text = reNoinclude.sub('', text)
    # eliminate unterminated <noinclude> elements
    text = re.sub(r'<noinclude\s*>.*$', '', text, flags=re.DOTALL)
    text = re.sub(r'<noinclude/>', '', text)

    onlyincludeAccumulator = ''
    for m in re.finditer('<onlyinclude>(.*?)</onlyinclude>', text, re.DOTALL):
        onlyincludeAccumulator += m.group(1)
    if onlyincludeAccumulator:
        text = onlyincludeAccumulator
    else:
        text = reIncludeonly.sub('', text)

    if text:
        if title in templates:
            logging.warn('Redefining: %s', title)
        templates[title] = text
  1337. # ----------------------------------------------------------------------
  1338. def dropNested(text, openDelim, closeDelim):
  1339. """
  1340. A matching function for nested expressions, e.g. namespaces and tables.
  1341. """
  1342. openRE = re.compile(openDelim, re.IGNORECASE)
  1343. closeRE = re.compile(closeDelim, re.IGNORECASE)
  1344. # partition text in separate blocks { } { }
  1345. spans = [] # pairs (s, e) for each partition
  1346. nest = 0 # nesting level
  1347. start = openRE.search(text, 0)
  1348. if not start:
  1349. return text
  1350. end = closeRE.search(text, start.end())
  1351. next = start
  1352. while end:
  1353. next = openRE.search(text, next.end())
  1354. if not next: # termination
  1355. while nest: # close all pending
  1356. nest -= 1
  1357. end0 = closeRE.search(text, end.end())
  1358. if end0:
  1359. end = end0
  1360. else:
  1361. break
  1362. spans.append((start.start(), end.end()))
  1363. break
  1364. while end.end() < next.start():
  1365. # { } {
  1366. if nest:
  1367. nest -= 1
  1368. # try closing more
  1369. last = end.end()
  1370. end = closeRE.search(text, end.end())
  1371. if not end: # unbalanced
  1372. if spans:
  1373. span = (spans[0][0], last)
  1374. else:
  1375. span = (start.start(), last)
  1376. spans = [span]
  1377. break
  1378. else:
  1379. spans.append((start.start(), end.end()))
  1380. # advance start, find next close
  1381. start = next
  1382. end = closeRE.search(text, next.end())
  1383. break # { }
  1384. if next != start:
  1385. # { { }
  1386. nest += 1
  1387. # collect text outside partitions
  1388. return dropSpans(spans, text)
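# Illustrative doctest-style sketch (not part of the original source):
#   >>> dropNested('a {{b {{c}} d}} e', r'{{', r'}}')
#   'a  e'
# The outer {{...}} block is dropped together with the {{c}} pair nested
# inside it, leaving only the text outside the matched spans.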
  1389. def dropSpans(spans, text):
  1390. """
  1391. Drop from text the blocks identified in :param spans:, possibly nested.
  1392. """
  1393. spans.sort()
  1394. res = ''
  1395. offset = 0
  1396. for s, e in spans:
  1397. if offset <= s: # handle nesting
  1398. if offset < s:
  1399. res += text[offset:s]
  1400. offset = e
  1401. res += text[offset:]
  1402. return res
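# Illustrative doctest-style sketch (not part of the original source):
#   >>> dropSpans([(3, 5), (7, 9)], '0123456789')
#   '012569'
# Characters at offsets 3-4 and 7-8 are removed; a span starting inside an
# already dropped region is skipped, which is how nesting is handled.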
  1403. # ----------------------------------------------------------------------
  1404. # WikiLinks
  1405. # May be nested [[File:..|..[[..]]..|..]], [[Category:...]], etc.
  1406. # Also: [[Help:IPA for Catalan|[andora]]]
  1407. def replaceInternalLinks(text):
  1408. """
  1409. Replaces internal links of the form:
  1410. [[title |...|label]]trail
1411. with the label (or the title, when no label is given) concatenated with trail, when present, e.g. 's' for plural.
  1412. See https://www.mediawiki.org/wiki/Help:Links#Internal_links
  1413. """
  1414. # call this after removal of external links, so we need not worry about
  1415. # triple closing ]]].
  1416. cur = 0
  1417. res = ''
  1418. for s, e in findBalanced(text):
  1419. m = tailRE.match(text, e)
  1420. if m:
  1421. trail = m.group(0)
  1422. end = m.end()
  1423. else:
  1424. trail = ''
  1425. end = e
  1426. inner = text[s + 2:e - 2]
  1427. # find first |
  1428. pipe = inner.find('|')
  1429. if pipe < 0:
  1430. title = inner
  1431. label = title
  1432. else:
  1433. title = inner[:pipe].rstrip()
  1434. # find last |
  1435. curp = pipe + 1
  1436. for s1, e1 in findBalanced(inner):
  1437. last = inner.rfind('|', curp, s1)
  1438. if last >= 0:
  1439. pipe = last # advance
  1440. curp = e1
  1441. label = inner[pipe + 1:].strip()
  1442. res += text[cur:s] + makeInternalLink(title, label) + trail
  1443. cur = end
  1444. return res + text[cur:]
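# Illustrative sketch, assuming Extractor.keepLinks is False (the default):
#   >>> replaceInternalLinks('See [[apple]]s and [[pear|pears]].')
#   'See apples and pears.'
# The trail 's' after [[apple]] is appended to the label, as described in the
# docstring above.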
  1445. # the official version is a method in class Parser, similar to this:
  1446. # def replaceInternalLinks2(text):
  1447. # global wgExtraInterlanguageLinkPrefixes
  1448. # # the % is needed to support urlencoded titles as well
  1449. # tc = Title::legalChars() + '#%'
  1450. # # Match a link having the form [[namespace:link|alternate]]trail
  1451. # e1 = re.compile("([%s]+)(?:\\|(.+?))?]](.*)" % tc, re.S | re.D)
  1452. # # Match cases where there is no "]]", which might still be images
  1453. # e1_img = re.compile("([%s]+)\\|(.*)" % tc, re.S | re.D)
  1454. # holders = LinkHolderArray(self)
  1455. # # split the entire text string on occurrences of [[
  1456. # iterBrackets = re.compile('[[').finditer(text)
  1457. # m in iterBrackets.next()
  1458. # # get the first element (all text up to first [[)
  1459. # s = text[:m.start()]
  1460. # cur = m.end()
  1461. # line = s
  1462. # useLinkPrefixExtension = self.getTargetLanguage().linkPrefixExtension()
  1463. # e2 = None
  1464. # if useLinkPrefixExtension:
  1465. # # Match the end of a line for a word that is not followed by whitespace,
  1466. # # e.g. in the case of "The Arab al[[Razi]]", "al" will be matched
  1467. # global wgContLang
  1468. # charset = wgContLang.linkPrefixCharset()
  1469. # e2 = re.compile("((?>.*[^charset]|))(.+)", re.S | re.D | re.U)
  1470. # if self.mTitle is None:
  1471. # raise MWException(__METHOD__ + ": \self.mTitle is null\n")
  1472. # nottalk = not self.mTitle.isTalkPage()
  1473. # if useLinkPrefixExtension:
  1474. # m = e2.match(s)
  1475. # if m:
  1476. # first_prefix = m.group(2)
  1477. # else:
  1478. # first_prefix = false
  1479. # else:
  1480. # prefix = ''
  1481. # useSubpages = self.areSubpagesAllowed()
  1482. # for m in iterBrackets:
  1483. # line = text[cur:m.start()]
  1484. # cur = m.end()
  1485. # # TODO: Check for excessive memory usage
  1486. # if useLinkPrefixExtension:
  1487. # m = e2.match(e2)
  1488. # if m:
  1489. # prefix = m.group(2)
  1490. # s = m.group(1)
  1491. # else:
  1492. # prefix = ''
  1493. # # first link
  1494. # if first_prefix:
  1495. # prefix = first_prefix
  1496. # first_prefix = False
  1497. # might_be_img = False
  1498. # m = e1.match(line)
  1499. # if m: # page with normal label or alt
  1500. # label = m.group(2)
  1501. # # If we get a ] at the beginning of m.group(3) that means we have a link that is something like:
  1502. # # [[Image:Foo.jpg|[http://example.com desc]]] <- having three ] in a row fucks up,
  1503. # # the real problem is with the e1 regex
  1504. # # See bug 1300.
  1505. # #
  1506. # # Still some problems for cases where the ] is meant to be outside punctuation,
  1507. # # and no image is in sight. See bug 2095.
  1508. # #
  1509. # if label and m.group(3)[0] == ']' and '[' in label:
  1510. # label += ']' # so that replaceExternalLinks(label) works later
  1511. # m.group(3) = m.group(3)[1:]
  1512. # # fix up urlencoded title texts
  1513. # if '%' in m.group(1):
  1514. # # Should anchors '#' also be rejected?
  1515. # m.group(1) = str_replace(array('<', '>'), array('&lt', '&gt'), rawurldecode(m.group(1)))
  1516. # trail = m.group(3)
  1517. # else:
  1518. # m = e1_img.match(line):
  1519. # if m:
  1520. # # Invalid, but might be an image with a link in its caption
  1521. # might_be_img = true
  1522. # label = m.group(2)
  1523. # if '%' in m.group(1):
  1524. # m.group(1) = rawurldecode(m.group(1))
  1525. # trail = ""
  1526. # else: # Invalid form; output directly
  1527. # s += prefix + '[[' + line
  1528. # continue
  1529. # origLink = m.group(1)
  1530. # # Dont allow internal links to pages containing
  1531. # # PROTO: where PROTO is a valid URL protocol these
  1532. # # should be external links.
  1533. # if (preg_match('/^(?i:' + self.mUrlProtocols + ')/', origLink)) {
  1534. # s += prefix + '[[' + line
  1535. # continue
  1536. # }
  1537. # # Make subpage if necessary
  1538. # if useSubpages:
  1539. # link = self.maybeDoSubpageLink(origLink, label)
  1540. # else:
  1541. # link = origLink
  1542. # noforce = origLink[0] != ':'
  1543. # if not noforce:
  1544. # # Strip off leading ':'
  1545. # link = link[1:]
  1546. # nt = Title::newFromText(self.mStripState.unstripNoWiki(link))
  1547. # if nt is None:
  1548. # s += prefix + '[[' + line
  1549. # continue
  1550. # ns = nt.getNamespace()
  1551. # iw = nt.getInterwiki()
  1552. # if might_be_img { # if this is actually an invalid link
  1553. # if (ns == NS_FILE and noforce) { # but might be an image
  1554. # found = False
  1555. # while True:
  1556. # # look at the next 'line' to see if we can close it there
1557. # next_line = iterBrackets.next()
  1558. # if not next_line:
  1559. # break
  1560. # m = explode(']]', next_line, 3)
  1561. # if m.lastindex == 3:
  1562. # # the first ]] closes the inner link, the second the image
  1563. # found = True
  1564. # label += "[[%s]]%s" % (m.group(0), m.group(1))
  1565. # trail = m.group(2)
  1566. # break
  1567. # elif m.lastindex == 2:
  1568. # # if there is exactly one ]] that is fine, we will keep looking
  1569. # label += "[[{m[0]}]]{m.group(1)}"
  1570. # else:
  1571. # # if next_line is invalid too, we need look no further
  1572. # label += '[[' + next_line
  1573. # break
  1574. # if not found:
  1575. # # we couldnt find the end of this imageLink, so output it raw
  1576. # # but dont ignore what might be perfectly normal links in the text we ve examined
  1577. # holders.merge(self.replaceInternalLinks2(label))
  1578. # s += "{prefix}[[%s|%s" % (link, text)
  1579. # # note: no trail, because without an end, there *is* no trail
  1580. # continue
  1581. # } else: # it is not an image, so output it raw
  1582. # s += "{prefix}[[%s|%s" % (link, text)
  1583. # # note: no trail, because without an end, there *is* no trail
  1584. # continue
  1585. # }
  1586. # wasblank = (text == '')
  1587. # if wasblank:
  1588. # text = link
  1589. # else:
  1590. # # Bug 4598 madness. Handle the quotes only if they come from the alternate part
  1591. # # [[Lista d''e paise d''o munno]] . <a href="...">Lista d''e paise d''o munno</a>
  1592. # # [[Criticism of Harry Potter|Criticism of ''Harry Potter'']]
  1593. # # . <a href="Criticism of Harry Potter">Criticism of <i>Harry Potter</i></a>
  1594. # text = self.doQuotes(text)
  1595. # # Link not escaped by : , create the various objects
  1596. # if noforce and not nt.wasLocalInterwiki():
  1597. # # Interwikis
  1598. # if iw and mOptions.getInterwikiMagic() and nottalk and (
  1599. # Language::fetchLanguageName(iw, None, 'mw') or
  1600. # in_array(iw, wgExtraInterlanguageLinkPrefixes)):
  1601. # # Bug 24502: filter duplicates
  1602. # if iw not in mLangLinkLanguages:
  1603. # self.mLangLinkLanguages[iw] = True
  1604. # self.mOutput.addLanguageLink(nt.getFullText())
  1605. # s = rstrip(s + prefix)
  1606. # s += strip(trail, "\n") == '' ? '': prefix + trail
  1607. # continue
  1608. # if ns == NS_FILE:
  1609. # if not wfIsBadImage(nt.getDBkey(), self.mTitle):
  1610. # if wasblank:
  1611. # # if no parameters were passed, text
  1612. # # becomes something like "File:Foo.png",
  1613. # # which we dont want to pass on to the
  1614. # # image generator
  1615. # text = ''
  1616. # else:
  1617. # # recursively parse links inside the image caption
  1618. # # actually, this will parse them in any other parameters, too,
  1619. # # but it might be hard to fix that, and it doesnt matter ATM
  1620. # text = self.replaceExternalLinks(text)
  1621. # holders.merge(self.replaceInternalLinks2(text))
  1622. # # cloak any absolute URLs inside the image markup, so replaceExternalLinks() wont touch them
  1623. # s += prefix + self.armorLinks(
  1624. # self.makeImage(nt, text, holders)) + trail
  1625. # else:
  1626. # s += prefix + trail
  1627. # continue
  1628. # if ns == NS_CATEGORY:
  1629. # s = rstrip(s + "\n") # bug 87
  1630. # if wasblank:
  1631. # sortkey = self.getDefaultSort()
  1632. # else:
  1633. # sortkey = text
  1634. # sortkey = Sanitizer::decodeCharReferences(sortkey)
  1635. # sortkey = str_replace("\n", '', sortkey)
  1636. # sortkey = self.getConverterLanguage().convertCategoryKey(sortkey)
  1637. # self.mOutput.addCategory(nt.getDBkey(), sortkey)
  1638. # s += strip(prefix + trail, "\n") == '' ? '' : prefix + trail
  1639. # continue
  1640. # }
  1641. # }
  1642. # # Self-link checking. For some languages, variants of the title are checked in
  1643. # # LinkHolderArray::doVariants() to allow batching the existence checks necessary
  1644. # # for linking to a different variant.
  1645. # if ns != NS_SPECIAL and nt.equals(self.mTitle) and !nt.hasFragment():
  1646. # s += prefix + Linker::makeSelfLinkObj(nt, text, '', trail)
  1647. # continue
  1648. # # NS_MEDIA is a pseudo-namespace for linking directly to a file
  1649. # # @todo FIXME: Should do batch file existence checks, see comment below
  1650. # if ns == NS_MEDIA:
  1651. # # Give extensions a chance to select the file revision for us
  1652. # options = []
  1653. # descQuery = False
  1654. # Hooks::run('BeforeParserFetchFileAndTitle',
  1655. # [this, nt, &options, &descQuery])
  1656. # # Fetch and register the file (file title may be different via hooks)
  1657. # file, nt = self.fetchFileAndTitle(nt, options)
  1658. # # Cloak with NOPARSE to avoid replacement in replaceExternalLinks
  1659. # s += prefix + self.armorLinks(
  1660. # Linker::makeMediaLinkFile(nt, file, text)) + trail
  1661. # continue
  1662. # # Some titles, such as valid special pages or files in foreign repos, should
  1663. # # be shown as bluelinks even though they are not included in the page table
  1664. # #
  1665. # # @todo FIXME: isAlwaysKnown() can be expensive for file links; we should really do
  1666. # # batch file existence checks for NS_FILE and NS_MEDIA
  1667. # if iw == '' and nt.isAlwaysKnown():
  1668. # self.mOutput.addLink(nt)
  1669. # s += self.makeKnownLinkHolder(nt, text, array(), trail, prefix)
  1670. # else:
  1671. # # Links will be added to the output link list after checking
  1672. # s += holders.makeHolder(nt, text, array(), trail, prefix)
  1673. # }
  1674. # return holders
  1675. def makeInternalLink(title, label):
  1676. colon = title.find(':')
  1677. if colon > 0 and title[:colon] not in acceptedNamespaces:
  1678. return ''
  1679. if colon == 0:
  1680. # drop also :File:
  1681. colon2 = title.find(':', colon + 1)
  1682. if colon2 > 1 and title[colon + 1:colon2] not in acceptedNamespaces:
  1683. return ''
  1684. if Extractor.keepLinks:
  1685. return '<a href="%s">%s</a>' % (urllib.quote(title.encode('utf-8')), label)
  1686. else:
  1687. return label
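# Illustrative sketch (not part of the original source), assuming 'File' is
# not among acceptedNamespaces and Extractor.keepLinks is False:
#   makeInternalLink('File:Foo.png', 'Foo')  ->  ''   (link dropped entirely)
#   makeInternalLink('apple', 'apples')      ->  'apples'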
  1688. # ----------------------------------------------------------------------
  1689. # External links
  1690. # from: https://doc.wikimedia.org/mediawiki-core/master/php/DefaultSettings_8php_source.html
  1691. wgUrlProtocols = [
  1692. 'bitcoin:', 'ftp://', 'ftps://', 'geo:', 'git://', 'gopher://', 'http://',
  1693. 'https://', 'irc://', 'ircs://', 'magnet:', 'mailto:', 'mms://', 'news:',
  1694. 'nntp://', 'redis://', 'sftp://', 'sip:', 'sips:', 'sms:', 'ssh://',
  1695. 'svn://', 'tel:', 'telnet://', 'urn:', 'worldwind://', 'xmpp:', '//'
  1696. ]
  1697. # from: https://doc.wikimedia.org/mediawiki-core/master/php/Parser_8php_source.html
  1698. # Constants needed for external link processing
  1699. # Everything except bracket, space, or control characters
  1700. # \p{Zs} is unicode 'separator, space' category. It covers the space 0x20
  1701. # as well as U+3000 is IDEOGRAPHIC SPACE for bug 19052
  1702. EXT_LINK_URL_CLASS = r'[^][<>"\x00-\x20\x7F\s]'
  1703. ANCHOR_CLASS = r'[^][\x00-\x08\x0a-\x1F]'
  1704. ExtLinkBracketedRegex = re.compile(
  1705. '\[(((?i)' + '|'.join(wgUrlProtocols) + ')' + EXT_LINK_URL_CLASS + r'+)' +
  1706. r'\s*((?:' + ANCHOR_CLASS + r'|\[\[' + ANCHOR_CLASS + r'+\]\])' + r'*?)\]',
  1707. re.S | re.U)
  1708. # A simpler alternative:
  1709. # ExtLinkBracketedRegex = re.compile(r'\[(.*?)\](?!])')
  1710. EXT_IMAGE_REGEX = re.compile(
  1711. r"""^(http://|https://)([^][<>"\x00-\x20\x7F\s]+)
  1712. /([A-Za-z0-9_.,~%\-+&;#*?!=()@\x80-\xFF]+)\.((?i)gif|png|jpg|jpeg)$""",
  1713. re.X | re.S | re.U)
  1714. def replaceExternalLinks(text):
  1715. """
  1716. https://www.mediawiki.org/wiki/Help:Links#External_links
  1717. [URL anchor text]
  1718. """
  1719. s = ''
  1720. cur = 0
  1721. for m in ExtLinkBracketedRegex.finditer(text):
  1722. s += text[cur:m.start()]
  1723. cur = m.end()
  1724. url = m.group(1)
  1725. label = m.group(3)
  1726. # # The characters '<' and '>' (which were escaped by
  1727. # # removeHTMLtags()) should not be included in
  1728. # # URLs, per RFC 2396.
  1729. # m2 = re.search('&(lt|gt);', url)
  1730. # if m2:
  1731. # link = url[m2.end():] + ' ' + link
  1732. # url = url[0:m2.end()]
  1733. # If the link text is an image URL, replace it with an <img> tag
  1734. # This happened by accident in the original parser, but some people used it extensively
  1735. m = EXT_IMAGE_REGEX.match(label)
  1736. if m:
  1737. label = makeExternalImage(label)
  1738. # Use the encoded URL
  1739. # This means that users can paste URLs directly into the text
  1740. # Funny characters like ö aren't valid in URLs anyway
  1741. # This was changed in August 2004
  1742. s += makeExternalLink(url, label) # + trail
  1743. return s + text[cur:]
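# Illustrative sketch, assuming Extractor.keepLinks is False:
#   >>> replaceExternalLinks('Visit [http://example.com the site] today.')
#   'Visit the site today.'
# With keepLinks True, the URL would instead be emitted as an <a> element by
# makeExternalLink below.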
  1744. def makeExternalLink(url, anchor):
  1745. """Function applied to wikiLinks"""
  1746. if Extractor.keepLinks:
  1747. return '<a href="%s">%s</a>' % (urllib.quote(url.encode('utf-8')), anchor)
  1748. else:
  1749. return anchor
  1750. def makeExternalImage(url, alt=''):
  1751. if Extractor.keepLinks:
  1752. return '<img src="%s" alt="%s">' % (url, alt)
  1753. else:
  1754. return alt
  1755. # ----------------------------------------------------------------------
  1756. # match tail after wikilink
  1757. tailRE = re.compile('\w+')
  1758. syntaxhighlight = re.compile('&lt;syntaxhighlight .*?&gt;(.*?)&lt;/syntaxhighlight&gt;', re.DOTALL)
  1759. # skip level 1, it is page name level
  1760. section = re.compile(r'(==+)\s*(.*?)\s*\1')
  1761. listOpen = {'*': '<ul>', '#': '<ol>', ';': '<dl>', ':': '<dl>'}
  1762. listClose = {'*': '</ul>', '#': '</ol>', ';': '</dl>', ':': '</dl>'}
1763. listItem = {'*': '<li>%s</li>', '#': '<li>%s</li>', ';': '<dt>%s</dt>',
  1764. ':': '<dd>%s</dd>'}
  1765. def compact(text):
  1766. """Deal with headers, lists, empty sections, residuals of tables.
  1767. :param text: convert to HTML.
  1768. """
  1769. page = [] # list of paragraph
  1770. headers = {} # Headers for unfilled sections
  1771. emptySection = False # empty sections are discarded
  1772. listLevel = [] # nesting of lists
  1773. for line in text.split('\n'):
  1774. if not line:
  1775. continue
  1776. # Handle section titles
  1777. m = section.match(line)
  1778. if m:
  1779. title = m.group(2)
  1780. lev = len(m.group(1)) # header level
  1781. if Extractor.toHTML:
  1782. page.append("<h%d>%s</h%d>" % (lev, title, lev))
  1783. if title and title[-1] not in '!?':
  1784. title += '.' # terminate sentence.
  1785. headers[lev] = title
  1786. # drop previous headers
  1787. for i in headers.keys():
  1788. if i > lev:
  1789. del headers[i]
  1790. emptySection = True
  1791. listLevel = []
  1792. continue
  1793. # Handle page title
  1794. elif line.startswith('++'):
  1795. title = line[2:-2]
  1796. if title:
  1797. if title[-1] not in '!?':
  1798. title += '.'
  1799. page.append(title)
  1800. # handle indents
  1801. elif line[0] == ':':
  1802. # page.append(line.lstrip(':*#;'))
  1803. continue
  1804. # handle lists
  1805. elif line[0] in '*#;:':
  1806. i = 0
  1807. # c: current level char
  1808. # n: next level char
  1809. for c, n in izip_longest(listLevel, line, fillvalue=''):
  1810. if not n or n not in '*#;:': # shorter or different
  1811. if c:
  1812. if Extractor.toHTML:
  1813. page.append(listClose[c])
  1814. listLevel = listLevel[:-1]
  1815. continue
  1816. else:
  1817. break
  1818. # n != ''
  1819. if c != n and (not c or (c not in ';:' and n not in ';:')):
  1820. if c:
  1821. # close level
  1822. if Extractor.toHTML:
  1823. page.append(listClose[c])
  1824. listLevel = listLevel[:-1]
  1825. listLevel += n
  1826. if Extractor.toHTML:
  1827. page.append(listOpen[n])
  1828. i += 1
  1829. n = line[i - 1] # last list char
  1830. line = line[i:].strip()
  1831. if line: # FIXME: n is '"'
  1832. if Extractor.keepLists:
  1833. # emit open sections
  1834. items = headers.items()
  1835. items.sort()
  1836. for i, v in items:
  1837. page.append(v)
  1838. headers.clear()
  1839. # FIXME: use item count for #-lines
  1840. bullet = '1. ' if n == '#' else '- '
  1841. page.append('{0:{1}s}'.format(bullet, len(listLevel)) + line)
  1842. elif Extractor.toHTML:
  1843. page.append(listItem[n] % line)
  1844. elif len(listLevel):
  1845. page.append(line)
  1846. if Extractor.toHTML:
  1847. for c in reversed(listLevel):
  1848. page.append(listClose[c])
  1849. listLevel = []
  1850. # Drop residuals of lists
  1851. elif line[0] in '{|' or line[-1] == '}':
  1852. continue
  1853. # Drop irrelevant lines
  1854. elif (line[0] == '(' and line[-1] == ')') or line.strip('.-') == '':
  1855. continue
  1856. elif len(headers):
  1857. if Extractor.keepSections:
  1858. items = headers.items()
  1859. items.sort()
  1860. for i, v in items:
  1861. page.append(v)
  1862. headers.clear()
  1863. page.append(line) # first line
  1864. emptySection = False
  1865. elif not emptySection:
  1866. # Drop preformatted
  1867. if line[0] != ' ': # dangerous
  1868. page.append(line)
  1869. return page
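# Illustrative sketch (not part of the original source): with
# Extractor.keepLists set and HTML output off, the input lines
#   == Fruit ==
#   * apple
#   * pear
# compact to ['Fruit.', '- apple', '- pear']: the header is emitted because a
# list item follows it, and '*' bullets are normalized to '- '.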
  1870. def handle_unicode(entity):
  1871. numeric_code = int(entity[2:-1])
  1872. if numeric_code >= 0x10000: return ''
  1873. return unichr(numeric_code)
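# Illustrative doctest-style sketch (not part of the original source):
#   >>> handle_unicode(u'&#8364;')
#   u'\u20ac'
# i.e. the euro sign; entities beyond the BMP (>= 0x10000) yield ''.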
  1874. # ------------------------------------------------------------------------------
  1875. # Output
  1876. class NextFile(object):
  1877. """
  1878. Synchronous generation of next available file name.
  1879. """
  1880. filesPerDir = 100
  1881. def __init__(self, path_name):
  1882. self.path_name = path_name
  1883. self.dir_index = -1
  1884. self.file_index = -1
  1885. def next(self):
  1886. self.file_index = (self.file_index + 1) % NextFile.filesPerDir
  1887. if self.file_index == 0:
  1888. self.dir_index += 1
  1889. dirname = self._dirname()
  1890. if not os.path.isdir(dirname):
  1891. os.makedirs(dirname)
  1892. return self._filepath()
  1893. def _dirname(self):
  1894. char1 = self.dir_index % 26
  1895. char2 = self.dir_index / 26 % 26
  1896. return os.path.join(self.path_name, '%c%c' % (ord('A') + char2, ord('A') + char1))
  1897. def _filepath(self):
  1898. return '%s/wiki_%02d' % (self._dirname(), self.file_index)
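# Illustrative sketch (not part of the original source): with the default
# filesPerDir = 100, successive next() calls yield, relative to path_name,
#   AA/wiki_00, AA/wiki_01, ..., AA/wiki_99, AB/wiki_00, ...
# since dir_index is spelled in base 26 with the letters A-Z.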
  1899. class OutputSplitter(object):
  1900. """
1901. File-like object that splits output across multiple files of a given max size.
  1902. """
  1903. def __init__(self, nextFile, max_file_size=0, compress=True):
  1904. """
  1905. :param nextFile: a NextFile object from which to obtain filenames
  1906. to use.
  1907. :param max_file_size: the maximum size of each file.
1908. :param compress: whether to write data with bzip compression.
  1909. """
  1910. self.nextFile = nextFile
  1911. self.compress = compress
  1912. self.max_file_size = max_file_size
  1913. self.file = self.open(self.nextFile.next())
  1914. def reserve(self, size):
  1915. if self.file.tell() + size > self.max_file_size:
  1916. self.close()
  1917. self.file = self.open(self.nextFile.next())
  1918. def write(self, data):
  1919. self.reserve(len(data))
  1920. self.file.write(data)
  1921. def close(self):
  1922. self.file.close()
  1923. def open(self, filename):
  1924. if self.compress:
  1925. return bz2.BZ2File(filename + '.bz2', 'w')
  1926. else:
  1927. return open(filename, 'w')
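# Illustrative usage sketch (variable names are hypothetical):
#   splitter = OutputSplitter(NextFile('out'), max_file_size=1024 ** 2,
#                             compress=False)
#   splitter.write(doc_text)  # rolls over to a new file once 1 MB would be exceeded
#   splitter.close()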
  1928. # ----------------------------------------------------------------------
  1929. # READER
  1930. tagRE = re.compile(r'(.*?)<(/?\w+)[^>]*>(?:([^<]*)(<.*?>)?)?')
  1931. # 1 2 3 4
  1932. def load_templates(file, output_file=None):
  1933. """
  1934. Load templates from :param file:.
  1935. :param output_file: file where to save templates and modules.
  1936. """
  1937. global templateNamespace, templatePrefix
  1938. templatePrefix = templateNamespace + ':'
  1939. global moduleNamespace, modulePrefix
  1940. modulePrefix = moduleNamespace + ':'
  1941. if output_file:
  1942. output = codecs.open(output_file, 'wb', 'utf-8')
  1943. for page_count, page_data in enumerate(pages_from(file)):
  1944. id, title, ns, page = page_data
  1945. if not output_file and (not templateNamespace or
  1946. not moduleNamespace): # do not know it yet
  1947. # reconstruct templateNamespace and moduleNamespace from the first title
  1948. if ns in templateKeys:
  1949. colon = title.find(':')
  1950. if colon > 1:
  1951. if ns == '10':
  1952. templateNamespace = title[:colon]
  1953. templatePrefix = title[:colon + 1]
  1954. elif ns == '828':
  1955. moduleNamespace = title[:colon]
  1956. modulePrefix = title[:colon + 1]
  1957. if ns in templateKeys:
  1958. text = ''.join(page)
  1959. define_template(title, text)
  1960. # save templates and modules to file
  1961. if output_file:
  1962. output.write('<page>\n')
  1963. output.write(' <title>%s</title>\n' % title)
  1964. output.write(' <ns>%s</ns>\n' % ns)
  1965. output.write(' <id>%s</id>\n' % id)
  1966. output.write(' <text>')
  1967. for line in page:
  1968. output.write(line)
  1969. output.write(' </text>\n')
  1970. output.write('</page>\n')
  1971. if page_count and page_count % 100000 == 0:
  1972. logging.info("Preprocessed %d pages", page_count)
  1973. if output_file:
  1974. output.close()
  1975. logging.info("Saved %d templates to '%s'", len(templates), output_file)
  1976. def pages_from(input):
  1977. """
1978. Scans input, extracting pages.
1979. :return: (id, title, namespace, page), where page is a list of lines.
  1980. """
  1981. # we collect individual lines, since str.join() is significantly faster
  1982. # than concatenation
  1983. page = []
  1984. id = None
  1985. ns = '0'
  1986. last_id = None
  1987. inText = False
  1988. redirect = False
  1989. for line in input:
  1990. line = line.decode('utf-8')
  1991. if '<' not in line: # faster than doing re.search()
  1992. if inText:
  1993. page.append(line)
  1994. continue
  1995. m = tagRE.search(line)
  1996. if not m:
  1997. continue
  1998. tag = m.group(2)
  1999. if tag == 'page':
  2000. page = []
  2001. redirect = False
  2002. elif tag == 'id' and not id: # skip nested <id>
  2003. id = m.group(3)
  2004. elif tag == 'title':
  2005. title = m.group(3)
  2006. elif tag == 'ns':
  2007. ns = m.group(3)
  2008. elif tag == 'redirect':
  2009. redirect = True
  2010. elif tag == 'text':
  2011. inText = True
  2012. line = line[m.start(3):m.end(3)]
  2013. page.append(line)
  2014. if m.lastindex == 4: # open-close
  2015. inText = False
  2016. elif tag == '/text':
  2017. if m.group(1):
  2018. page.append(m.group(1))
  2019. inText = False
  2020. elif inText:
  2021. page.append(line)
  2022. elif tag == '/page':
  2023. if id != last_id and not redirect:
  2024. yield (id, title, ns, page)
  2025. last_id = id
  2026. ns = '0'
  2027. id = None
  2028. page = []
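# Illustrative sketch (not part of the original source): for a dump fragment
#   <page>
#     <title>Foo</title>
#     <ns>0</ns>
#     <id>42</id>
#     <text xml:space="preserve">body</text>
#   </page>
# pages_from yields ('42', 'Foo', '0', [...lines of the text element...]);
# pages carrying a <redirect .../> tag are skipped.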
  2029. def process_dump(input_file, template_file, out_file, file_size, file_compress,
  2030. process_count):
  2031. """
  2032. :param input_file: name of the wikipedia dump file; '-' to read from stdin
  2033. :param template_file: optional file with template definitions.
  2034. :param out_file: directory where to store extracted data, or '-' for stdout
  2035. :param file_size: max size of each extracted file, or None for no max (one file)
  2036. :param file_compress: whether to compress files with bzip.
  2037. :param process_count: number of extraction processes to spawn.
  2038. """
  2039. global urlbase
  2040. global knownNamespaces
  2041. global templateNamespace, templatePrefix
  2042. global moduleNamespace, modulePrefix
  2043. if input_file == '-':
  2044. input = sys.stdin
  2045. else:
  2046. input = fileinput.FileInput(input_file, openhook=fileinput.hook_compressed)
  2047. # collect siteinfo
  2048. for line in input:
  2049. line = line.decode('utf-8')
  2050. m = tagRE.search(line)
  2051. if not m:
  2052. continue
  2053. tag = m.group(2)
  2054. if tag == 'base':
  2055. # discover urlbase from the xml dump file
  2056. # /mediawiki/siteinfo/base
  2057. base = m.group(3)
  2058. urlbase = base[:base.rfind("/")]
  2059. elif tag == 'namespace':
  2060. knownNamespaces.add(m.group(3))
  2061. if re.search('key="10"', line):
  2062. templateNamespace = m.group(3)
  2063. templatePrefix = templateNamespace + ':'
  2064. elif re.search('key="828"', line):
  2065. moduleNamespace = m.group(3)
  2066. modulePrefix = moduleNamespace + ':'
  2067. elif tag == '/siteinfo':
  2068. break
  2069. if Extractor.expand_templates:
  2070. # preprocess
  2071. template_load_start = default_timer()
  2072. if template_file:
  2073. if os.path.exists(template_file):
  2074. logging.info("Preprocessing '%s' to collect template definitions: this may take some time.", template_file)
  2075. file = fileinput.FileInput(template_file, openhook=fileinput.hook_compressed)
  2076. load_templates(file)
  2077. file.close()
  2078. else:
  2079. if input_file == '-':
2080. # stdin cannot be rewound and scanned twice, so an explicit template file is required
2081. raise ValueError("to expand templates while reading the dump from stdin, supply an explicit template file with --templates")
  2082. logging.info("Preprocessing '%s' to collect template definitions: this may take some time.", input_file)
  2083. load_templates(input, template_file)
  2084. input.close()
  2085. input = fileinput.FileInput(input_file, openhook=fileinput.hook_compressed)
  2086. template_load_elapsed = default_timer() - template_load_start
  2087. logging.info("Loaded %d templates in %.1fs", len(templates), template_load_elapsed)
  2088. # process pages
  2089. logging.info("Starting page extraction from %s.", input_file)
  2090. extract_start = default_timer()
  2091. # Parallel Map/Reduce:
  2092. # - pages to be processed are dispatched to workers
2093. # - a reduce process collects the results, sorts them and prints them.
  2094. maxsize = 10 * process_count
  2095. # output queue
  2096. output_queue = Queue(maxsize=maxsize)
  2097. if out_file == '-':
  2098. out_file = None
  2099. worker_count = max(1, process_count)
  2100. # load balancing
  2101. max_spool_length = 10000
  2102. spool_length = Value('i', 0, lock=False)
  2103. # reduce job that sorts and prints output
  2104. reduce = Process(target=reduce_process,
  2105. args=(output_queue, spool_length,
  2106. out_file, file_size, file_compress))
  2107. reduce.start()
  2108. # initialize jobs queue
  2109. jobs_queue = Queue(maxsize=maxsize)
  2110. # start worker processes
  2111. logging.info("Using %d extract processes.", worker_count)
  2112. workers = []
  2113. for i in xrange(worker_count):
  2114. extractor = Process(target=extract_process,
  2115. args=(i, jobs_queue, output_queue))
  2116. extractor.daemon = True # only live while parent process lives
  2117. extractor.start()
  2118. workers.append(extractor)
  2119. # Mapper process
  2120. page_num = 0
  2121. for page_data in pages_from(input):
  2122. id, title, ns, page = page_data
  2123. if ns not in templateKeys:
  2124. # slow down
  2125. delay = 0
  2126. if spool_length.value > max_spool_length:
  2127. # reduce to 10%
  2128. while spool_length.value > max_spool_length/10:
  2129. time.sleep(10)
  2130. delay += 10
  2131. if delay:
  2132. logging.info('Delay %ds', delay)
  2133. job = (id, title, page, page_num)
  2134. jobs_queue.put(job) # goes to any available extract_process
  2135. page_num += 1
  2136. page = None # free memory
  2137. input.close()
  2138. # signal termination
  2139. for _ in workers:
  2140. jobs_queue.put(None)
  2141. # wait for workers to terminate
  2142. for w in workers:
  2143. w.join()
  2144. # signal end of work to reduce process
  2145. output_queue.put(None)
  2146. # wait for it to finish
  2147. reduce.join()
  2148. extract_duration = default_timer() - extract_start
  2149. extract_rate = page_num / extract_duration
  2150. logging.info("Finished %d-process extraction of %d articles in %.1fs (%.1f art/s)",
  2151. process_count, page_num, extract_duration, extract_rate)
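# Illustrative usage sketch (argument values are hypothetical): with template
# expansion enabled,
#   process_dump('enwiki-pages-articles.xml.bz2', 'templates.xml',
#                'extracted', 1024 ** 2, False, cpu_count() - 1)
# reads the compressed dump, loads template definitions from templates.xml,
# and writes ~1 MB uncompressed files under the 'extracted' directory.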
  2152. # ----------------------------------------------------------------------
  2153. # Multiprocess support
  2154. def extract_process(i, jobs_queue, output_queue):
  2155. """Pull tuples of raw page content, do CPU/regex-heavy fixup, push finished text
  2156. :param i: process id.
  2157. :param jobs_queue: where to get jobs.
  2158. :param output_queue: where to queue extracted text for output.
  2159. """
  2160. out = StringIO() # memory buffer
  2161. while True:
  2162. job = jobs_queue.get() # job is (id, title, page, page_num)
  2163. if job:
  2164. id, title, page, page_num = job
  2165. try:
  2166. e = Extractor(*job[:3]) # (id, title, page)
  2167. page = None # free memory
  2168. e.extract(out)
  2169. text = out.getvalue()
  2170. except:
  2171. text = ''
2172. logging.error('Error processing page: %s %s', id, title)
  2173. output_queue.put((page_num, text))
  2174. out.truncate(0)
  2175. else:
  2176. logging.debug('Quit extractor')
  2177. break
  2178. out.close()
  2179. report_period = 10000 # progress report period
  2180. def reduce_process(output_queue, spool_length,
  2181. out_file=None, file_size=0, file_compress=True):
  2182. """Pull finished article text, write series of files (or stdout)
  2183. :param output_queue: text to be output.
  2184. :param spool_length: spool length.
  2185. :param out_file: filename where to print.
  2186. :param file_size: max file size.
  2187. :param file_compress: whether to compress output.
  2188. """
  2189. if out_file:
  2190. nextFile = NextFile(out_file)
  2191. output = OutputSplitter(nextFile, file_size, file_compress)
  2192. else:
  2193. output = sys.stdout
  2194. if file_compress:
  2195. logging.warn("writing to stdout, so no output compression (use an external tool)")
  2196. interval_start = default_timer()
  2197. # FIXME: use a heap
  2198. spool = {} # collected pages
  2199. next_page = 0 # sequence numbering of page
  2200. while True:
  2201. if next_page in spool:
  2202. output.write(spool.pop(next_page))
  2203. next_page += 1
  2204. # tell mapper our load:
  2205. spool_length.value = len(spool)
  2206. # progress report
  2207. if next_page % report_period == 0:
  2208. interval_rate = report_period / (default_timer() - interval_start)
  2209. logging.info("Extracted %d articles (%.1f art/s)",
  2210. next_page, interval_rate)
  2211. interval_start = default_timer()
  2212. else:
  2213. # mapper puts None to signal finish
  2214. pair = output_queue.get()
  2215. if not pair:
  2216. break
  2217. page_num, text = pair
  2218. spool[page_num] = text
  2219. # tell mapper our load:
  2220. spool_length.value = len(spool)
  2221. # FIXME: if an extractor dies, process stalls; the other processes
  2222. # continue to produce pairs, filling up memory.
  2223. if len(spool) > 200:
  2224. logging.debug('Collected %d, waiting: %d, %d', len(spool),
  2225. next_page, next_page == page_num)
  2226. if output != sys.stdout:
  2227. output.close()
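# Illustrative trace of the ordering logic above (not from the source): if the
# queue delivers (2, t2), (0, t0), (1, t1), then t2 is spooled first, t0 is
# written as soon as it arrives (next_page == 0), and t1 and t2 follow from
# the spool, so output always appears in page_num order.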
  2228. # ----------------------------------------------------------------------
  2229. # Minimum size of output files
  2230. minFileSize = 200 * 1024
  2231. def main():
  2232. global urlbase, acceptedNamespaces
  2233. global templateCache, escape_doc
  2234. parser = argparse.ArgumentParser(prog=os.path.basename(sys.argv[0]),
  2235. formatter_class=argparse.RawDescriptionHelpFormatter,
  2236. description=__doc__)
  2237. parser.add_argument("input",
  2238. help="XML wiki dump file")
  2239. groupO = parser.add_argument_group('Output')
  2240. groupO.add_argument("-o", "--output", default="text",
  2241. help="directory for extracted files (or '-' for dumping to stdout)")
  2242. groupO.add_argument("-b", "--bytes", default="1M",
  2243. help="maximum bytes per output file (default %(default)s)",
  2244. metavar="n[KMG]")
  2245. groupO.add_argument("-c", "--compress", action="store_true",
  2246. help="compress output files using bzip")
  2247. groupP = parser.add_argument_group('Processing')
  2248. groupP.add_argument("--html", action="store_true",
  2249. help="produce HTML output, subsumes --links")
  2250. groupP.add_argument("-l", "--links", action="store_true",
  2251. help="preserve links")
  2252. groupP.add_argument("-s", "--sections", action="store_true",
  2253. help="preserve sections")
  2254. groupP.add_argument("--lists", action="store_true",
  2255. help="preserve lists")
  2256. groupP.add_argument("-ns", "--namespaces", default="", metavar="ns1,ns2",
  2257. help="accepted namespaces")
  2258. groupP.add_argument("--templates",
  2259. help="use or create file containing templates")
  2260. groupP.add_argument("--no-templates", action="store_false",
  2261. help="Do not expand templates")
  2262. groupP.add_argument("--escapedoc", action="store_true",
  2263. help="use to escape the contents of the output <doc>...</doc>")
  2264. default_process_count = cpu_count() - 1
  2265. parser.add_argument("--processes", type=int, default=default_process_count,
  2266. help="Number of processes to use (default %(default)s)")
  2267. groupS = parser.add_argument_group('Special')
  2268. groupS.add_argument("-q", "--quiet", action="store_true",
  2269. help="suppress reporting progress info")
  2270. groupS.add_argument("--debug", action="store_true",
  2271. help="print debug info")
  2272. groupS.add_argument("-a", "--article", action="store_true",
  2273. help="analyze a file containing a single article (debug option)")
  2274. groupS.add_argument("-v", "--version", action="version",
  2275. version='%(prog)s ' + version,
  2276. help="print program version")
  2277. args = parser.parse_args()
  2278. Extractor.keepLinks = args.links
  2279. Extractor.keepSections = args.sections
  2280. Extractor.keepLists = args.lists
  2281. Extractor.toHTML = args.html
  2282. if args.html:
  2283. Extractor.keepLinks = True
  2284. Extractor.expand_templates = args.no_templates
  2285. escape_doc = args.escapedoc
  2286. try:
  2287. power = 'kmg'.find(args.bytes[-1].lower()) + 1
2288. file_size = int(args.bytes[:-1]) * 1024 ** power if power else int(args.bytes)  # plain byte count when no K/M/G suffix
  2289. if file_size < minFileSize:
  2290. raise ValueError()
  2291. except ValueError:
  2292. logging.error('Insufficient or invalid size: %s', args.bytes)
  2293. return
  2294. if args.namespaces:
  2295. acceptedNamespaces = set(args.namespaces.split(','))
  2296. FORMAT = '%(levelname)s: %(message)s'
  2297. logging.basicConfig(format=FORMAT)
  2298. logger = logging.getLogger()
  2299. if not args.quiet:
  2300. logger.setLevel(logging.INFO)
  2301. if args.debug:
  2302. logger.setLevel(logging.DEBUG)
  2303. input_file = args.input
  2304. if not Extractor.keepLinks:
  2305. ignoreTag('a')
  2306. # sharing cache of parser templates is too slow:
  2307. # manager = Manager()
  2308. # templateCache = manager.dict()
  2309. if args.article:
  2310. if args.templates:
  2311. if os.path.exists(args.templates):
  2312. with open(args.templates) as file:
  2313. load_templates(file)
  2314. file = fileinput.FileInput(input_file, openhook=fileinput.hook_compressed)
  2315. for page_data in pages_from(file):
  2316. id, title, ns, page = page_data
  2317. Extractor(id, title, page).extract(sys.stdout)
  2318. file.close()
  2319. return
  2320. output_path = args.output
  2321. if output_path != '-' and not os.path.isdir(output_path):
  2322. try:
  2323. os.makedirs(output_path)
  2324. except:
  2325. logging.error('Could not create: %s', output_path)
  2326. return
  2327. process_dump(input_file, args.templates, output_path, file_size,
  2328. args.compress, args.processes)
  2329. if __name__ == '__main__':
  2330. main()