# cython: embedsignature=True
# cython: profile=True
# coding: utf8
from __future__ import unicode_literals

from collections import OrderedDict
from cython.operator cimport dereference as deref
from cython.operator cimport preincrement as preinc
from cymem.cymem cimport Pool
from preshed.maps cimport PreshMap
import regex as re
cimport cython

from .tokens.doc cimport Doc
from .strings cimport hash_string
from .errors import Errors, Warnings, deprecation_warning
from . import util


cdef class Tokenizer:
    """Segment text, and create Doc objects with the discovered segment
    boundaries.
    """
    def __init__(self, Vocab vocab, rules=None, prefix_search=None,
                 suffix_search=None, infix_finditer=None, token_match=None):
        """Create a `Tokenizer`, to create `Doc` objects given unicode text.

        vocab (Vocab): A storage container for lexical types.
        rules (dict): Exceptions and special-cases for the tokenizer.
        prefix_search (callable): A function matching the signature of
            `re.compile(string).search` to match prefixes.
        suffix_search (callable): A function matching the signature of
            `re.compile(string).search` to match suffixes.
        infix_finditer (callable): A function matching the signature of
            `re.compile(string).finditer` to find infixes.
        token_match (callable): A boolean function matching strings to be
            recognised as tokens.
        RETURNS (Tokenizer): The newly constructed object.

        EXAMPLE:
            >>> tokenizer = Tokenizer(nlp.vocab)
            >>> tokenizer = English().Defaults.create_tokenizer(nlp)
        """
        self.mem = Pool()
        self._cache = PreshMap()
        self._specials = PreshMap()
        self.token_match = token_match
        self.prefix_search = prefix_search
        self.suffix_search = suffix_search
        self.infix_finditer = infix_finditer
        self.vocab = vocab
        self._rules = {}
        if rules is not None:
            for chunk, substrings in sorted(rules.items()):
                self.add_special_case(chunk, substrings)
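
    # Usage sketch for the constructor above (assumes `nlp` is an already
    # loaded pipeline whose vocab we reuse; the regexes are illustrative,
    # not spaCy's defaults):
    #
    #     >>> import regex as re
    #     >>> prefix_re = re.compile(r'''^[\[\("']''')
    #     >>> suffix_re = re.compile(r'''[\]\)"']$''')
    #     >>> infix_re = re.compile(r'''[-~]''')
    #     >>> tokenizer = Tokenizer(nlp.vocab,
    #     ...                       prefix_search=prefix_re.search,
    #     ...                       suffix_search=suffix_re.search,
    #     ...                       infix_finditer=infix_re.finditer)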

    def __reduce__(self):
        args = (self.vocab,
                self._rules,
                self.prefix_search,
                self.suffix_search,
                self.infix_finditer,
                self.token_match)
        return (self.__class__, args, None, None)

    cpdef Doc tokens_from_list(self, list strings):
        deprecation_warning(Warnings.W002)
        return Doc(self.vocab, words=strings)

    @cython.boundscheck(False)
    def __call__(self, unicode string):
        """Tokenize a string.

        string (unicode): The string to tokenize.
        RETURNS (Doc): A container for linguistic annotations.
        """
        if len(string) >= (2 ** 30):
            raise ValueError(Errors.E025.format(length=len(string)))
        cdef int length = len(string)
        cdef Doc doc = Doc(self.vocab)
        if length == 0:
            return doc
        cdef int i = 0
        cdef int start = 0
        cdef bint cache_hit
        cdef bint in_ws = string[0].isspace()
        cdef unicode span
        # The task here is much like string.split, but not quite.
        # We find spans of whitespace and non-space characters, and ignore
        # spans that are exactly ' '. So, our sequences will all be separated
        # by either ' ' or nothing.
        for uc in string:
            if uc.isspace() != in_ws:
                if start < i:
                    # When we want to make this fast, get the data buffer once
                    # with PyUnicode_AS_DATA, and then maintain a start_byte
                    # and end_byte, so we can call hash64 directly. That way
                    # we don't have to create the slice when we hit the cache.
                    span = string[start:i]
                    key = hash_string(span)
                    cache_hit = self._try_cache(key, doc)
                    if not cache_hit:
                        self._tokenize(doc, span, key)
                if uc == ' ':
                    doc.c[doc.length - 1].spacy = True
                    start = i + 1
                else:
                    start = i
                in_ws = not in_ws
            i += 1
        if start < i:
            span = string[start:]
            key = hash_string(span)
            cache_hit = self._try_cache(key, doc)
            if not cache_hit:
                self._tokenize(doc, span, key)
            doc.c[doc.length - 1].spacy = string[-1] == ' ' and not in_ws
        return doc
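
    # Usage sketch for __call__ (assumes `tokenizer` was built as in the
    # constructor example above; the output shown is what the whitespace
    # logic produces for plain space-separated input):
    #
    #     >>> doc = tokenizer(u"hello world")
    #     >>> [t.text for t in doc]
    #     ['hello', 'world']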

    def pipe(self, texts, batch_size=1000, n_threads=2):
        """Tokenize a stream of texts.

        texts: A sequence of unicode texts.
        batch_size (int): Number of texts to accumulate in an internal buffer.
        n_threads (int): Number of threads to use, if the implementation
            supports multi-threading. The default tokenizer is single-threaded.
        YIELDS (Doc): A sequence of Doc objects, in order.
        """
        for text in texts:
            yield self(text)
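
    # Streaming sketch for pipe() (the texts are placeholders; note that this
    # implementation ignores batch_size and n_threads and simply yields one
    # Doc per input text, in order):
    #
    #     >>> texts = [u"One document.", u"...", u"Lots of documents"]
    #     >>> for doc in tokenizer.pipe(texts, batch_size=50):
    #     ...     pass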

    def _reset_cache(self, keys):
        for k in keys:
            del self._cache[k]

    cdef int _try_cache(self, hash_t key, Doc tokens) except -1:
        cached = <_Cached*>self._cache.get(key)
        if cached == NULL:
            return False
        cdef int i
        if cached.is_lex:
            for i in range(cached.length):
                tokens.push_back(cached.data.lexemes[i], False)
        else:
            for i in range(cached.length):
                tokens.push_back(&cached.data.tokens[i], False)
        return True
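
    # _tokenize below handles a whitespace-delimited chunk that missed the
    # cache: it strips recognised prefixes and suffixes off the ends, attaches
    # the remaining tokens (splitting on infixes if needed), and then caches
    # the resulting token sequence under the chunk's hash for next time.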
    cdef int _tokenize(self, Doc tokens, unicode span, hash_t orig_key) except -1:
        cdef vector[LexemeC*] prefixes
        cdef vector[LexemeC*] suffixes
        cdef int orig_size
        cdef int has_special
        orig_size = tokens.length
        span = self._split_affixes(tokens.mem, span, &prefixes, &suffixes,
                                   &has_special)
        self._attach_tokens(tokens, span, &prefixes, &suffixes)
        self._save_cached(&tokens.c[orig_size], orig_key, has_special,
                          tokens.length - orig_size)
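
    # _split_affixes peels prefixes and suffixes off the chunk one at a time,
    # stopping early if token_match fires or if the remainder is a known
    # special case. For example, with default-style punctuation rules a chunk
    # like "(don't)" would yield prefix "(" and suffix ")", leaving "don't"
    # for _attach_tokens to resolve (typically via a special-case rule).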
    cdef unicode _split_affixes(self, Pool mem, unicode string,
                                vector[const LexemeC*] *prefixes,
                                vector[const LexemeC*] *suffixes,
                                int* has_special):
        cdef size_t i
        cdef unicode prefix
        cdef unicode suffix
        cdef unicode minus_pre
        cdef unicode minus_suf
        cdef size_t last_size = 0
        while string and len(string) != last_size:
            if self.token_match and self.token_match(string):
                break
            last_size = len(string)
            pre_len = self.find_prefix(string)
            if pre_len != 0:
                prefix = string[:pre_len]
                minus_pre = string[pre_len:]
                # Check whether we've hit a special-case
                if minus_pre and self._specials.get(hash_string(minus_pre)) != NULL:
                    string = minus_pre
                    prefixes.push_back(self.vocab.get(mem, prefix))
                    has_special[0] = 1
                    break
            if self.token_match and self.token_match(string):
                break
            suf_len = self.find_suffix(string)
            if suf_len != 0:
                suffix = string[-suf_len:]
                minus_suf = string[:-suf_len]
                # Check whether we've hit a special-case
                if minus_suf and (self._specials.get(hash_string(minus_suf)) != NULL):
                    string = minus_suf
                    suffixes.push_back(self.vocab.get(mem, suffix))
                    has_special[0] = 1
                    break
            if pre_len and suf_len and (pre_len + suf_len) <= len(string):
                string = string[pre_len:-suf_len]
                prefixes.push_back(self.vocab.get(mem, prefix))
                suffixes.push_back(self.vocab.get(mem, suffix))
            elif pre_len:
                string = minus_pre
                prefixes.push_back(self.vocab.get(mem, prefix))
            elif suf_len:
                string = minus_suf
                suffixes.push_back(self.vocab.get(mem, suffix))
            if string and (self._specials.get(hash_string(string)) != NULL):
                has_special[0] = 1
                break
        return string
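
    # _attach_tokens pushes the collected prefixes, then whatever remains of
    # the chunk (via the cache, token_match, or infix splitting), and finally
    # the suffixes in reverse order, so the Doc receives the tokens in their
    # original surface order.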
    cdef int _attach_tokens(self, Doc tokens, unicode string,
                            vector[const LexemeC*] *prefixes,
                            vector[const LexemeC*] *suffixes) except -1:
        cdef bint cache_hit
        cdef int split, end
        cdef const LexemeC* const* lexemes
        cdef const LexemeC* lexeme
        cdef unicode span
        cdef int i
        if prefixes.size():
            for i in range(prefixes.size()):
                tokens.push_back(prefixes[0][i], False)
        if string:
            cache_hit = self._try_cache(hash_string(string), tokens)
            if cache_hit:
                pass
            elif self.token_match and self.token_match(string):
                # We're always saying 'no' to spaces here -- the caller will
                # fix up the outermost one, with reference to the original.
                # See Issue #859
                tokens.push_back(self.vocab.get(tokens.mem, string), False)
            else:
                matches = self.find_infix(string)
                if not matches:
                    tokens.push_back(self.vocab.get(tokens.mem, string), False)
                else:
                    # Let's say we have dyn-o-mite-dave - the regex finds the
                    # start and end positions of the hyphens
                    start = 0
                    start_before_infixes = start
                    for match in matches:
                        infix_start = match.start()
                        infix_end = match.end()
                        if infix_start == start_before_infixes:
                            continue
                        if infix_start != start:
                            span = string[start:infix_start]
                            tokens.push_back(self.vocab.get(tokens.mem, span), False)
                        if infix_start != infix_end:
                            # If infix_start != infix_end, it means the infix
                            # token is non-empty. Empty infix tokens are useful
                            # for tokenization in some languages (see
                            # https://github.com/explosion/spaCy/issues/768)
                            infix_span = string[infix_start:infix_end]
                            tokens.push_back(self.vocab.get(tokens.mem, infix_span), False)
                        start = infix_end
                    span = string[start:]
                    if span:
                        tokens.push_back(self.vocab.get(tokens.mem, span), False)
        cdef vector[const LexemeC*].reverse_iterator it = suffixes.rbegin()
        while it != suffixes.rend():
            lexeme = deref(it)
            preinc(it)
            tokens.push_back(lexeme, False)
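
    # _save_cached interns the tokenization of a chunk under its hash so that
    # repeated chunks can be replayed by _try_cache. Sequences that involved a
    # special case, or that contain lexemes not present in the vocab's hash
    # table, are deliberately not cached (see the issue linked below).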
    cdef int _save_cached(self, const TokenC* tokens, hash_t key,
                          int has_special, int n) except -1:
        cdef int i
        for i in range(n):
            if self.vocab._by_hash.get(tokens[i].lex.orth) == NULL:
                return 0
        # See https://github.com/explosion/spaCy/issues/1250
        if has_special:
            return 0
        cached = <_Cached*>self.mem.alloc(1, sizeof(_Cached))
        cached.length = n
        cached.is_lex = True
        lexemes = <const LexemeC**>self.mem.alloc(n, sizeof(LexemeC**))
        for i in range(n):
            lexemes[i] = tokens[i].lex
        cached.data.lexemes = <const LexemeC* const*>lexemes
        self._cache.set(key, cached)

    def find_infix(self, unicode string):
        """Find internal split points of the string, such as hyphens.

        string (unicode): The string to segment.
        RETURNS (list): A list of `re.MatchObject` objects that have `.start()`
            and `.end()` methods, denoting the placement of internal segment
            separators, e.g. hyphens. Returns 0 if no `infix_finditer` is set.
        """
        if self.infix_finditer is None:
            return 0
        return list(self.infix_finditer(string))
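
    # Infix sketch (illustrative pattern, not spaCy's default): with
    # infix_finditer = re.compile(r'-').finditer, find_infix(u"dyn-o-mite")
    # returns two match objects, and _attach_tokens uses them to split the
    # chunk into [u'dyn', u'-', u'o', u'-', u'mite'].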

    def find_prefix(self, unicode string):
        """Find the length of a prefix that should be segmented from the
        string, or 0 if no prefix rules match.

        string (unicode): The string to segment.
        RETURNS (int): The length of the prefix if present, otherwise 0.
        """
        if self.prefix_search is None:
            return 0
        match = self.prefix_search(string)
        return (match.end() - match.start()) if match is not None else 0

    def find_suffix(self, unicode string):
        """Find the length of a suffix that should be segmented from the
        string, or 0 if no suffix rules match.

        string (unicode): The string to segment.
        RETURNS (int): The length of the suffix if present, otherwise 0.
        """
        if self.suffix_search is None:
            return 0
        match = self.suffix_search(string)
        return (match.end() - match.start()) if match is not None else 0

    def _load_special_tokenization(self, special_cases):
        """Add special-case tokenization rules."""
        for chunk, substrings in sorted(special_cases.items()):
            self.add_special_case(chunk, substrings)

    def add_special_case(self, unicode string, substrings):
        """Add a special-case tokenization rule.

        string (unicode): The string to specially tokenize.
        substrings (iterable): A sequence of dicts, where each dict describes
            a token and its attributes. The `ORTH` fields of the attributes
            must exactly match the string when they are concatenated.
        """
        substrings = list(substrings)
        cached = <_Cached*>self.mem.alloc(1, sizeof(_Cached))
        cached.length = len(substrings)
        cached.is_lex = False
        cached.data.tokens = self.vocab.make_fused_token(substrings)
        key = hash_string(string)
        self._specials.set(key, cached)
        self._cache.set(key, cached)
        self._rules[string] = substrings
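
    # Special-case sketch (assumes the ORTH and LEMMA attribute IDs are
    # importable from spacy.symbols); the concatenated ORTH values must equal
    # the string being overridden:
    #
    #     >>> from spacy.symbols import ORTH, LEMMA
    #     >>> tokenizer.add_special_case(u"gimme",
    #     ...     [{ORTH: u"gim", LEMMA: u"give"}, {ORTH: u"me"}])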

    def to_disk(self, path, **exclude):
        """Save the current state to a file on disk.

        path (Path): A `Path`-like object pointing to the file to write; its
            contents are the output of `Tokenizer.to_bytes`.
        """
        with path.open('wb') as file_:
            file_.write(self.to_bytes(**exclude))

    def from_disk(self, path, **exclude):
        """Load state from a file on disk. Modifies the object in place and
        returns it.

        path (Path): A `Path`-like object pointing to a file previously
            written by `Tokenizer.to_disk`.
        RETURNS (Tokenizer): The modified `Tokenizer` object.
        """
        with path.open('rb') as file_:
            bytes_data = file_.read()
        self.from_bytes(bytes_data, **exclude)
        return self
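
    # Disk round-trip sketch. Note that `to_bytes` below reads `.pattern` off
    # each search callable's bound regex, so serialization assumes the
    # callables were created from compiled patterns; `pathlib.Path` is assumed
    # because both methods call `path.open` directly:
    #
    #     >>> from pathlib import Path
    #     >>> tokenizer.to_disk(Path('/tmp/tokenizer.bin'))
    #     >>> tokenizer.from_disk(Path('/tmp/tokenizer.bin'))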

    def to_bytes(self, **exclude):
        """Serialize the current state to a binary string.

        **exclude: Named attributes to prevent from being serialized.
        RETURNS (bytes): The serialized form of the `Tokenizer` object.
        """
        serializers = OrderedDict((
            ('vocab', lambda: self.vocab.to_bytes()),
            ('prefix_search', lambda: self.prefix_search.__self__.pattern),
            ('suffix_search', lambda: self.suffix_search.__self__.pattern),
            ('infix_finditer', lambda: self.infix_finditer.__self__.pattern),
            ('token_match', lambda: self.token_match.__self__.pattern),
            ('exceptions', lambda: OrderedDict(sorted(self._rules.items())))
        ))
        return util.to_bytes(serializers, exclude)
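
    # Bytes round-trip sketch (assumes all four callables above are set to
    # bound regex methods, since each serializer reads `__self__.pattern`, and
    # that both tokenizers share a compatible vocab):
    #
    #     >>> tokenizer_bytes = tokenizer.to_bytes()
    #     >>> new_tokenizer = Tokenizer(nlp.vocab)
    #     >>> new_tokenizer.from_bytes(tokenizer_bytes)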

    def from_bytes(self, bytes_data, **exclude):
        """Load state from a binary string.

        bytes_data (bytes): The data to load from.
        **exclude: Named attributes to prevent from being loaded.
        RETURNS (Tokenizer): The `Tokenizer` object.
        """
        data = OrderedDict()
        deserializers = OrderedDict((
            ('vocab', lambda b: self.vocab.from_bytes(b)),
            ('prefix_search', lambda b: data.setdefault('prefix_search', b)),
            ('suffix_search', lambda b: data.setdefault('suffix_search', b)),
            ('infix_finditer', lambda b: data.setdefault('infix_finditer', b)),
            ('token_match', lambda b: data.setdefault('token_match', b)),
            ('exceptions', lambda b: data.setdefault('rules', b))
        ))
        msg = util.from_bytes(bytes_data, deserializers, exclude)
        if 'prefix_search' in data:
            self.prefix_search = re.compile(data['prefix_search']).search
        if 'suffix_search' in data:
            self.suffix_search = re.compile(data['suffix_search']).search
        if 'infix_finditer' in data:
            self.infix_finditer = re.compile(data['infix_finditer']).finditer
        if 'token_match' in data:
            self.token_match = re.compile(data['token_match']).search
        for string, substrings in data.get('rules', {}).items():
            self.add_special_case(string, substrings)
        return self