You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

679 lines
28 KiB

4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
  1. import spacy
  2. import nltk
  3. from nltk.stem.snowball import SnowballStemmer
  4. import hickle as hkl
  5. import FASTsearch
  6. stemmer = SnowballStemmer("german")
class Passiv2Aktiv(object):
    """Rewrites German passive-voice sentences into active voice.

    Distinguishes Vorgangspassiv (processual passive) and Zustandspassiv
    (statal passive), backed by precomputed hickle verb-form databases
    and FASTsearch bag-of-words indexes.
    """

    def __init__(self, hklDatabaseDir_Aktiv, hklDatabaseDir_Vorgangspassiv, hklDatabaseDir_Zustandspassiv):
        """Load the optional verb-form databases, the spaCy model and the stemmer.

        Each ``hklDatabaseDir_*`` argument is a path to a ``.hkl`` dump,
        or ``None`` to skip loading that database.
        """
        if hklDatabaseDir_Aktiv is not None:
            self.AktivDB = hkl.load(hklDatabaseDir_Aktiv)
        if hklDatabaseDir_Vorgangspassiv is not None:
            self.VorgangspassivDB = hkl.load(hklDatabaseDir_Vorgangspassiv)
        if hklDatabaseDir_Zustandspassiv is not None:
            self.ZustandspassivDB = hkl.load(hklDatabaseDir_Zustandspassiv)
        # German spaCy model used for POS tags and dependency labels.
        self.nlp = spacy.load('de_core_news_sm')
        # German snowball stemmer (a module-level instance also exists).
        self.stemmer = SnowballStemmer("german")
        return
  22. def create_hklDB_from_csv(self, csvDbDir, StemOrNot):
  23. with open(csvDbDir) as lines:
  24. self.DB_All = []
  25. for line in lines:
  26. #print(line)
  27. self.DB_All.append(list(eval(line)))
  28. self.hkldb1 = []
  29. self.hkldb2 = []
  30. counter = 0
  31. for n in range(len(self.DB_All)):
  32. counter += 1
  33. if counter % 1000 == 0:
  34. print(counter)
  35. self.hkldb1.append([self.DB_All[n][0][0]] )
  36. self.hkldb2.append([self.DB_All[n][1][0]] )
  37. print('creating the hkl dump of DBAll')
  38. hkl.dump(self.DB_All, 'hkldb_All' + csvDbDir[:-4] + '.hkl', mode='w', compression='lzf')
  39. #print('done..')
  40. print('Creating the hkl dump of DB 1')
  41. hkl.dump(self.hkldb1, 'hkldb1' + csvDbDir[:-4] + '.hkl', mode='w', compression='lzf')
  42. #print('done..')
  43. print('Creating the hkl dump of DB 2')
  44. hkl.dump(self.hkldb2, 'hkldb2' + csvDbDir[:-4] + '.hkl', mode='w', compression='lzf')
  45. #print('done..')
  46. return 'done'
    def load_DB_into_FASTsearch(self):
        """Load all verb-form databases and their FASTsearch BoW indexes.

        For each of Aktiv / Vorgangspassiv / Zustandspassiv this sets up the
        full hkl database (``self.hkldb*_All``) and two FASTsearch instances:
        index 1 is queried with verb-form strings, index 2 with grammatical
        descriptions (see replacePassivForms).  Also initializes the GS_Utils
        grammar helpers and the SentGlueMach grammar scorer.
        """
        # Full databases: each entry pairs (verb forms...) with its
        # grammatical description.
        self.hkldbAktiv_All = hkl.load('hkldb_AllAktiv.hkl')
        self.hkldbVorgangspassiv_All = hkl.load('hkldb_AllVorgangspassiv.hkl')
        self.hkldbZustandspassiv_All = hkl.load('hkldb_AllZustandspassiv.hkl')
        # --- Aktiv indexes ---
        self.fsearchAktiv1 = FASTsearch.FASTsearch('hkldb1Aktiv.hkl')
        self.fsearchAktiv2 = FASTsearch.FASTsearch('hkldb2Aktiv.hkl')
        # Generating the BoW models is only necessary the first time:
        #self.fsearchAktiv1.Gen_BoW_Model(20000, "word", punctuation = False)
        #self.fsearchAktiv2.Gen_BoW_Model(20000, "word", punctuation = False)
        self.fsearchAktiv1.Load_BoW_Model('bagofwordshkldb1Aktiv.pkl', 'DataBaseOneZeroshkldb1Aktiv.hkl')
        self.fsearchAktiv2.Load_BoW_Model('bagofwordshkldb2Aktiv.pkl', 'DataBaseOneZeroshkldb2Aktiv.hkl')
        # --- Vorgangspassiv indexes ---
        self.fsearchVorgangspassiv1 = FASTsearch.FASTsearch('hkldb1Vorgangspassiv.hkl')
        self.fsearchVorgangspassiv2 = FASTsearch.FASTsearch('hkldb2Vorgangspassiv.hkl')
        # Uncomment if the models are not there yet:
        #self.fsearchVorgangspassiv1.Gen_BoW_Model(20000, "word", punctuation = False)
        #self.fsearchVorgangspassiv2.Gen_BoW_Model(20000, "word", punctuation = False)
        self.fsearchVorgangspassiv1.Load_BoW_Model('bagofwordshkldb1Vorgangspassiv.pkl', 'DataBaseOneZeroshkldb1Vorgangspassiv.hkl')
        self.fsearchVorgangspassiv2.Load_BoW_Model('bagofwordshkldb2Vorgangspassiv.pkl', 'DataBaseOneZeroshkldb2Vorgangspassiv.hkl')
        # --- Zustandspassiv indexes ---
        self.fsearchZustandspassiv1 = FASTsearch.FASTsearch('hkldb1Zustandspassiv.hkl')
        self.fsearchZustandspassiv2 = FASTsearch.FASTsearch('hkldb2Zustandspassiv.hkl')
        #self.fsearchZustandspassiv1.Gen_BoW_Model(20000, "word", punctuation = False)
        #self.fsearchZustandspassiv2.Gen_BoW_Model(20000, "word", punctuation = False)
        self.fsearchZustandspassiv1.Load_BoW_Model('bagofwordshkldb1Zustandspassiv.pkl', 'DataBaseOneZeroshkldb1Zustandspassiv.hkl')
        self.fsearchZustandspassiv2.Load_BoW_Model('bagofwordshkldb2Zustandspassiv.pkl', 'DataBaseOneZeroshkldb2Zustandspassiv.hkl')
        # Deferred imports: these helpers are only needed once the DBs exist.
        import GS_Utils
        self.gs = GS_Utils.GS_Utils('de_core_news_sm')
        from SentGlue import SentGlueMach
        # Stochastic-gradient grammar model used to rank permutations.
        self.sgm = SentGlueMach('trainedSGD.pkl', 'bagofwords.pkl')
        self.sgm.initialize()
    def replacePassivForms(self, sentences):
        """Rewrite each passive sentence (a list of tokens) into active voice.

        For every sentence the verbs are collected and matched against the
        Vorgangspassiv/Zustandspassiv databases; on a full match they are
        replaced by the corresponding 3rd-person-singular active form,
        subject pronouns become object pronouns, and a 'von <agent>' phrase
        (or the fallback 'jemand') becomes the new subject.  The rebuilt
        bag of words is then reordered by the SentGlue grammar scorer.
        Sentences matching neither passive form are passed through as-is.

        :param sentences: iterable of token lists; the inner lists are
            mutated in place.
        :return: list of token lists (the active-voice sentences).
        """
        endsentences = []
        sentencecount = 0
        for sentence in sentences:
            try:
                sentencecount += 1
                doc = self.nlp(' '.join(sentence))
                verbs_of_sentence = []
                wordindex_to_replace = []  # NOTE(review): collected but never used below
                count = 0
                subjectofsentence = []
                subjectindex = []
                erindex = []  # NOTE(review): collected but never used below
                Erindex = []  # NOTE(review): collected but never used below
                undindex = []
                # Collect subjects ('sb' dependency label), the positions of
                # 'er'/'Er'/'und', and every verb (POS tag starting with 'V').
                # Positions are 1-based because count is incremented first.
                for word in doc:
                    count += 1
                    if word.dep_ == 'sb':
                        subjectofsentence.append(word.text)
                        subjectindex.append(count)
                    if word.text == 'er':
                        erindex.append(count)
                    if word.text == 'Er':
                        Erindex.append(count)
                    if word.text == 'und':
                        undindex.append(count)
                    if word.tag_[0] == 'V':
                        verbs_of_sentence.append(word.text)
                        wordindex_to_replace.append(count)
                # NOTE(review): ('wurde' or 'wird' or ...) evaluates to just
                # 'wurde', so this guard only fires for a lone 'wurde'; the
                # other auxiliaries are never compared.  A membership test
                # (in ('wurde', 'wird', ...)) was presumably intended.
                if len(verbs_of_sentence) == 1 and verbs_of_sentence[0] == ('wurde' or 'wird' or 'werden' or 'wirst' or 'werde' or 'war'):
                    verbs_of_sentence[0] = 'bliblablubdudidu'  # placeholder that matches no DB entry
                verbs_of_sentence_string = ' '.join(verbs_of_sentence)
                length_verbs_of_sentence_string = len(verbs_of_sentence_string)
                # The DB keys are "<verbs> <character count>", so append the length.
                verbs_of_sentence_string += ' ' + str(length_verbs_of_sentence_string)
                bestmatchesZustandspassiv1, matchindexZustandspassiv1 = self.fsearchZustandspassiv1.search_with_highest_multiplikation_Output(verbs_of_sentence_string, 1)
                bestmatchesVorgangspassiv1, matchindexVorgangspassiv1 = self.fsearchVorgangspassiv1.search_with_highest_multiplikation_Output(verbs_of_sentence_string, 1)
                vorgangORnot = 0
                zustandORnot = 0
                # A hit counts only when every query token matched, i.e. the
                # match score equals verb count + 1 (the length suffix).
                if (len(verbs_of_sentence) + 1) == matchindexVorgangspassiv1[1]:
                    workindex = matchindexVorgangspassiv1[0]
                    vorgangORnot = 1
                if (len(verbs_of_sentence) + 1) == matchindexZustandspassiv1[1]:
                    workindex = matchindexZustandspassiv1[0]
                    zustandORnot = 1
                formToReplace = []
                # Double-check the Vorgangspassiv hit: token counts must agree.
                if vorgangORnot == 1:
                    completeform = self.hkldbVorgangspassiv_All[workindex]
                    if len(verbs_of_sentence_string.split()) != len(completeform[0][0].split()):
                        vorgangORnot = 0
                if vorgangORnot == 1:
                    completeform = self.hkldbVorgangspassiv_All[workindex]
                    # Last two words of the description, e.g. the tense; the
                    # Vorgangspassiv maps onto the SAME tense, 3rd person singular.
                    formToReplace = self.hkldbVorgangspassiv_All[workindex][1][0].split()[-2:]
                    formToReplace = '3. Person Singular ' + ' '.join(formToReplace)
                    thrdPersonAktivindex = self.fsearchAktiv2.search_with_highest_multiplikation_Output(formToReplace, 1)[0]
                    thrdPersonAktiv = self.hkldbAktiv_All[thrdPersonAktivindex[0]][0][0].split()[:-1]
                    thrdPersonAktiv = ' '.join(thrdPersonAktiv)
                    # Remove the passive verb tokens from the sentence (the
                    # last element of the split is the length suffix).
                    dalist = verbs_of_sentence_string.split()[:-1]
                    for verb in dalist:
                        sentence.remove(verb)
                    thereisasubjectEr = 0
                    # Detect an 'er' coordinated via 'und' next to a subject.
                    # NOTE(review): `index - 2 == ('er' or 'Er')` compares an
                    # int to the string 'er' and is always False; likewise
                    # `index + 2 == 'er'`.  sentence[index - 2] was presumably
                    # intended — confirm against the original project.
                    for index in subjectindex:
                        for ind in undindex:
                            if index - 1 == ind:
                                if index - 2 == ('er' or 'Er'):
                                    thereisasubjectEr = 1
                            if index + 1 == ind:
                                if index + 2 == 'er' or index + 2 == 'Er':
                                    thereisasubjectEr = 1
                    thereisasubjectich = 0
                    thereisasubjectdu = 0
                    thereisasubjectihr = 0
                    thereisasubjectwir = 0
                    for word in subjectofsentence:
                        if word == 'er' or word == 'Er':
                            thereisasubjectEr = 1
                        if word == 'ich':
                            thereisasubjectich = 1
                        if word == 'du':
                            thereisasubjectdu = 1
                        if word == 'ihr':
                            thereisasubjectihr = 1
                        if word == 'wir':
                            thereisasubjectwir = 1
                    # Nominative subject pronouns become accusative objects,
                    # appended at the end (reordered later by the scorer).
                    if thereisasubjectEr == 1:
                        try:
                            sentence.remove('Er')
                        except:
                            sentence.remove('er')
                        sentence.append('ihn')
                    if thereisasubjectich == 1:
                        sentence.remove('ich')
                        sentence.append('mich')
                    if thereisasubjectdu == 1:
                        sentence.remove('du')
                        sentence.append('dich')
                    if thereisasubjectihr == 1:
                        sentence.remove('ihr')
                        sentence.append('euch')
                    if thereisasubjectwir == 1:
                        sentence.remove('wir')
                        sentence.append('uns')
                    sentence.append(thrdPersonAktiv)
                    # Promote a 'von <agent>' phrase to subject; when no agent
                    # is found, fall back to appending 'jemand' ("someone").
                    jemandornot = 1
                    wordstodelete = []
                    for n in range(len(sentence) - 1):
                        if sentence[n] == 'von':
                            if sentence[n + 1] == 'ihr':
                                sentence[n + 1] = 'sie'
                                wordstodelete.append(n)
                                jemandornot = 0
                            if sentence[n + 1] == 'ihm':
                                sentence[n + 1] = 'er'
                                wordstodelete.append(n)
                                jemandornot = 0
                            # NOTE(review): re-importing and re-loading the
                            # spaCy model on every 'von' occurrence is very
                            # slow — self.nlp is already available.
                            import spacy
                            nlp = spacy.load('de_core_news_sm')
                            token1 = nlp(sentence[n - 1])
                            token2 = nlp(sentence[n + 1])
                            # Drop 'von' when it directly precedes a noun/name
                            # but does not follow one.
                            for word in token1:
                                if word.tag_ != 'NN' and word.tag_ != 'NE':
                                    for word in token2:
                                        if word.tag_ == 'NN' or word.tag_ == 'NE':
                                            wordstodelete.append(n)
                                            jemandornot = 0
                            if sentence[n + 1] == 'dem' or sentence[n + 1] == 'einem':
                                token3 = nlp(sentence[n-1])
                                for word in token3:
                                    if word.tag_ != 'NN' and word.tag_ != 'NE':
                                        sentence[n + 1] = 'ein'
                                        wordstodelete.append(n)
                                        jemandornot = 0
                            if sentence[n + 1] == 'der' or sentence[n + 1] == 'einer':
                                token4 = nlp(sentence[n-1])
                                for word in token4:
                                    if word.tag_ != 'NN' and word.tag_ != 'NE':
                                        sentence[n + 1] = 'eine'
                                        wordstodelete.append(n)
                                        jemandornot = 0
                        if sentence[n] == 'vom':
                            sentence[n] = 'ein'
                            jemandornot = 0
                    # Delete back-to-front so earlier indices stay valid.
                    for index in wordstodelete[::-1]:
                        del sentence[index]
                    if jemandornot == 1:
                        sentence.append('jemand')
                    # Reorder the rebuilt bag of words into a grammatical
                    # sentence via permutation + grammar scoring.
                    tuplesTocheck, triplesTocheck, quadruplesToCheck = self.gs.GetTuplesinSentence(sentence)
                    grammpiecessentence = self.gs.createTupleofGrammarpieces( sentence, tuplesTocheck, triplesTocheck, quadruplesToCheck)
                    if len(grammpiecessentence) > 7:
                        # Too many pieces: factorial blow-up, skip the scoring.
                        print('A sentence is too long, too many permutations. \n piping wrong grammar..')
                        endsentences.append(' '.join(grammpiecessentence).split())
                    else:
                        permutations = self.sgm.GeneratePermutationsOfSentence(grammpiecessentence)
                        sentencesToCheck = []
                        # NOTE(review): this loop rebinds `sentence`, clobbering
                        # the outer loop variable — the join below then uses the
                        # LAST permutation, not the original sentence.
                        for sentence in permutations:
                            sentencesToCheck.append(' '.join(sentence))
                        endsentence = self.sgm.GetBestSentenceFromSentencesAccordingToGrammar(sentencesToCheck, ' '.join(sentence))
                        endsentences.append(endsentence.split())
                # Double-check the Zustandspassiv hit, same as above.
                if zustandORnot == 1:
                    completeform = self.hkldbZustandspassiv_All[workindex]
                    if len(verbs_of_sentence_string.split()) != len(completeform[0][0].split()):
                        zustandORnot = 0
                if zustandORnot == 1:
                    # Zustandspassiv branch: mirrors the Vorgangspassiv branch
                    # above but reads from the Zustandspassiv database.
                    formToReplace = self.hkldbZustandspassiv_All[workindex][1][0].split()[-2:]
                    formToReplace = '3. Person Singular ' + ' '.join(formToReplace)
                    thrdPersonAktivindex = self.fsearchAktiv2.search_with_highest_multiplikation_Output(formToReplace, 1)[0]
                    thrdPersonAktiv = self.hkldbAktiv_All[thrdPersonAktivindex[0]][0][0].split()[:-1]
                    thrdPersonAktiv = ' '.join(thrdPersonAktiv)
                    for verb in verbs_of_sentence_string.split()[:-1]:
                        sentence.remove(verb)
                    thereisasubjectEr = 0
                    # NOTE(review): same always-False int-vs-str comparisons
                    # as in the Vorgangspassiv branch.
                    for index in subjectindex:
                        for ind in undindex:
                            if index - 1 == ind:
                                if index - 2 == ('er' or 'Er'):
                                    thereisasubjectEr = 1
                            if index + 1 == ind:
                                if index + 2 == 'er' or index + 2 == 'Er':
                                    thereisasubjectEr = 1
                    thereisasubjectich = 0
                    thereisasubjectdu = 0
                    thereisasubjectihr = 0
                    thereisasubjectwir = 0
                    for word in subjectofsentence:
                        if word == 'er' or word == 'Er':
                            thereisasubjectEr = 1
                        if word == 'ich':
                            thereisasubjectich = 1
                        if word == 'du':
                            thereisasubjectdu = 1
                        if word == 'ihr':
                            thereisasubjectihr = 1
                        if word == 'wir':
                            thereisasubjectwir = 1
                    if thereisasubjectEr == 1:
                        try:
                            sentence.remove('Er')
                        except:
                            sentence.remove('er')
                        sentence.append('ihn')
                    if thereisasubjectich == 1:
                        sentence.remove('ich')
                        sentence.append('mich')
                    if thereisasubjectdu == 1:
                        sentence.remove('du')
                        sentence.append('dich')
                    if thereisasubjectihr == 1:
                        sentence.remove('ihr')
                        sentence.append('euch')
                    if thereisasubjectwir == 1:
                        sentence.remove('wir')
                        sentence.append('uns')
                    sentence.append(thrdPersonAktiv)
                    jemandornot = 1
                    wordstodelete = []
                    for n in range(len(sentence) - 1):
                        if sentence[n] == 'von':
                            if sentence[n + 1] == 'ihr':
                                sentence[n + 1] = 'sie'
                                wordstodelete.append(n)
                                jemandornot = 0
                            if sentence[n + 1] == 'ihm':
                                sentence[n + 1] = 'er'
                                wordstodelete.append(n)
                                jemandornot = 0
                            # NOTE(review): duplicated slow model reload, see above.
                            import spacy
                            nlp = spacy.load('de_core_news_sm')
                            token1 = nlp(sentence[n - 1])
                            token2 = nlp(sentence[n + 1])
                            for word in token1:
                                if word.tag_ != 'NN' and word.tag_ != 'NE':
                                    for word in token2:
                                        if word.tag_ == 'NN' or word.tag_ == 'NE':
                                            wordstodelete.append(n)
                                            jemandornot = 0
                            if sentence[n + 1] == 'dem' or sentence[n + 1] == 'einem':
                                token3 = nlp(sentence[n-1])
                                for word in token3:
                                    if word.tag_ != 'NN' and word.tag_ != 'NE':
                                        sentence[n + 1] = 'ein'
                                        wordstodelete.append(n)
                                        jemandornot = 0
                            if sentence[n + 1] == 'der' or sentence[n + 1] == 'einer':
                                token4 = nlp(sentence[n-1])
                                for word in token4:
                                    if word.tag_ != 'NN' and word.tag_ != 'NE':
                                        sentence[n + 1] = 'eine'
                                        wordstodelete.append(n)
                                        jemandornot = 0
                        if sentence[n] == 'vom':
                            sentence[n] = 'ein'
                            jemandornot = 0
                    for index in wordstodelete[::-1]:
                        del sentence[index]
                    if jemandornot == 1:
                        sentence.append('jemand')
                    # NOTE: this branch spells the variable quadruplesTocheck
                    # (lower-case C), unlike the Vorgangspassiv branch.
                    tuplesTocheck, triplesTocheck, quadruplesTocheck = self.gs.GetTuplesinSentence(sentence)
                    grammpiecessentence = self.gs.createTupleofGrammarpieces( sentence, tuplesTocheck, triplesTocheck, quadruplesTocheck)
                    if len(grammpiecessentence) > 7:
                        print('A sentence is too long, too many permutations. \n piping wrong grammar..')
                        endsentences.append(' '.join(grammpiecessentence).split())
                    else:
                        permutations = self.sgm.GeneratePermutationsOfSentence(grammpiecessentence)
                        sentencesToCheck = []
                        # NOTE(review): rebinds `sentence` again, see above.
                        for sentence in permutations:
                            sentencesToCheck.append(' '.join(sentence))
                        endsentence = self.sgm.GetBestSentenceFromSentencesAccordingToGrammar(sentencesToCheck, ' '.join(sentence))
                        endsentences.append(endsentence.split())
                if zustandORnot == 0 and vorgangORnot == 0:
                    # No passive form recognized: keep the sentence unchanged.
                    endsentences.append(sentence)
            except:
                # NOTE(review): bare except hides the real error class; also
                # endsentences[-1] raises IndexError when the very first
                # sentence fails (endsentences still empty).
                print('the sentence ' + str(sentence) + ' caused an error in the module passive2active')
                if endsentences[-1] == sentence:
                    pass
                else:
                    endsentences.append(sentence)
        return endsentences
# Vorgangspassiv (processual passive) is mapped onto the same tense, 3rd person singular.
# Zustandspassiv (statal passive) is always mapped one tense further back, i.e.
# Präsens => Präteritum, Präteritum => Perfekt