You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

673 lines
27 KiB

4 years ago
  1. import spacy
  2. import nltk
  3. from nltk.stem.snowball import SnowballStemmer
  4. import hickle as hkl
  5. import FASTsearch
  6. stemmer = SnowballStemmer("german")
  7. class Passiv2Aktiv(object):
  8. def __init__(self, hklDatabaseDir_Aktiv, hklDatabaseDir_Vorgangspassiv, hklDatabaseDir_Zustandspassiv):
  9. if hklDatabaseDir_Aktiv is not None:
  10. self.AktivDB = hkl.load(hklDatabaseDir_Aktiv)
  11. if hklDatabaseDir_Vorgangspassiv is not None:
  12. self.VorgangspassivDB = hkl.load(hklDatabaseDir_Vorgangspassiv)
  13. if hklDatabaseDir_Zustandspassiv is not None:
  14. self.ZustandspassivDB = hkl.load(hklDatabaseDir_Zustandspassiv)
  15. #print('loading the german spacy model..')
  16. self.nlp = spacy.load('de_core_news_sm')
  17. #print('done')
  18. #print('loading the stemmer..')
  19. self.stemmer = SnowballStemmer("german")
  20. #print('done')
  21. return
  22. def create_hklDB_from_csv(self, csvDbDir, StemOrNot):
  23. with open(csvDbDir) as lines:
  24. self.DB_All = []
  25. for line in lines:
  26. #print(line)
  27. self.DB_All.append(list(eval(line)))
  28. self.hkldb1 = []
  29. self.hkldb2 = []
  30. counter = 0
  31. for n in range(len(self.DB_All)):
  32. counter += 1
  33. if counter % 1000 == 0:
  34. print(counter)
  35. self.hkldb1.append([self.DB_All[n][0][0]] )
  36. self.hkldb2.append([self.DB_All[n][1][0]] )
  37. print('creating the hkl dump of DBAll')
  38. hkl.dump(self.DB_All, 'hkldb_All' + csvDbDir[:-4] + '.hkl', mode='w', compression='lzf')
  39. #print('done..')
  40. print('Creating the hkl dump of DB 1')
  41. hkl.dump(self.hkldb1, 'hkldb1' + csvDbDir[:-4] + '.hkl', mode='w', compression='lzf')
  42. #print('done..')
  43. print('Creating the hkl dump of DB 2')
  44. hkl.dump(self.hkldb2, 'hkldb2' + csvDbDir[:-4] + '.hkl', mode='w', compression='lzf')
  45. #print('done..')
  46. return 'done'
  47. def load_DB_into_FASTsearch(self):
  48. #print('loading the hkldb_All databases..')
  49. self.hkldbAktiv_All = hkl.load('hkldb_AllAktiv.hkl')
  50. #print('first done')
  51. self.hkldbVorgangspassiv_All = hkl.load('hkldb_AllVorgangspassiv.hkl')
  52. #print('second done')
  53. self.hkldbZustandspassiv_All = hkl.load('hkldb_AllZustandspassiv.hkl')
  54. #print('third done')
  55. #print('loading hkldbIndi_Conju 1..')
  56. self.fsearchAktiv1 = FASTsearch.FASTsearch('hkldb1Aktiv.hkl')
  57. #print('done')
  58. #print('loading hkldbIndi_Conju 2..')
  59. self.fsearchAktiv2 = FASTsearch.FASTsearch('hkldb2Aktiv.hkl')
  60. #print('done')
  61. # generate bow model only necessary the first time
  62. #print('generating BoW Model 1..')
  63. #self.fsearchAktiv1.Gen_BoW_Model(20000, "word", punctuation = False)
  64. #print('done')
  65. #print('generating BoW Model 2..')
  66. #self.fsearchAktiv2.Gen_BoW_Model(20000, "word", punctuation = False)
  67. #print('done')
  68. #print('loading the bow model 1')
  69. self.fsearchAktiv1.Load_BoW_Model('bagofwordshkldb1Aktiv.pkl', 'DataBaseOneZeroshkldb1Aktiv.hkl')
  70. #print('done')
  71. #print('loading the bow model 2')
  72. self.fsearchAktiv2.Load_BoW_Model('bagofwordshkldb2Aktiv.pkl', 'DataBaseOneZeroshkldb2Aktiv.hkl')
  73. #print('done')
  74. #print('loading hkldbIndi_Conju 1..')
  75. self.fsearchVorgangspassiv1 = FASTsearch.FASTsearch('hkldb1Vorgangspassiv.hkl')
  76. #print('done')
  77. #print('loading hkldbIndi_Conju 2..')
  78. self.fsearchVorgangspassiv2 = FASTsearch.FASTsearch('hkldb2Vorgangspassiv.hkl')
  79. #print('done')
  80. # uncomment if models are not there
  81. #print('generating BoW Model 1..')
  82. #self.fsearchVorgangspassiv1.Gen_BoW_Model(20000, "word", punctuation = False)
  83. #print('done')
  84. #print('generating BoW Model 2..')
  85. #self.fsearchVorgangspassiv2.Gen_BoW_Model(20000, "word", punctuation = False)
  86. #print('done')
  87. #print('loading the bow model 1')
  88. self.fsearchVorgangspassiv1.Load_BoW_Model('bagofwordshkldb1Vorgangspassiv.pkl', 'DataBaseOneZeroshkldb1Vorgangspassiv.hkl')
  89. #print('done')
  90. #print('loading the bow model 2')
  91. self.fsearchVorgangspassiv2.Load_BoW_Model('bagofwordshkldb2Vorgangspassiv.pkl', 'DataBaseOneZeroshkldb2Vorgangspassiv.hkl')
  92. #print('done')
  93. #print('loading hkldbIndi_Conju 1..')
  94. self.fsearchZustandspassiv1 = FASTsearch.FASTsearch('hkldb1Zustandspassiv.hkl')
  95. #print('done')
  96. #print('loading hkldbIndi_Conju 2..')
  97. self.fsearchZustandspassiv2 = FASTsearch.FASTsearch('hkldb2Zustandspassiv.hkl')
  98. #print('done')
  99. #print('generating BoW Model 1..')
  100. #self.fsearchZustandspassiv1.Gen_BoW_Model(20000, "word", punctuation = False)
  101. #print('done')
  102. #print('generating BoW Model 2..')
  103. #self.fsearchZustandspassiv2.Gen_BoW_Model(20000, "word", punctuation = False)
  104. #print('done')
  105. #print('loading the bow model 1')
  106. self.fsearchZustandspassiv1.Load_BoW_Model('bagofwordshkldb1Zustandspassiv.pkl', 'DataBaseOneZeroshkldb1Zustandspassiv.hkl')
  107. #print('done')
  108. #print('loading the bow model 2')
  109. self.fsearchZustandspassiv2.Load_BoW_Model('bagofwordshkldb2Zustandspassiv.pkl', 'DataBaseOneZeroshkldb2Zustandspassiv.hkl')
  110. #print('done')
  111. import GS_Utils
  112. #print('initializing the gs utils..')
  113. self.gs = GS_Utils.GS_Utils('de_core_news_sm')
  114. #print('done')
  115. from SentGlue import SentGlueMach
  116. #print('loading the Stochastic Gradient models..')
  117. self.sgm = SentGlueMach('trainedSGD.pkl', 'bagofwords.pkl')
  118. #print('done')
  119. #print('initializing the SGM..')
  120. self.sgm.initialize()
  121. #print('done')
  122. #print('oi thats the get_feature_names', self.fsearch1.vectorizer.get_feature_names())
  123. #print('oi thats the get_feature_names', self.fsearch2.vectorizer.get_feature_names())
  124. def replacePassivForms(self,sentences):
  125. endsentences = []
  126. sentencecount = 0
  127. for sentence in sentences:
  128. sentencecount += 1
  129. #print('processing sentence', sentencecount)
  130. doc = self.nlp(' '.join(sentence))
  131. verbs_of_sentence = []
  132. wordindex_to_replace = []
  133. count = 0
  134. subjectofsentence = []
  135. subjectindex = []
  136. erindex = []
  137. Erindex = []
  138. undindex = []
  139. for word in doc:
  140. count += 1
  141. #print(word.text)
  142. #print(word.dep_)
  143. if word.dep_ == 'sb':
  144. #print('oi')
  145. subjectofsentence.append(word.text)
  146. subjectindex.append(count)
  147. if word.text == 'er':
  148. erindex.append(count)
  149. if word.text == 'Er':
  150. Erindex.append(count)
  151. if word.text == 'und':
  152. undindex.append(count)
  153. if word.tag_[0] == 'V':
  154. verbs_of_sentence.append(word.text)
  155. wordindex_to_replace.append(count)
  156. if len(verbs_of_sentence) == 1 and verbs_of_sentence[0] == ('wurde' or 'wird' or 'werden' or 'wirst' or 'werde' or 'war'):
  157. verbs_of_sentence[0] = 'bliblablubdudidu'
  158. verbs_of_sentence_string = ' '.join(verbs_of_sentence)
  159. length_verbs_of_sentence_string = len(verbs_of_sentence_string)
  160. verbs_of_sentence_string += ' ' + str(length_verbs_of_sentence_string)
  161. #print(verbs_of_sentence_string)
  162. bestmatchesZustandspassiv1, matchindexZustandspassiv1 = self.fsearchZustandspassiv1.search_with_highest_multiplikation_Output(verbs_of_sentence_string, 1)
  163. bestmatchesVorgangspassiv1, matchindexVorgangspassiv1 = self.fsearchVorgangspassiv1.search_with_highest_multiplikation_Output(verbs_of_sentence_string, 1)
  164. #print('verbs of sentence string', verbs_of_sentence_string)
  165. #print(len(verbs_of_sentence))
  166. #print(matchindexVorgangspassiv1)
  167. #print(matchindexZustandspassiv1)
  168. vorgangORnot = 0
  169. zustandORnot = 0
  170. if (len(verbs_of_sentence) + 1) == matchindexVorgangspassiv1[1]:
  171. workindex = matchindexVorgangspassiv1[0]
  172. vorgangORnot = 1
  173. if (len(verbs_of_sentence) + 1) == matchindexZustandspassiv1[1]:
  174. workindex = matchindexZustandspassiv1[0]
  175. zustandORnot = 1
  176. #print(workindex)
  177. #print(self.hkldbAktiv_All[matchindexVorgangspassiv1[0]])
  178. #print(self.hkldbVorgangspassiv_All[matchindexVorgangspassiv1[0]])
  179. #print(self.hkldbZustandspassiv_All[matchindexZustandspassiv1[0]])
  180. formToReplace = []
  181. if vorgangORnot == 1:
  182. completeform = self.hkldbVorgangspassiv_All[workindex]
  183. if len(verbs_of_sentence_string.split()) != len(completeform[0][0].split()):
  184. vorgangORnot = 0
  185. if vorgangORnot == 1:
  186. completeform = self.hkldbVorgangspassiv_All[workindex]
  187. formToReplace = self.hkldbVorgangspassiv_All[workindex][1][0].split()[-2:]
  188. #print('formtoreplace vorgang',formToReplace)
  189. #print('complete form', completeform)
  190. formToReplace = '3. Person Singular ' + ' '.join(formToReplace)
  191. #print(formToReplace)
  192. thrdPersonAktivindex = self.fsearchAktiv2.search_with_highest_multiplikation_Output(formToReplace, 1)[0]
  193. thrdPersonAktiv = self.hkldbAktiv_All[thrdPersonAktivindex[0]][0][0].split()[:-1]
  194. #print(thrdPersonAktiv)
  195. thrdPersonAktiv = ' '.join(thrdPersonAktiv)
  196. dalist = verbs_of_sentence_string.split()[:-1]
  197. for verb in dalist:
  198. #print(sentence)
  199. #print(index)
  200. sentence.remove(verb)
  201. thereisasubjectEr = 0
  202. for index in subjectindex:
  203. for ind in undindex:
  204. if index - 1 == ind:
  205. if index - 2 == ('er' or 'Er'):
  206. thereisasubjectEr = 1
  207. if index + 1 == ind:
  208. if index + 2 == 'er' or index + 2 == 'Er':
  209. thereisasubjectEr = 1
  210. #print('subjectofsentence', subjectofsentence)
  211. thereisasubjectich = 0
  212. thereisasubjectdu = 0
  213. thereisasubjectihr = 0
  214. thereisasubjectwir = 0
  215. for word in subjectofsentence:
  216. if word == 'er' or word == 'Er':
  217. thereisasubjectEr = 1
  218. if word == 'ich':
  219. thereisasubjectich = 1
  220. if word == 'du':
  221. thereisasubjectdu = 1
  222. if word == 'ihr':
  223. thereisasubjectihr = 1
  224. if word == 'wir':
  225. thereisasubjectwir = 1
  226. #print('there is a subjecter', thereisasubjectEr)
  227. if thereisasubjectEr == 1:
  228. try:
  229. sentence.remove('Er')
  230. except:
  231. sentence.remove('er')
  232. sentence.append('ihn')
  233. if thereisasubjectich == 1:
  234. sentence.remove('ich')
  235. sentence.append('mich')
  236. if thereisasubjectdu == 1:
  237. sentence.remove('du')
  238. sentence.append('dich')
  239. if thereisasubjectihr == 1:
  240. sentence.remove('ihr')
  241. sentence.append('euch')
  242. if thereisasubjectwir == 1:
  243. sentence.remove('wir')
  244. sentence.append('uns')
  245. sentence.append(thrdPersonAktiv)
  246. #print('sentence in the vorgangornot', sentence)
  247. jemandornot = 1
  248. wordstodelete = []
  249. for n in range(len(sentence) - 1):
  250. if sentence[n] == 'von':
  251. if sentence[n + 1] == 'ihr':
  252. sentence[n + 1] = 'sie'
  253. wordstodelete.append(n)
  254. jemandornot = 0
  255. if sentence[n + 1] == 'ihm':
  256. sentence[n + 1] = 'er'
  257. wordstodelete.append(n)
  258. jemandornot = 0
  259. import spacy
  260. nlp = spacy.load('de_core_news_sm')
  261. token1 = nlp(sentence[n - 1])
  262. token2 = nlp(sentence[n + 1])
  263. for word in token1:
  264. if word.tag_ != 'NN' and word.tag_ != 'NE':
  265. for word in token2:
  266. if word.tag_ == 'NN' or word.tag_ == 'NE':
  267. wordstodelete.append(n)
  268. jemandornot = 0
  269. if sentence[n + 1] == 'dem' or sentence[n + 1] == 'einem':
  270. token3 = nlp(sentence[n-1])
  271. for word in token3:
  272. if word.tag_ != 'NN' and word.tag_ != 'NE':
  273. sentence[n + 1] = 'ein'
  274. wordstodelete.append(n)
  275. jemandornot = 0
  276. if sentence[n + 1] == 'der' or sentence[n + 1] == 'einer':
  277. token4 = nlp(sentence[n-1])
  278. for word in token4:
  279. if word.tag_ != 'NN' and word.tag_ != 'NE':
  280. sentence[n + 1] = 'eine'
  281. wordstodelete.append(n)
  282. jemandornot = 0
  283. if sentence[n] == 'vom':
  284. sentence[n] = 'ein'
  285. jemandornot = 0
  286. for index in wordstodelete[::-1]:
  287. del sentence[index]
  288. if jemandornot == 1:
  289. sentence.append('jemand')
  290. #print('sentence checkpoint 2', sentence)
  291. #print('get the tuples and triples to check..')
  292. tuplesTocheck, triplesTocheck, quadruplesToCheck = self.gs.GetTuplesinSentence(sentence)
  293. #print('done')
  294. #print(tuplesTocheck, triplesTocheck)
  295. grammpiecessentence = self.gs.createTupleofGrammarpieces( sentence, tuplesTocheck, triplesTocheck, quadruplesToCheck)
  296. if len(grammpiecessentence) > 7:
  297. print('A sentence is too long, too many permutations. \n piping wrong grammar..')
  298. endsentences.append(' '.join(grammpiecessentence).split())
  299. else:
  300. #print('the grammpiecessentence', grammpiecessentence)
  301. #print('genrating the permutations')
  302. permutations = self.sgm.GeneratePermutationsOfSentence(grammpiecessentence)
  303. #print('done')
  304. #print(permutations)
  305. #if (len(tuplesTocheck) != 0) or (len(triplesTocheck) != 0):
  306. # print('filtering the permutations based on the tuples and triples..')
  307. # filteredpermutations = self.gs.filterpermutationsaccordingtotuples(permutations, tuplesTocheck, triplesTocheck)
  308. # print('done')
  309. #else:
  310. # print('there are no triples or tuples to check..')
  311. # filteredpermutations = permutations
  312. sentencesToCheck = []
  313. for sentence in permutations:
  314. sentencesToCheck.append(' '.join(sentence))
  315. #print('sentencesToCheck', sentencesToCheck)
  316. #print('classifying the probability for right grammar in the filtered permutations..')
  317. #print(' '.join(sentence))
  318. endsentence = self.sgm.GetBestSentenceFromSentencesAccordingToGrammar(sentencesToCheck, ' '.join(sentence))
  319. #print('done')
  320. #print('the endsentence', endsentence)
  321. endsentences.append(endsentence.split())
  322. #count1 = 0
  323. #print(subjectindex)
  324. #subjectindex = subjectindex[0]
  325. #if subjectindex != 0:
  326. #for word in sentence[subjectindex - 1:subjectindex + 1]:
  327. #count1 += 1
  328. #if word == 'und':
  329. #thereIsanUnd = count1
  330. #if subjectindex == 0:
  331. #for word in sentence[subjectindex:subjectindex + 1]:
  332. #count1 += 1
  333. #if word == 'und':
  334. #thereIsanUnd = count1
  335. #thereisanEr = 0
  336. #if sentence[subjectindex - 1 + thereIsanUnd] == 'er' or sentence[subjectindex - 1 + thereIsanUnd] == 'Er':
  337. #thereisanEr = 1
  338. #if thereisanEr == 1:
  339. #sentence.remove('Er')
  340. #sentence.remove('er')
  341. #sentence.append('ihn')
  342. #print('zustandornot',zustandORnot)
  343. #print('vorgang', vorgangORnot)
  344. if zustandORnot == 1:
  345. completeform = self.hkldbZustandspassiv_All[workindex]
  346. if len(verbs_of_sentence_string.split()) != len(completeform[0][0].split()):
  347. zustandORnot = 0
  348. if zustandORnot == 1:
  349. #completeform = self.hkldbZustandspassiv_All[workindex]
  350. formToReplace = self.hkldbZustandspassiv_All[workindex][1][0].split()[-2:]
  351. formToReplace = '3. Person Singular ' + ' '.join(formToReplace)
  352. #print('formtoreplace zustand',formToReplace)
  353. #print('complete form', completeform)
  354. thrdPersonAktivindex = self.fsearchAktiv2.search_with_highest_multiplikation_Output(formToReplace, 1)[0]
  355. thrdPersonAktiv = self.hkldbAktiv_All[thrdPersonAktivindex[0]][0][0].split()[:-1]
  356. thrdPersonAktiv = ' '.join(thrdPersonAktiv)
  357. for verb in verbs_of_sentence_string.split()[:-1]:
  358. #print(sentence)
  359. #print(index)
  360. sentence.remove(verb)
  361. thereisasubjectEr = 0
  362. for index in subjectindex:
  363. for ind in undindex:
  364. if index - 1 == ind:
  365. if index - 2 == ('er' or 'Er'):
  366. thereisasubjectEr = 1
  367. if index + 1 == ind:
  368. if index + 2 == 'er' or index + 2 == 'Er':
  369. thereisasubjectEr = 1
  370. #print('subjectofsentence', subjectofsentence)
  371. thereisasubjectich = 0
  372. thereisasubjectdu = 0
  373. thereisasubjectihr = 0
  374. thereisasubjectwir = 0
  375. for word in subjectofsentence:
  376. if word == 'er' or word == 'Er':
  377. thereisasubjectEr = 1
  378. if word == 'ich':
  379. thereisasubjectich = 1
  380. if word == 'du':
  381. thereisasubjectdu = 1
  382. if word == 'ihr':
  383. thereisasubjectihr = 1
  384. if word == 'wir':
  385. thereisasubjectwir = 1
  386. if thereisasubjectEr == 1:
  387. try:
  388. sentence.remove('Er')
  389. except:
  390. sentence.remove('er')
  391. sentence.append('ihn')
  392. if thereisasubjectich == 1:
  393. sentence.remove('ich')
  394. sentence.append('mich')
  395. if thereisasubjectdu == 1:
  396. sentence.remove('du')
  397. sentence.append('dich')
  398. if thereisasubjectihr == 1:
  399. sentence.remove('ihr')
  400. sentence.append('euch')
  401. if thereisasubjectwir == 1:
  402. sentence.remove('wir')
  403. sentence.append('uns')
  404. sentence.append(thrdPersonAktiv)
  405. jemandornot = 1
  406. wordstodelete = []
  407. for n in range(len(sentence) - 1):
  408. if sentence[n] == 'von':
  409. if sentence[n + 1] == 'ihr':
  410. sentence[n + 1] = 'sie'
  411. wordstodelete.append(n)
  412. jemandornot = 0
  413. if sentence[n + 1] == 'ihm':
  414. sentence[n + 1] = 'er'
  415. wordstodelete.append(n)
  416. jemandornot = 0
  417. import spacy
  418. nlp = spacy.load('de_core_news_sm')
  419. token1 = nlp(sentence[n - 1])
  420. token2 = nlp(sentence[n + 1])
  421. for word in token1:
  422. if word.tag_ != 'NN' and word.tag_ != 'NE':
  423. for word in token2:
  424. if word.tag_ == 'NN' or word.tag_ == 'NE':
  425. wordstodelete.append(n)
  426. jemandornot = 0
  427. if sentence[n + 1] == 'dem' or sentence[n + 1] == 'einem':
  428. token3 = nlp(sentence[n-1])
  429. for word in token3:
  430. if word.tag_ != 'NN' and word.tag_ != 'NE':
  431. sentence[n + 1] = 'ein'
  432. wordstodelete.append(n)
  433. jemandornot = 0
  434. if sentence[n + 1] == 'der' or sentence[n + 1] == 'einer':
  435. token4 = nlp(sentence[n-1])
  436. for word in token4:
  437. if word.tag_ != 'NN' and word.tag_ != 'NE':
  438. sentence[n + 1] = 'eine'
  439. wordstodelete.append(n)
  440. jemandornot = 0
  441. if sentence[n] == 'vom':
  442. sentence[n] = 'ein'
  443. jemandornot = 0
  444. for index in wordstodelete[::-1]:
  445. del sentence[index]
  446. if jemandornot == 1:
  447. sentence.append('jemand')
  448. #print(sentence)
  449. #print('get the tuples and triples to check..')
  450. tuplesTocheck, triplesTocheck, quadruplesTocheck = self.gs.GetTuplesinSentence(sentence)
  451. #print('done')
  452. #print(tuplesTocheck, triplesTocheck)
  453. grammpiecessentence = self.gs.createTupleofGrammarpieces( sentence, tuplesTocheck, triplesTocheck, quadruplesTocheck)
  454. if len(grammpiecessentence) > 7:
  455. print('A sentence is too long, too many permutations. \n piping wrong grammar..')
  456. endsentences.append(' '.join(grammpiecessentence).split())
  457. else:
  458. #print('the grammpiecessentence', grammpiecessentence)
  459. #print('genrating the permutations')
  460. permutations = self.sgm.GeneratePermutationsOfSentence(grammpiecessentence)
  461. #print('done')
  462. #print(permutations)
  463. #if (len(tuplesTocheck) != 0) or (len(triplesTocheck) != 0):
  464. # print('filtering the permutations based on the tuples and triples..')
  465. # filteredpermutations = self.gs.filterpermutationsaccordingtotuples(permutations, tuplesTocheck, triplesTocheck)
  466. # print('done')
  467. #else:
  468. # print('there are no triples or tuples to check..')
  469. # filteredpermutations = permutations
  470. sentencesToCheck = []
  471. for sentence in permutations:
  472. sentencesToCheck.append(' '.join(sentence))
  473. #print('sentencesToCheck', sentencesToCheck)
  474. #print('classifying the probability for right grammar in the filtered permutations..')
  475. #print(' '.join(sentence))
  476. endsentence = self.sgm.GetBestSentenceFromSentencesAccordingToGrammar(sentencesToCheck, ' '.join(sentence))
  477. #print('done')
  478. #print('the endsentence', endsentence)
  479. endsentences.append(endsentence.split())
  480. if zustandORnot == 0 and vorgangORnot == 0:
  481. #print('it is coming to the else')
  482. endsentences.append(sentence)
  483. return endsentences
# Vorgangspassiv (processual passive) is mapped onto the same tense, 3rd person singular.
# Zustandspassiv (statal passive): always one tense later, i.e.
# Präsens => Präteritum, Präteritum => Perfekt