Search on legal documents using TensorFlow and a web_actix web interface
# The class FASTsearch. Every database can be represented as lists; the "brain" itself is
# built from lists, so all documents are accessible at almost the same moment.
# TODO: GPU multithreading has to be implemented.
# USAGE: fit a scikit-learn CountVectorizer on a database of lines or documents.
import joblib
from sklearn.feature_extraction.text import CountVectorizer
import numpy as np
import scipy as sc
import tensorflow.compat.v1 as tf
tf.compat.v1.disable_eager_execution()
import _pickle as cPickle
import hickle as hkl
import os
# Convert a scipy csr matrix to a tf sparse tensor so the matrix product can run on the GPU.
def convert_sparse_matrix_to_sparse_tensor(X):
    coo = sc.sparse.coo_matrix(X)
    indices = np.mat([coo.row, coo.col]).transpose()
    return tf.SparseTensorValue(indices, coo.data, coo.shape)
# The class is initialized with the database in [['word','word2'],[],[],[]] list format
# (two-dimensional): the index of a list within the outer list defines its document id,
# and every inner list holds the strings that make up one document.
# This list must be saved as an hkl dump, which is then loaded as the database
# (a sketch of building such a dump follows after the tokenizer below).
def my_tokenizer(s):
    return s.split(r'\+')
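
# A minimal sketch of how such a dump could be produced. The helper name and the
# 'legal_docs.hkl' filename are only illustrative; any two-dimensional list of strings
# dumped with hickle works the same way.
def _example_build_database_dump():
    # Two documents, each given as a list of strings (FASTsearch joins them with spaces).
    docs = [['the', 'first', 'legal', 'document'],
            ['the', 'second', 'legal', 'document']]
    hkl.dump(docs, 'legal_docs.hkl', mode='w')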
class FASTsearch(object):

    def __init__(self, DatabaseDir):
        self.DatabaseDir = DatabaseDir[:-4]
        database = []
        # Input has to be in hkl format; every element (a list of strings) becomes one document string.
        hkl_load = hkl.load(DatabaseDir)
        for element in hkl_load:
            database.append(' '.join(element))
        self.database = database
    def Gen_BoW_Model(self, max_features, analyzer, punctuation=False):
        print("Creating the bag of words...\n")
        # Initialize the "CountVectorizer" object, which is scikit-learn's bag-of-words tool.
        if not punctuation:
            vectorizer = CountVectorizer(analyzer=analyzer,
                                         tokenizer=None,
                                         preprocessor=None,
                                         stop_words=None,
                                         max_features=max_features)
        else:
            vectorizer = CountVectorizer(analyzer=analyzer,
                                         tokenizer=my_tokenizer,
                                         preprocessor=None,
                                         stop_words=None,
                                         max_features=max_features)
        # fit_transform() does two things: first, it fits the model and learns the
        # vocabulary; second, it transforms the training data into feature vectors.
        # The input to fit_transform should be a list of strings.
        train_data_features = vectorizer.fit_transform(self.database)
        joblib.dump(vectorizer, 'bagofwords' + self.DatabaseDir + '.pkl')
        print('dumping the data to hkl format..')
        hkl.dump(train_data_features, 'DataBaseOneZeros' + self.DatabaseDir + '.hkl', mode='w', compression='gzip')
        print('done')
        return vectorizer
    def Load_BoW_Model(self, BoWModelDir, DatabaseOneZerosDir):
        # Input has to be the pkl vectorizer and the hkl document-term matrix dumped by Gen_BoW_Model.
        self.vectorizer = joblib.load(BoWModelDir)
        self.dbOZ = hkl.load(DatabaseOneZerosDir).astype('float32')
        return self.vectorizer
    # Input: the string to search for in the documents, and numberofmatches, the number of best documents wanted.
    # Output: the index of the document whose score is closest to an exact match, plus the
    # highest-scoring entry as [index, score].
    def search(self, string, numberofmatches):
        # Convert the user input to its bag-of-words count vector.
        user_array = []
        user_array.append(string)
        user_input_OnesZeros = self.vectorizer.transform(user_array)
        uOZ = user_input_OnesZeros.toarray()[0].astype(np.float32, copy=False)
        uiOZ = uOZ[np.newaxis, :]
        uiOZ = uiOZ.transpose()

        sess = tf.Session()
        with tf.device('/gpu:0'):
            with sess.as_default():
                uiOZ_tensor = tf.constant(uiOZ)
                dbOZ_tensor_sparse = convert_sparse_matrix_to_sparse_tensor(self.dbOZ)
                # Dense alternative: wordCountDoku = tf.matmul(uiOZ_tensor, dbOZ_tensor)
                wordCountDoku = tf.sparse_tensor_dense_matmul(dbOZ_tensor_sparse, uiOZ_tensor)
                wCD = np.array(wordCountDoku.eval())

        indexedwCD = []
        for n in range(len(wCD)):
            indexedwCD.append([n, wCD[n][0]])
        indexedwCD = sorted(indexedwCD[::-1], key=lambda tup: tup[1], reverse=True)

        best_n_documents = []
        # Score the query would get if it matched itself exactly (sum of squared counts).
        eq_number = 0
        for number in uOZ:
            eq_number += number ** 2

        # Walk down the ranking and take the document whose score equals the exact-match
        # score, or else the last document that still scored above it.
        n = 0
        done = False
        while n < len(indexedwCD) and not done:
            if indexedwCD[n][1] == eq_number:
                best_n_documents = indexedwCD[n][0]
                done = True
            elif indexedwCD[n][1] < eq_number:
                best_n_documents = indexedwCD[max(n - 1, 0)][0]
                done = True
            n += 1

        return best_n_documents, indexedwCD[0]
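
    # Worked example of the eq_number heuristic (illustrative values only): if the query
    # vectorizes to counts [1, 1, 0], then eq_number = 1**2 + 1**2 = 2, and a document
    # containing each query word exactly once also scores 1*1 + 1*1 = 2, so it is taken
    # as the closest match.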
    # Input: the search string and numberofmatches, the number of best documents wanted.
    # Output: the numberofmatches best document indexes, plus the highest-scoring entry as [index, score].
    def search_with_highest_multiplikation_Output(self, string, numberofmatches):
        # Convert the user input to its bag-of-words count vector.
        user_array = []
        user_array.append(string)
        user_input_OnesZeros = self.vectorizer.transform(user_array)
        uOZ = user_input_OnesZeros.toarray()[0].astype(np.float32, copy=False)
        uiOZ = uOZ[np.newaxis, :]
        uiOZ = uiOZ.transpose()

        sess = tf.Session()
        with tf.device('/gpu:0'):
            with sess.as_default():
                uiOZ_tensor = tf.constant(uiOZ)
                dbOZ_tensor_sparse = convert_sparse_matrix_to_sparse_tensor(self.dbOZ)
                # Dense alternative: wordCountDoku = tf.matmul(uiOZ_tensor, dbOZ_tensor)
                wordCountDoku = tf.sparse_tensor_dense_matmul(dbOZ_tensor_sparse, uiOZ_tensor)
                wCD = np.array(wordCountDoku.eval())

        indexedwCD = []
        for n in range(len(wCD)):
            indexedwCD.append([n, wCD[n][0]])
        indexedwCD = sorted(indexedwCD[::-1], key=lambda tup: tup[1], reverse=True)

        # Return the numberofmatches documents with the highest dot product.
        best_n_documents = []
        for n in range(numberofmatches):
            best_n_documents.append(indexedwCD[n][0])

        return best_n_documents, indexedwCD[0]
    # Input: the search string and numberofmatches.
    # Output: the documents around the exact-match score, sorted by how many bag-of-words
    # entries they share exactly with the query, as [index, number_of_equal_words] pairs.
    def searchPatternMatch(self, string, numberofmatches):
        # Convert the user input to its bag-of-words count vector.
        user_array = []
        user_array.append(string)
        user_input_OnesZeros = self.vectorizer.transform(user_array)
        uOZ = user_input_OnesZeros.toarray()[0].astype(np.float32, copy=False)
        uiOZ = uOZ[np.newaxis, :]
        uiOZ = uiOZ.transpose()

        sess = tf.Session()
        with tf.device('/gpu:0'):
            with sess.as_default():
                uiOZ_tensor = tf.constant(uiOZ)
                dbOZ_tensor_sparse = convert_sparse_matrix_to_sparse_tensor(self.dbOZ)
                # Dense alternative: wordCountDoku = tf.matmul(uiOZ_tensor, dbOZ_tensor)
                wordCountDoku = tf.sparse_tensor_dense_matmul(dbOZ_tensor_sparse, uiOZ_tensor)
                wCD = np.array(wordCountDoku.eval())

        indexedwCD = []
        for n in range(len(wCD)):
            indexedwCD.append([n, wCD[n][0]])
        # Sort by the biggest matches.
        indexedwCD = sorted(indexedwCD[::-1], key=lambda tup: tup[1], reverse=True)

        best_n_documents = []
        best_docs_surrounding = []

        # Score a document would reach if it contained exactly the same words as one
        # grammar scheme (sum of squared query counts).
        eq_number = 0
        for number in uOZ:
            eq_number += number ** 2
        print(eq_number)

        # Collect the documents whose score equals the exact-match score, i.e. the closest
        # grammar schemes (closeness is judged on the match number only, not on individual words).
        n = 0
        done = False
        while n < len(indexedwCD) and not done:
            if indexedwCD[n][1] == eq_number:
                best_docs_surrounding.append(indexedwCD[n][0])
            if indexedwCD[n][1] < eq_number:
                done = True
            n += 1

        # For these surrounding docs, count per vocabulary entry how many word counts match
        # the query exactly (this would be much faster using the sparse matrix directly).
        best_docs_surrounding_new = []
        for doc in best_docs_surrounding:
            dok_BoW = self.dbOZ[doc].toarray()[0].astype(np.float32, copy=False)
            Number_equal_words = 0
            for n in range(len(uiOZ)):
                if uiOZ[n] == dok_BoW[n]:
                    Number_equal_words += 1
            best_docs_surrounding_new.append([doc, Number_equal_words])

        # Sort the result again, keeping the original database indexes.
        best_n_documents = sorted(best_docs_surrounding_new[::-1], key=lambda tup: tup[1], reverse=True)
        return best_n_documents
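

# A minimal usage sketch. The file names below are only examples and assume the database
# dump is called 'legal_docs.hkl' and that Gen_BoW_Model has been run once to produce the
# corresponding 'bagofwords...' and 'DataBaseOneZeros...' dumps.
if __name__ == '__main__':
    fs = FASTsearch('legal_docs.hkl')
    # First run: build and dump the bag-of-words model.
    # fs.Gen_BoW_Model(max_features=5000, analyzer='word')
    # Subsequent runs: load the dumped vectorizer and document-term matrix.
    fs.Load_BoW_Model('bagofwordslegal_docs.pkl', 'DataBaseOneZeroslegal_docs.hkl')
    best_docs, top_match = fs.search_with_highest_multiplikation_Output('some search text', 5)
    print('best document indexes:', best_docs, 'top match [index, score]:', top_match)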