I am very new to NLP and IR programming. I am trying to implement a deep NLP pipeline, i.e. adding lemmatization and dependency-parsing features to the indexing of sentences. The following is my schema and searcher.
my_analyzer = RegexTokenizer() | StopFilter() | LowercaseFilter() | StemFilter() | Lemmatizer()
pos_analyser = RegexTokenizer() | StopFilter() | LowercaseFilter() | PosTagger()

schema = Schema(id=ID(stored=True, unique=True),
                stem_text=TEXT(stored=True, analyzer=my_analyzer),
                pos_tag=pos_analyser)

for sentence in sent_tokenize_list1:
    writer.add_document(stem_text=sentence, pos_tag=sentence)
for sentence in sent_tokenize_list2:
    writer.add_document(stem_text=sentence, pos_tag=sentence)
writer.commit()

with ix.searcher() as searcher:
    og = qparser.OrGroup.factory(0.9)
    query_text = MultifieldParser(["stem_text", "pos_tag"], schema=ix.schema, group=og).parse(
        "who is controlling the threat of locusts?")
    results = searcher.search(query_text, sortedby=scores, limit=10)
This is the custom PosTagger filter used in the pos_analyser chain.
import itertools

from nltk import pos_tag  # assuming NLTK's pos_tag is the tagger used here
from whoosh.analysis import Filter


class PosTagger(Filter):
    def __eq__(self, other):
        return (other
                and self.__class__ is other.__class__
                and self.__dict__ == other.__dict__)

    def __ne__(self, other):
        return not self == other

    def __init__(self):
        self.cache = {}

    def __call__(self, tokens):
        assert hasattr(tokens, "__iter__")
        words = []
        tokens1, tokens2 = itertools.tee(tokens)
        for t in tokens1:
            words.append(t.text)
        tags = pos_tag(words)
        # Rewrite each token's text as "word TAG"
        i = 0
        for t in tokens2:
            t.text = tags[i][0] + " " + tags[i][1]
            i += 1
            yield t
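For reference, Whoosh analyzers can be called directly on a string, so this is roughly what the pos_analyser chain emits (the sentence and the exact tags below are only illustrative):

for token in pos_analyser("Locust swarms are threatening crops"):
    print(token.text)
# prints something like:
# locust NN
# swarms NNS
# threatening VBG
# crops NNS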
I am getting the following error:
whoosh.fields.FieldConfigurationError: CompositeAnalyzer(RegexTokenizer(expression=re.compile('\w+(\.?\w+)*'), gaps=False), StopFilter(stops=frozenset({'for', 'will', 'tbd', 'with', 'and', 'the', 'if', 'it', 'by', 'is', 'are', 'this', 'as', 'when', 'us', 'or', 'from', 'yet', 'you', 'have', 'can', 'be', 'we', 'of', 'to', 'on', 'a', 'an', 'your', 'at', 'in', 'may', 'not', 'that'}), min=2, max=None, renumber=True), LowercaseFilter(), PosTagger(cache={})) is not a FieldType object
Am I doing it the wrong way? Is this the proper way to add an NLP pipeline to a search engine?
pos_tag should be assigned to a field, TEXT(stored=True, analyzer=pos_analyser), not to the pos_analyser directly. So in the schema you should have:
schema = Schema(id=ID(stored=True, unique=True),
                stem_text=TEXT(stored=True, analyzer=my_analyzer),
                pos_tag=TEXT(stored=True, analyzer=pos_analyser))
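For completeness, a minimal sketch of how the fixed schema plugs into the rest of your code could look like this (Lemmatizer, PosTagger and the sent_tokenize_list variables are your own definitions and are only assumed here; I also dropped sortedby=scores, since Whoosh already ranks results by score by default):

import os

from whoosh.analysis import LowercaseFilter, RegexTokenizer, StemFilter, StopFilter
from whoosh.fields import ID, TEXT, Schema
from whoosh.index import create_in
from whoosh.qparser import MultifieldParser, OrGroup

# Both fields are TEXT fields; only the analyzer attached to each one differs.
# Lemmatizer and PosTagger are the custom filters from the question.
my_analyzer = RegexTokenizer() | StopFilter() | LowercaseFilter() | StemFilter() | Lemmatizer()
pos_analyser = RegexTokenizer() | StopFilter() | LowercaseFilter() | PosTagger()

schema = Schema(id=ID(stored=True, unique=True),
                stem_text=TEXT(stored=True, analyzer=my_analyzer),
                pos_tag=TEXT(stored=True, analyzer=pos_analyser))

os.makedirs("indexdir", exist_ok=True)
ix = create_in("indexdir", schema)

writer = ix.writer()
for sentence in sent_tokenize_list1 + sent_tokenize_list2:
    writer.add_document(stem_text=sentence, pos_tag=sentence)
writer.commit()

with ix.searcher() as searcher:
    og = OrGroup.factory(0.9)
    parser = MultifieldParser(["stem_text", "pos_tag"], schema=ix.schema, group=og)
    results = searcher.search(parser.parse("who is controlling the threat of locusts?"), limit=10)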