
ElasticSearch NEST manually map subfields needed for analyzer


I have been able to work out the correct ElasticSearch commands to insert and search my records, based on my original request below:

Original ElasticSearch request to be converted to NEST

PUT /sample
{
  "settings": {
    "index.number_of_shards": 5,
    "index.number_of_replicas": 0,
    "analysis": {
      "filter": {
        "nGram_filter": {
          "type": "nGram",
          "min_gram": 2,
          "max_gram": 20,
          "token_chars": [
            "letter",
            "digit"
          ]
        },
        "edgenGram_filter": {
          "type": "edgeNGram",
          "min_gram": 2,
          "max_gram": 20
        }
      },
      "analyzer": {
        "ngram_index_analyzer": {
          "type": "custom",
          "tokenizer": "keyword",
          "filter": [
            "lowercase",
            "nGram_filter"
          ]
        },
        "edge_ngram_index_analyzer": {
          "type": "custom",
          "tokenizer": "keyword",
          "filter": [
            "lowercase",
            "edgenGram_filter"
          ]
        }
      }
    }
  },
  "mappings": {
    "test": {
      "properties": {
        "name": {
          "type": "string",
          "fields": {
            "prefixes": {
              "type": "string",
              "analyzer": "edge_ngram_index_analyzer",
              "search_analyzer": "standard"
            },
            "substrings": {
              "type": "string",
              "analyzer": "ngram_index_analyzer",
              "search_analyzer": "standard"
            }
          }
        }
      }
    }
  }
}

However, I am now having problems converting this sample to the equivalent NEST call. Here is what I have done so far; it compiles and creates the index, but the prefixes and substrings subfields do not exist.

Client.CreateIndex("sample", i => i
                    .Settings(s => s
                        .NumberOfShards(10) 
                        .NumberOfReplicas(0)
                        .Analysis(a => a
                            .TokenFilters(tf => tf
                                .NGram("nGram_filter", td => td
                                    .MinGram(2)
                                    .MaxGram(20)
                                 )
                                .EdgeNGram("edgeNGram", td => td
                                    .MinGram(2)
                                    .MaxGram(20)
                                 )
                            )
                            .Analyzers(anz => anz
                                .Custom("ngram_index_analyzer", cc => cc
                                    .Tokenizer("keyword")
                                    .Filters("lowercase", "nGram_filter")
                                )
                                .Custom("edge_ngram_index_analyzer", cc => cc
                                    .Tokenizer("keyword")
                                    .Filters("lowercase", "edgenGram_filter")
                                )
                            )
                        )
                    )
                    .Mappings(m => m
                        .Map<test>(map => map
                            .Properties(ps => ps
                                 .Text(t => t
                                     .Name(n => n.name)                 
                                     .Fields(f => f
                                         .Text(tt => tt
                                            .Name("prefixes")
                                            .Analyzer("edge_ngram_index_analyzer")
                                            .SearchAnalyzer("standard")
                                         )
                                         .Text(tt => tt
                                            .Name("substrings")
                                            .Analyzer("ngram_index_analyzer")
                                            .SearchAnalyzer("standard")
                                         )
                                     )
                                 )
                            )
                        )
                    )
                );
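
For reference, the Map<test> call above assumes a POCO along these lines. The original post never shows it, so the lowercase class and property names below are an assumption inferred from .Map<test> and .Name(n => n.name):

public class test
{
    // "name" is the only property the mapping above configures;
    // the lowercase casing is assumed from the lambdas in the mapping code.
    public string name { get; set; }
}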

Solution

  • It seems there is a typo in the EdgeNGram token filter name: it is registered as edgeNGram but referenced by the analyzer as edgenGram_filter. When I run the following mapping using NEST

    client.CreateIndex("sample", i => i
            .Settings(s => s
                .NumberOfShards(10)
                .NumberOfReplicas(0)
                .Analysis(a => a
                    .Analyzers(anz => anz
                        .Custom("ngram_index_analyzer", cc => cc
                            .Tokenizer("keyword")
                            .Filters("lowercase", "nGram_filter")
                        )
                        .Custom("edge_ngram_index_analyzer", cc => cc
                            .Tokenizer("keyword")
                            .Filters("lowercase", "edgeNGram_filter")
                        )
                    )
                    .TokenFilters(tf => tf
                        .NGram("nGram_filter", td => td
                            .MinGram(2)
                            .MaxGram(20)
                            )
                        .EdgeNGram("edgeNGram_filter", td => td
                            .MinGram(2)
                            .MaxGram(20)
                            )
                    )
                )
            )
            .Mappings(m => m
                .Map<test>(map => map
                    .Properties(ps => ps
                        .Text(t => t
                            .Name(n => n.name)
                            .Fields(f => f
                                .Text(tt => tt
                                    .Name("prefixes")
                                    .Analyzer("edge_ngram_index_analyzer")
                                    .SearchAnalyzer("standard")
                                )
                                .Text(tt => tt
                                    .Name("substrings")
                                    .Analyzer("ngram_index_analyzer")
                                    .SearchAnalyzer("standard")
                                )
                            )
                        )
                    )
                )
            )
        );
    

    NEST converts it into the proper JSON, including both subfields. Hope that helps.
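
    As a follow-up, here is a minimal sketch (not from the original post) of how the subfields could be exercised once the index exists. It assumes the same sample index, the test POCO referenced by Map<test>, and a NEST 5.x/6.x client instance named client:

    // Index a document and refresh so it is searchable right away
    client.Index(new test { name = "Elasticsearch" }, i => i.Index("sample"));
    client.Refresh("sample");

    // Query the edge n-gram subfield; "ela" matches a prefix of "elasticsearch"
    var response = client.Search<test>(s => s
        .Index("sample")
        .Query(q => q
            .Match(m => m
                .Field(f => f.name.Suffix("prefixes")) // targets the name.prefixes subfield
                .Query("ela")
            )
        )
    );

    Querying name.substrings in the same way (for example with "stic") should also match, since the nGram_filter analyzer indexes all 2-20 character fragments of the lowercased value.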