Search code examples
elasticsearchindexingelasticsearch-5

"unknown key [tagindex_v2] for create index" error when creating an index in elasticsearch


Hi, I'm trying to copy some indices from one Elasticsearch instance to another. I'm trying to copy this index called "tagindex_v2". I've sent a PUT request to http://localhost:9400/tagindex_v2 and in the body I've copied the JSON,

{
    "tagindex_v2": {
        "aliases": {},
        "mappings": {
            "properties": {
                "deprecated": {
                    "type": "boolean"
                },
                "description": {
                    "type": "keyword",
                    "normalizer": "keyword_normalizer",
                    "fields": {
                        "delimited": {
                            "type": "text",
                            "analyzer": "word_delimited"
                        },
                        "keyword": {
                            "type": "keyword"
                        }
                    }
                },
                "hasOwners": {
                    "type": "boolean"
                },
                "id": {
                    "type": "keyword",
                    "normalizer": "keyword_normalizer",
                    "fields": {
                        "delimited": {
                            "type": "text",
                            "analyzer": "word_delimited"
                        },
                        "keyword": {
                            "type": "keyword"
                        },
                        "ngram": {
                            "type": "text",
                            "analyzer": "partial"
                        }
                    }
                },
                "name": {
                    "type": "keyword",
                    "normalizer": "keyword_normalizer",
                    "fields": {
                        "delimited": {
                            "type": "text",
                            "analyzer": "word_delimited"
                        },
                        "keyword": {
                            "type": "keyword"
                        },
                        "ngram": {
                            "type": "text",
                            "analyzer": "partial"
                        }
                    }
                },
                "owners": {
                    "type": "text",
                    "fields": {
                        "keyword": {
                            "type": "keyword"
                        }
                    },
                    "analyzer": "urn_component"
                },
                "removed": {
                    "type": "boolean"
                },
                "urn": {
                    "type": "keyword"
                }
            }
        },
        "settings": {
            "index": {
                "max_ngram_diff": "17",
                "routing": {
                    "allocation": {
                        "include": {
                            "_tier_preference": "data_content"
                        }
                    }
                },
                "number_of_shards": "1",
                "provided_name": "tagindex_v2",
                "creation_date": "1660141415133",
                "analysis": {
                    "filter": {
                        "partial_filter": {
                            "type": "edge_ngram",
                            "min_gram": "3",
                            "max_gram": "20"
                        },
                        "custom_delimiter": {
                            "type": "word_delimiter",
                            "preserve_original": "true",
                            "split_on_numerics": "false"
                        },
                        "urn_stop_filter": {
                            "type": "stop",
                            "stopwords": [
                                "urn",
                                "li",
                                "container",
                                "datahubpolicy",
                                "datahubaccesstoken",
                                "datahubupgrade",
                                "corpgroup",
                                "dataprocess",
                                "mlfeaturetable",
                                "mlmodelgroup",
                                "datahubexecutionrequest",
                                "invitetoken",
                                "datajob",
                                "assertion",
                                "dataplatforminstance",
                                "schemafield",
                                "tag",
                                "glossaryterm",
                                "mlprimarykey",
                                "dashboard",
                                "notebook",
                                "mlmodeldeployment",
                                "datahubretention",
                                "dataplatform",
                                "corpuser",
                                "test",
                                "mlmodel",
                                "glossarynode",
                                "mlfeature",
                                "dataflow",
                                "datahubingestionsource",
                                "domain",
                                "telemetry",
                                "datahubsecret",
                                "dataset",
                                "chart",
                                "dataprocessinstance"
                            ]
                        }
                    },
                    "normalizer": {
                        "keyword_normalizer": {
                            "filter": [
                                "lowercase",
                                "asciifolding"
                            ]
                        }
                    },
                    "analyzer": {
                        "browse_path_hierarchy": {
                            "tokenizer": "path_hierarchy"
                        },
                        "slash_pattern": {
                            "filter": [
                                "lowercase"
                            ],
                            "tokenizer": "slash_tokenizer"
                        },
                        "partial_urn_component": {
                            "filter": [
                                "lowercase",
                                "urn_stop_filter",
                                "custom_delimiter",
                                "partial_filter"
                            ],
                            "tokenizer": "urn_char_group"
                        },
                        "word_delimited": {
                            "filter": [
                                "custom_delimiter",
                                "lowercase",
                                "stop"
                            ],
                            "tokenizer": "main_tokenizer"
                        },
                        "partial": {
                            "filter": [
                                "custom_delimiter",
                                "lowercase",
                                "partial_filter"
                            ],
                            "tokenizer": "main_tokenizer"
                        },
                        "urn_component": {
                            "filter": [
                                "lowercase",
                                "urn_stop_filter",
                                "custom_delimiter"
                            ],
                            "tokenizer": "urn_char_group"
                        },
                        "custom_keyword": {
                            "filter": [
                                "lowercase",
                                "asciifolding"
                            ],
                            "tokenizer": "keyword"
                        }
                    },
                    "tokenizer": {
                        "main_tokenizer": {
                            "pattern": "[ ./]",
                            "type": "pattern"
                        },
                        "slash_tokenizer": {
                            "pattern": "[/]",
                            "type": "pattern"
                        },
                        "urn_char_group": {
                            "pattern": "[:\\s(),]",
                            "type": "pattern"
                        }
                    }
                },
                "number_of_replicas": "1",
                "uuid": "AoFgpzTXRHyyTL7cuLsS1A",
                "version": {
                    "created": "7160299"
                }
            }
        }
    }
}

I get this error

{
    "error": {
        "root_cause": [
            {
                "type": "parse_exception",
                "reason": "unknown key [tagindex_v2] for create index"
            }
        ],
        "type": "parse_exception",
        "reason": "unknown key [tagindex_v2] for create index"
    },
    "status": 400
}

Can someone please tell me how I should fix this? I'm simply copying the JSON content from an existing index on a different instance and creating it here.

Thanks in advance


Solution

  • You need to take only what's located inside the tagindex_v2 key, i.e.

    {
        "aliases": {},
        "mappings": {
            "properties": {
                "deprecated": {
                    "type": "boolean"
                },
                ...
            }
        },
        "settings": {
            "index": {
                "max_ngram_diff": "17",
                ...
        }
    }
    

    You also need to remove the following properties from the settings section:

    • provided_name
    • creation_date
    • uuid
    • version