kibana操作ES

kibana操作ES

我只写一部分,后面再完善吧..........

# Create an index first.
# Index names must be lowercase; per the author, an uppercase first letter returns a 405 error.
PUT /user

# Insert a document (RESTful-style URL: <index>/<type>/<id>).
# Here: index "user", type "student", document id 1.
POST user/student/1
{
  "sname":"zhangsan",
  "sno":"20165420",
  "score":{
    "math":[76,88,100],
    "english":[78,86,80],
    "chinese":[90,102,106]
  }
}

# Insert document id 2 (Chinese name — used later to compare analyzers).
POST user/student/2
{
  "sname":"李四",
  "sno":"20165420",
  "score":{
    "math":[76,88,100],
    "english":[78,86,80],
    "chinese":[90,102,106]
  }
}

# Insert document id 3 (another Chinese name).
POST user/student/3
{
  "sname":"刘三",
  "sno":"20165420",
  "score":{
    "math":[76,88,100],
    "english":[78,86,80],
    "chinese":[90,102,106]
  }
}

# Update a document.
# PUT is a full replace: the old document is deleted and overwritten with
# exactly the fields supplied here — any field not listed is lost.
PUT user/student/1
{
  "sname":"张三",
  "sno":"20165420",
  "sex":"",
  "age":"18",
  "score":{
    "math":[76,88,100],
    "english":[78,86,80],
    "chinese":[90,102,106]
  }
}

# Delete a single document by id.
DELETE user/student/1

# Query data: fetch a single document by id.
GET user/student/1

# Query data: fetch all documents in the type.
GET user/student/_search

# Query data with search conditions — first, understand analysis.
# ES ships two relevant built-in analyzers:
# keyword:  no tokenization — the whole text is a single token
# standard: splits Chinese text into individual characters

# Result: "张三"
GET _analyze
{
  "analyzer": "keyword",
  "text":"张三"
}
# Result: "张","三"
GET _analyze
{
  "analyzer": "standard",
  "text": "张三"
}

# The default analyzers are not useful for Chinese text,
# so install a Chinese analyzer plugin (IK).
# Download the ik analyzer matching your ES version from:
# https://github.com/medcl/elasticsearch-analysis-ik/releases
# Unpack it into a new "ik" folder under the ES plugins directory, then restart ES.

# The IK plugin provides two analyzers:
# ik_smart    (coarse-grained segmentation)
# ik_max_word (fine-grained segmentation)

# "科技有限公司" result: "科技","有限公司"
# "张三" result: "张三"
# "zhangsan" result: "zhangsan"
GET _analyze
{
  "analyzer": "ik_smart",
  "text":"zhangsan"
}
# "科技有限公司" result: "科技","有限公司","有限","公司"
# "张三" result: "张三","张","三"
# "zhangsan" result: "zhangsan"
GET _analyze
{
  "analyzer": "ik_max_word",
  "text":"zhangsan"
}

# Configure the analyzer in the index settings. If no analyzer is specified
# when the index is created, the default "standard" analyzer is used.
# Analyzer settings cannot be changed on an existing index, so delete the
# index first, then set the analyzer when re-creating it.
#
# FIX: in the Kibana console a comment must be on its own line. The original
# request had a trailing inline comment ("DELETE /user   # ..."), which the
# console sends as part of the URL — that is why the delete appeared to fail.
DELETE /user

# NOTE: once the index is deleted above, these per-document deletes are
# unnecessary and will return 404 (index_not_found).
DELETE /user/student/2
DELETE /user/student/3
原文地址:https://www.cnblogs.com/IT_CH/p/12813606.html