0%

空查询

// 查询所有
GET /index/_search
{}
// 分页查询(from为偏移量,从0开始;size为返回条数)
GET /index/_search
{
  "from": 1,
  "size": 2
}

bool联合查询

  • must: 文档必须完全匹配条件
  • should: should下面会带一个以上的条件,至少满足一个条件
  • must_not: 文档必须不匹配条件
GET /index/_search
{
  "query": {
    "bool": {
      "must_not": [
        {"match": {
          "content": "中国"
        }},
        {
          "match": {
            "content": "平均"
          }
        }
      ]
    }
  }
}

高级检索

// 正则检索
GET /index/_search
{
  "query": {
    "regexp": {
      "FIELD": "REGEXP"
    }
  }
}
// 前缀检索
GET /index/_search
{
  "query": {
    "prefix": {
      "content": {
        "value": "美国"
      }
    }
  }
}
// 不分词检索
GET /index/_search
{
  "query": {
    "term": {
      "content": {
        "value": "美国"
      }
    }
  }
}
// 通配符检索
GET /index/_search
{
  "query": {
    "wildcard": {
      "content": {
        "value": "*天*"
      }
    }
  }
}
// 过滤filter
// 模糊fuzzy
// 权重

在线安装

# v6.3.0版本改成es对应的版本
./bin/elasticsearch-plugin install https://github.com/medcl/elasticsearch-analysis-ik/releases/download/v7.5.2/elasticsearch-analysis-ik-7.5.2.zip

离线安装

cd your-es-root/plugins/ && mkdir ik
# 解压到目录
your-es-root/plugins/ik
# docker安装的可以本地解压后拷贝到容器
docker cp ik/ es:/usr/share/elasticsearch/plugins/

所有ES节点都需要安装!

验证

Kibana打开Dev Tools:

  • 测试分词

    // ik_max_word: 会将文本做最细粒度的拆分
    GET _analyze
    {
    "text": "中国早日收复台湾","tokenizer": "ik_max_word"
    }
    // ik_smart: 会做最粗粒度的拆分
    GET _analyze
    {
    "text": "中国早日收复台湾","tokenizer": "ik_smart"
    }
  • 读写数据

    // 新建index
    PUT /index
    // 设置mapping(7.x已移除type概念),如果ik分词器没有安装成功会报错
    POST index/_mapping
    {
    "properties": {
        "content": {
            "type": "text",
            "analyzer": "ik_max_word",
            "search_analyzer": "ik_smart"
        }
    }
    }
    // 写入数据1
    POST index/_create/1
    {
    "content":"美国留给伊拉克的是个烂摊子吗"
    }
    // 写入数据2
    POST index/_create/2
    {
    "content":"中韩渔警冲突调查:韩警平均每天扣1艘中国渔船"
    }
    // 写入数据3
    POST index/_create/3
    {
    "content":"公安部:各地校车将享最高路权"
    }
    // 写入数据4
    POST index/_create/4
    {
    "content":"中国驻洛杉矶领事馆遭亚裔男子枪击 嫌犯已自首"
    }
    // 查询
    POST index/_search
    {
    "query": {
      "match": {
        "content": "中国"
      }
    }
    }

elasticsearch-analysis-ik

1.创建证书

# 启动一个单机的es
# 进入容器执行下面命令
bin/elasticsearch-certutil cert -out config/elastic-certificates.p12 -pass ""
# 证书拷出容器
docker cp es:/usr/share/elasticsearch/config/elastic-certificates.p12 .
# 将证书拷贝到各个节点

2.各个节点准备es配置文件

# 集群名称保持一致
cluster.name: elasticsearch-cluster
# 集群内唯一
node.name: es-node1
network.bind_host: 0.0.0.0
# 本节点IP
network.publish_host: 192.168.3.17
http.port: 9200
transport.tcp.port: 9300
http.cors.enabled: true
http.cors.allow-origin: "*"
node.master: true
node.data: true
# 这些节点争抢master
cluster.initial_master_nodes: 192.168.3.17,192.168.3.19,192.168.3.20
discovery.seed_hosts: 192.168.3.19,192.168.3.20
# 安全验证相关的
xpack.security.enabled: true
xpack.security.transport.ssl.enabled: true
xpack.security.transport.ssl.verification_mode: certificate
xpack.security.transport.ssl.keystore.path: /usr/share/elasticsearch/config/elastic-certificates.p12
xpack.security.transport.ssl.truststore.path: /usr/share/elasticsearch/config/elastic-certificates.p12

3.准备data目录

# 用于保存es数据,需要是777权限
mkdir data
chmod -R 777 data/

4.启动

docker run -d --name es --net host \
-v /root/es/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml \
-v /root/es/elastic-certificates.p12:/usr/share/elasticsearch/config/elastic-certificates.p12 \
-v /root/es/data:/usr/share/elasticsearch/data \
elasticsearch:7.5.2

5.创建用户

# 登录一台es容器内
# 自动生成好默认用户和密码
bin/elasticsearch-setup-passwords auto
# 手动输入密码
[root@data1 bin]# elasticsearch-setup-passwords interactive
Initiating the setup of passwords for reserved users elastic,apm_system,kibana,logstash_system,beats_system,remote_monitoring_user.
You will be prompted to enter passwords as the process progresses.
Please confirm that you would like to continue [y/N]y
Enter password for [elastic]: 
Reenter password for [elastic]: 
Enter password for [apm_system]: 
Reenter password for [apm_system]: 
Enter password for [kibana]: 
Reenter password for [kibana]: 
Enter password for [logstash_system]: 
Reenter password for [logstash_system]: 
Enter password for [beats_system]: 
Reenter password for [beats_system]: 
Enter password for [remote_monitoring_user]: 
Reenter password for [remote_monitoring_user]: 
Changed password for user [apm_system]
Changed password for user [kibana]
Changed password for user [logstash_system]
Changed password for user [beats_system]
Changed password for user [remote_monitoring_user]
Changed password for user [elastic]

6.验证

# 用户密码都是elastic
curl --user elastic:elastic 'localhost:9200/_cluster/health?pretty'
curl -X GET --user elastic:elastic  "localhost:9200/_cat/nodes?v&pretty"

安装kibana

  • kibana配置
    server.name: kibana
    server.host: "0"
    elasticsearch.hosts: ["http://192.168.3.17:9200","http://192.168.3.19:9200","http://192.168.3.20:9200"]
    xpack.monitoring.ui.container.elasticsearch.enabled: true
    elasticsearch.username: "elastic"
    elasticsearch.password: "elastic"
    # 中文页面
    i18n.locale: zh-CN
  • 启动
    docker run -d --name kibana -p 5601:5601 -v /root/es/kibana.yml:/usr/share/kibana/config/kibana.yml kibana:7.5.2

docker run -d --name kibana -p 5601:5601 \
-e ELASTICSEARCH_HOSTS='["http://192.168.3.17:9200","http://192.168.3.19:9200","http://192.168.3.20:9200"]' \
kibana:7.5.2

环境要求

# 检查vm.max_map_count设置,至少要求262144
grep vm.max_map_count /etc/sysctl.conf
vm.max_map_count=262144
# 在线设置
sysctl -w vm.max_map_count=262144
# ES的data目录需要设置rwx权限
chmod g+rwx /root/es/data

三台机器搭建集群

  • node1
    ```bash
    docker run -d --name es --net host \
    -e node.name=es1 -e cluster.name=es-docker-cluster \
    -e discovery.seed_hosts=192.168.3.19,192.168.3.20 \
    -e cluster.initial_master_nodes=192.168.3.17,192.168.3.19,192.168.3.20 \
    -e network.publish_host=192.168.3.17 \
    -v /root/es/data:/usr/share/elasticsearch/data \
    elasticsearch:7.5.2
    ```

  • node2
    ```bash
    docker run -d --name es --net host \
    -e node.name=es2 -e cluster.name=es-docker-cluster \
    -e discovery.seed_hosts=192.168.3.17,192.168.3.20 \
    -e cluster.initial_master_nodes=192.168.3.17,192.168.3.19,192.168.3.20 \
    -e network.publish_host=192.168.3.19 \
    -v /root/es/data:/usr/share/elasticsearch/data \
    elasticsearch:7.5.2
    ```

  • node3
    ```bash
    docker run -d --name es --net host \
    -e node.name=es3 -e cluster.name=es-docker-cluster \
    -e discovery.seed_hosts=192.168.3.17,192.168.3.19 \
    -e cluster.initial_master_nodes=192.168.3.17,192.168.3.19,192.168.3.20 \
    -e network.publish_host=192.168.3.20 \
    -v /root/es/data:/usr/share/elasticsearch/data \
    elasticsearch:7.5.2
    ```

测试

[root@data2 es]# curl -X GET "localhost:9200/_cat/nodes?v&pretty"
ip           heap.percent ram.percent cpu load_1m load_5m load_15m node.role master name
192.168.3.17           20          98   1    0.07    0.07     0.05 dilm      *      es1
192.168.3.20           12          97   1    0.04    0.03     0.05 dilm      -      es3
192.168.3.19            9          93   1    0.01    0.04     0.05 dilm      -      es2
[root@data1 es]# curl 'localhost:9200/_cluster/health?pretty'
{
  "cluster_name" : "es-docker-cluster",
  "status" : "green",
  "timed_out" : false,
  "number_of_nodes" : 3,
  "number_of_data_nodes" : 3,
  "active_primary_shards" : 0,
  "active_shards" : 0,
  "relocating_shards" : 0,
  "initializing_shards" : 0,
  "unassigned_shards" : 0,
  "delayed_unassigned_shards" : 0,
  "number_of_pending_tasks" : 0,
  "number_of_in_flight_fetch" : 0,
  "task_max_waiting_in_queue_millis" : 0,
  "active_shards_percent_as_number" : 100.0
}

docker run -p 9200:9200 -p 9300:9300 -e "discovery.type=single-node" docker.elastic.co/elasticsearch/elasticsearch:7.5.2