Failback when clusterA comes back

When clusterA comes back, clusterB becomes the new leader and clusterA becomes the follower.

Set up the remote cluster clusterB on clusterA.

Python:

resp = client.cluster.put_settings(
    persistent={
        "cluster": {
            "remote": {
                "clusterB": {
                    "mode": "proxy",
                    "skip_unavailable": "true",
                    "server_name": "clusterb.es.region-b.gcp.elastic-cloud.com",
                    "proxy_socket_connections": "18",
                    "proxy_address": "clusterb.es.region-b.gcp.elastic-cloud.com:9400"
                }
            }
        }
    },
)
print(resp)
Ruby:

response = client.cluster.put_settings(
  body: {
    persistent: {
      cluster: {
        remote: {
          "clusterB": {
            mode: 'proxy',
            skip_unavailable: 'true',
            server_name: 'clusterb.es.region-b.gcp.elastic-cloud.com',
            proxy_socket_connections: '18',
            proxy_address: 'clusterb.es.region-b.gcp.elastic-cloud.com:9400'
          }
        }
      }
    }
  }
)
puts response
JavaScript:

const response = await client.cluster.putSettings({
  persistent: {
    cluster: {
      remote: {
        clusterB: {
          mode: "proxy",
          skip_unavailable: "true",
          server_name: "clusterb.es.region-b.gcp.elastic-cloud.com",
          proxy_socket_connections: "18",
          proxy_address: "clusterb.es.region-b.gcp.elastic-cloud.com:9400",
        },
      },
    },
  },
});
console.log(response);
Console:

### On clusterA ###
PUT _cluster/settings
{
  "persistent": {
    "cluster": {
      "remote": {
        "clusterB": {
          "mode": "proxy",
          "skip_unavailable": "true",
          "server_name": "clusterb.es.region-b.gcp.elastic-cloud.com",
          "proxy_socket_connections": "18",
          "proxy_address": "clusterb.es.region-b.gcp.elastic-cloud.com:9400"
        }
      }
    }
  }
}
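Optionally, before proceeding, you can confirm that clusterA can reach clusterB over this remote connection. This check is not part of the original walkthrough; a minimal sketch using the remote info API with the same Python client:

# Sketch: verify the remote cluster connection from clusterA.
resp = client.cluster.remote_info()
# The response has one entry per configured remote cluster;
# "connected" should be true for clusterB before you continue.
print(resp["clusterB"]["connected"])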
Existing data needs to be discarded before you can turn any index into a follower. Ensure the most up-to-date data is available on clusterB prior to deleting any indices on clusterA.
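One way to confirm that clusterB holds the most recent data is to compare document counts on both clusters before deleting anything. This step is not part of the original walkthrough; it is a sketch that assumes separate Python clients for each cluster (client_a and client_b are illustrative names) and uses the leader index name from the steps below:

# Sketch: compare document counts before discarding the old index on clusterA.
count_a = client_a.count(index="kibana_sample_data_ecommerce")["count"]
count_b = client_b.count(index="kibana_sample_data_ecommerce2")["count"]
print(f"clusterA: {count_a} docs, clusterB (leader): {count_b} docs")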
Python:

resp = client.indices.delete(
    index="kibana_sample_data_ecommerce",
)
print(resp)
Ruby:

response = client.indices.delete(
  index: 'kibana_sample_data_ecommerce'
)
puts response
JavaScript:

const response = await client.indices.delete({
  index: "kibana_sample_data_ecommerce",
});
console.log(response);
Console:

### On clusterA ###
DELETE kibana_sample_data_ecommerce
Create a follower index on clusterA, now following the leader index in clusterB.
Python:

resp = client.ccr.follow(
    index="kibana_sample_data_ecommerce",
    wait_for_active_shards="1",
    remote_cluster="clusterB",
    leader_index="kibana_sample_data_ecommerce2",
)
print(resp)
JavaScript:

const response = await client.ccr.follow({
  index: "kibana_sample_data_ecommerce",
  wait_for_active_shards: 1,
  remote_cluster: "clusterB",
  leader_index: "kibana_sample_data_ecommerce2",
});
console.log(response);
Console:

### On clusterA ###
PUT /kibana_sample_data_ecommerce/_ccr/follow?wait_for_active_shards=1
{
  "remote_cluster": "clusterB",
  "leader_index": "kibana_sample_data_ecommerce2"
}
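If you want to watch replication catch up, the follow stats API reports the follower's progress. This check is not part of the original walkthrough; a minimal sketch with the Python client:

# Sketch: inspect replication progress for the new follower index on clusterA.
resp = client.ccr.follow_stats(index="kibana_sample_data_ecommerce")
print(resp)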
The index on the follower cluster now contains the updated documents.
Python:

resp = client.search(
    index="kibana_sample_data_ecommerce",
    q="kimchy",
)
print(resp)
Ruby:

response = client.search(
  index: 'kibana_sample_data_ecommerce',
  q: 'kimchy'
)
puts response
JavaScript:

const response = await client.search({
  index: "kibana_sample_data_ecommerce",
  q: "kimchy",
});
console.log(response);
Console:

### On clusterA ###
GET kibana_sample_data_ecommerce/_search?q=kimchy
If a soft delete is merged away before it can be replicated to a follower, the following process will fail due to incomplete history on the leader; see index.soft_deletes.retention_lease.period for more details.
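If followers may lag for long stretches, you can lengthen how long soft deletes are retained on the leader index. This is not part of the original walkthrough; a minimal sketch with the Python client, where the 24h value and the leader index name are illustrative choices:

# Sketch: extend soft-delete retention on the leader so history stays available
# to followers for longer (the default retention lease period is 12h).
resp = client.indices.put_settings(
    index="kibana_sample_data_ecommerce2",
    settings={"index.soft_deletes.retention_lease.period": "24h"},
)
print(resp)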