Failback when clusterA comes back

When clusterA comes back up, clusterB becomes the new leader and clusterA becomes the follower.

  1. Set up remote cluster clusterB on clusterA. A connectivity check sketch follows the examples below.

    ```python
    resp = client.cluster.put_settings(
        persistent={
            "cluster": {
                "remote": {
                    "clusterB": {
                        "mode": "proxy",
                        "skip_unavailable": "true",
                        "server_name": "clusterb.es.region-b.gcp.elastic-cloud.com",
                        "proxy_socket_connections": "18",
                        "proxy_address": "clusterb.es.region-b.gcp.elastic-cloud.com:9400"
                    }
                }
            }
        },
    )
    print(resp)
    ```
    ```ruby
    response = client.cluster.put_settings(
      body: {
        persistent: {
          cluster: {
            remote: {
              "clusterB": {
                mode: 'proxy',
                skip_unavailable: 'true',
                server_name: 'clusterb.es.region-b.gcp.elastic-cloud.com',
                proxy_socket_connections: '18',
                proxy_address: 'clusterb.es.region-b.gcp.elastic-cloud.com:9400'
              }
            }
          }
        }
      }
    )
    puts response
    ```
    ```js
    const response = await client.cluster.putSettings({
      persistent: {
        cluster: {
          remote: {
            clusterB: {
              mode: "proxy",
              skip_unavailable: "true",
              server_name: "clusterb.es.region-b.gcp.elastic-cloud.com",
              proxy_socket_connections: "18",
              proxy_address: "clusterb.es.region-b.gcp.elastic-cloud.com:9400",
            },
          },
        },
      },
    });
    console.log(response);
    ```
    ```console
    ### On clusterA ###
    PUT _cluster/settings
    {
      "persistent": {
        "cluster": {
          "remote": {
            "clusterB": {
              "mode": "proxy",
              "skip_unavailable": "true",
              "server_name": "clusterb.es.region-b.gcp.elastic-cloud.com",
              "proxy_socket_connections": "18",
              "proxy_address": "clusterb.es.region-b.gcp.elastic-cloud.com:9400"
            }
          }
        }
      }
    }
    ```
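    Before moving on, it can help to confirm that clusterA can actually reach clusterB. This check is not part of the original steps; it is a minimal sketch using the remote info API, assuming the same Python client as above:

    ```python
    # Verify that clusterB is registered and reachable from clusterA.
    resp = client.cluster.remote_info()
    # The response is keyed by remote cluster alias; "connected" should be true.
    print(resp["clusterB"]["connected"])
    ```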
  2. Existing data needs to be discarded before you can turn any index into a follower. Ensure the most up-to-date data is available on clusterB prior to deleting any indices on clusterA; a count-check sketch follows the examples below.

    ```python
    resp = client.indices.delete(
        index="kibana_sample_data_ecommerce",
    )
    print(resp)
    ```
    ```ruby
    response = client.indices.delete(
      index: 'kibana_sample_data_ecommerce'
    )
    puts response
    ```
    ```js
    const response = await client.indices.delete({
      index: "kibana_sample_data_ecommerce",
    });
    console.log(response);
    ```
    ```console
    ### On clusterA ###
    DELETE kibana_sample_data_ecommerce
    ```
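    As a safeguard before running the delete above, you can compare document counts. This is a sketch, not part of the original tutorial, and clusterB_client is a hypothetical second client connected to clusterB:

    ```python
    # Run against clusterB: confirm the leader index holds the expected data
    # before discarding the copy on clusterA.
    resp = clusterB_client.count(index="kibana_sample_data_ecommerce2")
    print(resp["count"])
    ```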
  3. Create a follower index on clusterA, now following the leader index in clusterB. A stats sketch for monitoring the new follower appears after the examples below.

    ```python
    resp = client.ccr.follow(
        index="kibana_sample_data_ecommerce",
        wait_for_active_shards="1",
        remote_cluster="clusterB",
        leader_index="kibana_sample_data_ecommerce2",
    )
    print(resp)
    ```
    ```js
    const response = await client.ccr.follow({
      index: "kibana_sample_data_ecommerce",
      wait_for_active_shards: 1,
      remote_cluster: "clusterB",
      leader_index: "kibana_sample_data_ecommerce2",
    });
    console.log(response);
    ```
    ```console
    ### On clusterA ###
    PUT /kibana_sample_data_ecommerce/_ccr/follow?wait_for_active_shards=1
    {
      "remote_cluster": "clusterB",
      "leader_index": "kibana_sample_data_ecommerce2"
    }
    ```
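    To watch the new follower catch up, you can poll the follower stats API. A minimal sketch, assuming the same Python client connected to clusterA:

    ```python
    # Shard-level replication stats for the follower index on clusterA.
    resp = client.ccr.follow_stats(index="kibana_sample_data_ecommerce")
    print(resp)
    ```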
  4. The index on the follower cluster now contains the updated documents.

    ```python
    resp = client.search(
        index="kibana_sample_data_ecommerce",
        q="kimchy",
    )
    print(resp)
    ```
    ```ruby
    response = client.search(
      index: 'kibana_sample_data_ecommerce',
      q: 'kimchy'
    )
    puts response
    ```
    ```js
    const response = await client.search({
      index: "kibana_sample_data_ecommerce",
      q: "kimchy",
    });
    console.log(response);
    ```
    ```console
    ### On clusterA ###
    GET kibana_sample_data_ecommerce/_search?q=kimchy
    ```

    If a soft delete is merged away before it can be replicated to a follower, the follow process will fail due to incomplete history on the leader. See index.soft_deletes.retention_lease.period for more details.
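
    For illustration, that retention period can be raised on the leader index so history survives long enough to replicate. This is a sketch to run against clusterB, and the 24h value is an arbitrary example rather than a recommendation:

    ```python
    # Lengthen how long soft deletes are retained on the leader index
    # (kibana_sample_data_ecommerce2 lives on clusterB in this tutorial).
    resp = client.indices.put_settings(
        index="kibana_sample_data_ecommerce2",
        settings={"index.soft_deletes.retention_lease.period": "24h"},
    )
    print(resp)
    ```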