Package org.elasticsearch.cluster.node

Examples of org.elasticsearch.cluster.node.DiscoveryNode


        assertThat("Nodes urls must not be empty...", adresses, not(emptyCollectionOf(TransportAddress.class)));

    // Testing if we are really connected to a cluster node
        assertThat("We should be connected at least to one node.", tClient.connectedNodes(), not(emptyCollectionOf(DiscoveryNode.class)));

    DiscoveryNode node = tClient.connectedNodes().get(0);
        assertThat(node.getName(), is("junit.node.transport"));
        assertThat("We should be connected to the master node.", node.isMasterNode(), is(true));
  }
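
For context, a minimal sketch of how such a transport-client test might construct its client before the assertions above run. This uses the Elasticsearch 1.x TransportClient API; the cluster name, host, and port are illustrative assumptions, not values taken from the snippet:

    import org.elasticsearch.client.transport.TransportClient;
    import org.elasticsearch.cluster.node.DiscoveryNode;
    import org.elasticsearch.common.settings.ImmutableSettings;
    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.common.transport.InetSocketTransportAddress;

    public class TransportClientSketch {
        public static void main(String[] args) {
            // hypothetical cluster name; must match the test node's cluster.name
            Settings settings = ImmutableSettings.settingsBuilder()
                    .put("cluster.name", "junit.cluster")
                    .build();
            TransportClient tClient = new TransportClient(settings)
                    .addTransportAddress(new InetSocketTransportAddress("localhost", 9300));

            // connectedNodes() lists the DiscoveryNodes the client is attached to;
            // it is empty when no node could be reached
            for (DiscoveryNode node : tClient.connectedNodes()) {
                System.out.printf("connected to %s at %s, master-eligible: %s%n",
                        node.getName(), node.getAddress(), node.isMasterNode());
            }
            tClient.close();
        }
    }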


                l.add(routing);
            }
        }
        for (Iterator<RiverRouting> it = unassigned.iterator(); it.hasNext(); ) {
            RiverRouting routing = it.next();
            DiscoveryNode smallest = null;
            int smallestSize = Integer.MAX_VALUE;
            for (Map.Entry<DiscoveryNode, List<RiverRouting>> entry : nodesToRivers.entrySet()) {
                if (RiverNodeHelper.isRiverNode(entry.getKey(), routing.riverName())) {
                    if (entry.getValue().size() < smallestSize) {
                        smallestSize = entry.getValue().size();
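
The truncated loop above performs a least-loaded assignment: for each unassigned river it scans the node-to-rivers map and picks the eligible node currently holding the fewest rivers. A generic sketch of that selection in plain Java, with the RiverNodeHelper eligibility filter omitted for brevity (names here are illustrative, not Elasticsearch API):

    import java.util.List;
    import java.util.Map;

    public final class LeastLoaded {
        // Pick the key whose bucket currently holds the fewest items, or null
        // if the map is empty. The eligibility check (isRiverNode above) is
        // omitted here for brevity.
        public static <K, V> K pickLeastLoaded(Map<K, List<V>> buckets) {
            K smallest = null;
            int smallestSize = Integer.MAX_VALUE;
            for (Map.Entry<K, List<V>> entry : buckets.entrySet()) {
                if (entry.getValue().size() < smallestSize) {
                    smallestSize = entry.getValue().size();
                    smallest = entry.getKey();
                }
            }
            return smallest;
        }
    }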

    }

    private class ApplyRivers implements RiverClusterStateListener {
        @Override
        public void riverClusterChanged(RiverClusterChangedEvent event) {
            DiscoveryNode localNode = clusterService.localNode();
            RiverClusterState state = event.state();

            // first, go over and delete ones that either don't exist or are not allocated
            for (final RiverName riverName : rivers.keySet()) {
                RiverRouting routing = state.routing().routing(riverName);
                if (routing == null || !localNode.equals(routing.node())) {
                    // not routed at all and not allocated here; clean it up (the relevant ones were deleted earlier)
                    closeRiver(riverName);
                    // also, double check and delete the river content if the river was deleted (_meta does not exist)
                    try {
                        client.prepareGet(riverIndexName, riverName.name(), "_meta").setListenerThreaded(true).execute(new ActionListener<GetResponse>() {
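
The listener's test condenses to a single ownership rule: a river belongs on this node only when a routing for it exists and that routing points at the local node. A hedged sketch of that check in isolation, using the same types as the snippet:

    // Sketch only: a river is owned locally when it is routed and the routing
    // points at the local DiscoveryNode (DiscoveryNode.equals is id-based in 1.x).
    private boolean ownedLocally(DiscoveryNode localNode, RiverRouting routing) {
        return routing != null && localNode.equals(routing.node());
    }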

                } else if (isPeerRecovery(shardRouting)) {
                    // check if there is an existing recovery going on and, if the source node is not the same, cancel the recovery to restart it
                    RecoveryState recoveryState = recoveryTarget.recoveryState(indexShard);
                    if (recoveryState != null && recoveryState.getStage() != RecoveryState.Stage.DONE) {
                        // we have an ongoing recovery, find the source based on current routing and compare them
                        DiscoveryNode sourceNode = findSourceNodeForPeerRecovery(routingTable, nodes, shardRouting);
                        if (!recoveryState.getSourceNode().equals(sourceNode)) {
                            logger.debug("[{}][{}] removing shard (recovery source changed), current [{}], global [{}])", shardRouting.index(), shardRouting.id(), currentRoutingEntry, shardRouting);
                            // closing the shard will also cancel any ongoing recovery.
                            indexService.removeShard(shardRouting.id(), "removing shard (recovery source node changed)");
                            shardHasBeenRemoved = true;
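
The comparison above detects a moved recovery source through DiscoveryNode equality. A small sketch of that check on its own (the null guard is an addition for safety, not from the snippet):

    // Restart recovery only when the node we were recovering from is no longer
    // the node the current routing resolves to. DiscoveryNode.equals compares
    // node ids, so a restarted node with a fresh id counts as a changed source.
    private boolean recoverySourceChanged(DiscoveryNode recordedSource, DiscoveryNode currentSource) {
        return recordedSource != null && !recordedSource.equals(currentSource);
    }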

                }
            }
        }

        // if we're in peer recovery, try to find the source node now, so that if this fails we will not create the index shard
        DiscoveryNode sourceNode = null;
        if (isPeerRecovery(shardRouting)) {
            sourceNode = findSourceNodeForPeerRecovery(routingTable, nodes, shardRouting);
            if (sourceNode == null) {
                logger.trace("ignoring initializing shard {} - no source node can be found.", shardRouting.shardId());
                return;
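
isPeerRecovery() itself is not shown in these fragments. A plausible reconstruction, inferred from how it is used above rather than copied from the full source, would be: a replica always recovers from a peer, and so does the target of a relocation.

    // Hedged reconstruction, inferred from the call sites above: a shard
    // peer-recovers when it is a replica, or when it is the target of a
    // relocation (relocatingNodeId is set).
    private boolean isPeerRecovery(ShardRouting shardRouting) {
        return !shardRouting.primary() || shardRouting.relocatingNodeId() != null;
    }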

                    logger.trace("[cluster_shutdown]: stopping the cluster service so no re-routing will occur");
                    clusterService.stop();

                    final CountDownLatch latch = new CountDownLatch(nodes.size());
                    for (ObjectCursor<DiscoveryNode> cursor : nodes) {
                        final DiscoveryNode node = cursor.value;
                        if (node.id().equals(state.nodes().masterNodeId())) {
                            // don't shut down the master yet...
                            latch.countDown();
                        } else {
                            logger.trace("[cluster_shutdown]: sending shutdown request to [{}]", node);
                            transportService.sendRequest(node, SHUTDOWN_NODE_ACTION_NAME, new NodeShutdownRequest(request), new EmptyTransportResponseHandler(ThreadPool.Names.SAME) {
                                @Override
                                public void handleResponse(TransportResponse.Empty response) {
                                    logger.trace("[cluster_shutdown]: received shutdown response from [{}]", node);
                                    latch.countDown();
                                }

                                @Override
                                public void handleException(TransportException exp) {
                                    logger.warn("[cluster_shutdown]: received failed shutdown response from [{}]", exp, node);
                                    latch.countDown();
                                }
                            });
                        }
                    }
                    try {
                        latch.await();
                    } catch (InterruptedException e) {
                        // ignore
                    }
                    logger.info("[cluster_shutdown]: done shutting down all nodes except master, proceeding to master");

                    // now, kill the master
                    logger.trace("[cluster_shutdown]: shutting down the master [{}]", state.nodes().masterNode());
                    transportService.sendRequest(state.nodes().masterNode(), SHUTDOWN_NODE_ACTION_NAME, new NodeShutdownRequest(request), new EmptyTransportResponseHandler(ThreadPool.Names.SAME) {
                        @Override
                        public void handleResponse(TransportResponse.Empty response) {
                            logger.trace("[cluster_shutdown]: received shutdown response from master");
                        }

                        @Override
                        public void handleException(TransportException exp) {
                            logger.warn("[cluster_shutdown]: received failed shutdown response master", exp);
                        }
                    });
                }
            });
            t.start();
        } else {
            final String[] nodesIds = state.nodes().resolveNodesIds(request.nodesIds);
            logger.info("[partial_cluster_shutdown]: requested, shutting down [{}] in [{}]", nodesIds, request.delay);

            for (String nodeId : nodesIds) {
                final DiscoveryNode node = state.nodes().get(nodeId);
                if (node != null) {
                    nodes.add(node);
                }
            }

            Thread t = new Thread(new Runnable() {
                @Override
                public void run() {
                    try {
                        Thread.sleep(request.delay.millis());
                    } catch (InterruptedException e) {
                        // ignore
                    }

                    final CountDownLatch latch = new CountDownLatch(nodesIds.length);
                    for (String nodeId : nodesIds) {
                        final DiscoveryNode node = state.nodes().get(nodeId);
                        if (node == null) {
                            logger.warn("[partial_cluster_shutdown]: no node to shutdown for node_id [{}]", nodeId);
                            latch.countDown();
                            continue;
                        }
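
Both shutdown paths use the same fan-out-and-wait shape: a CountDownLatch sized to the number of targets, counted down on success, on failure, and for skipped nodes alike, so latch.await() can never hang waiting for a response that will not come. A plain-Java sketch of that pattern, with Runnable standing in for the transport request (not Elasticsearch API):

    import java.util.List;
    import java.util.concurrent.CountDownLatch;
    import java.util.concurrent.Executor;

    public final class FanOut {
        // Send every request, then block until each one has either succeeded
        // or failed. Counting down in finally mirrors how the handlers above
        // count down in both handleResponse and handleException.
        public static void sendAndAwait(List<Runnable> requests, Executor executor)
                throws InterruptedException {
            final CountDownLatch latch = new CountDownLatch(requests.size());
            for (final Runnable request : requests) {
                executor.execute(new Runnable() {
                    @Override
                    public void run() {
                        try {
                            request.run(); // success path
                        } catch (RuntimeException e) {
                            // failure path: deliberately fall through to countDown()
                        } finally {
                            latch.countDown();
                        }
                    }
                });
            }
            latch.await();
        }
    }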

     * Finds the routing source node for peer recovery, returning null if it is not found. Note, this method expects the shard
     * routing to *require* peer recovery; use {@link #isPeerRecovery(org.elasticsearch.cluster.routing.ShardRouting)} to
     * check whether it is needed or not.
     */
    private DiscoveryNode findSourceNodeForPeerRecovery(RoutingTable routingTable, DiscoveryNodes nodes, ShardRouting shardRouting) {
        DiscoveryNode sourceNode = null;
        if (!shardRouting.primary()) {
            IndexShardRoutingTable shardRoutingTable = routingTable.index(shardRouting.index()).shard(shardRouting.id());
            for (ShardRouting entry : shardRoutingTable) {
                if (entry.primary() && entry.started()) {
                    // only recover from a started primary; if we can't find one, we will try again next round
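
The method is cut off above. A hedged sketch of how the two recovery sources plausibly resolve, reconstructed from the surrounding logic rather than copied from the full source: a replica recovers from the node holding the started primary, and a relocating shard recovers from the node it is moving away from.

    // Hedged reconstruction of the remainder of findSourceNodeForPeerRecovery:
    private DiscoveryNode findSourceNodeSketch(RoutingTable routingTable, DiscoveryNodes nodes,
                                               ShardRouting shardRouting) {
        if (!shardRouting.primary()) {
            IndexShardRoutingTable shardRoutingTable =
                    routingTable.index(shardRouting.index()).shard(shardRouting.id());
            for (ShardRouting entry : shardRoutingTable) {
                if (entry.primary() && entry.started()) {
                    // only recover from a started primary
                    return nodes.get(entry.currentNodeId());
                }
            }
            return null; // no started primary yet; retried on the next cluster-state round
        }
        if (shardRouting.relocatingNodeId() != null) {
            // a relocating shard recovers from the node it is moving away from
            return nodes.get(shardRouting.relocatingNodeId());
        }
        return null; // a primary that is not relocating does not peer-recover
    }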

        transportService.removeHandler(ACTION_NAME);
    }

    public void publish(RiverClusterState clusterState) {
        final DiscoveryNodes discoNodes = clusterService.state().nodes();
        final DiscoveryNode localNode = discoNodes.localNode();
        for (final DiscoveryNode node : discoNodes) {
            if (node.equals(localNode)) {
                // no need to send to ourselves
                continue;
            }
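
The loop body is truncated here. A plausible continuation reuses the sendRequest / EmptyTransportResponseHandler shape seen in the shutdown snippet earlier; PublishRequest and the failure message below are illustrative stand-ins, not names confirmed from the full source:

    // Hypothetical continuation of the publish loop; only the sendRequest and
    // handler shape is taken from the snippets above.
    transportService.sendRequest(node, ACTION_NAME,
            new PublishRequest(clusterState), // illustrative request type
            new EmptyTransportResponseHandler(ThreadPool.Names.SAME) {
                @Override
                public void handleException(TransportException exp) {
                    logger.warn("failed to publish river cluster state to [{}]", exp, node);
                }
            });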

            }

            // if the allocated or relocating node id doesn't exist in the cluster state, it may be a stale node;
            // make sure we don't do anything with this until the routing table has properly been rerouted to reflect
            // the fact that the node does not exist
            DiscoveryNode node = state.nodes().get(shardRouting.currentNodeId());
            if (node == null) {
                return false;
            }
            // Once all nodes have been upgraded to >= 1.3.0 we eventually get back here
            // (when the cluster state is updated) and have the chance to run this API.
            if (node.getVersion().before(Version.V_1_3_0)) {
                logger.debug("Skip deleting shard instance [{}], a node holding a shard instance is < 1.3.0", shardRouting);
                return false;
            }
            if (shardRouting.relocatingNodeId() != null) {
                node = state.nodes().get(shardRouting.relocatingNodeId());
                if (node == null) {
                    return false;
                }
                if (node.getVersion().before(Version.V_1_3_0)) {
                    logger.debug("Skip deleting deleting shard instance [{}], a node holding a shard instance is < 1.3.0", shardRouting);
                    return false;
                }
            }
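
Generalized, the two checks above form one gate: the operation may proceed only when every node that might hold a copy of the shard, the current node and any relocation target, is present in the cluster state and at or above the minimum version. A sketch of that gate as a helper (the method name is illustrative):

    // Sketch: true only when every node holding a copy of the shard is known
    // to the cluster state and at least minVersion.
    private boolean allShardHoldersAtLeast(DiscoveryNodes nodes, ShardRouting shardRouting,
                                           Version minVersion) {
        DiscoveryNode current = nodes.get(shardRouting.currentNodeId());
        if (current == null || current.getVersion().before(minVersion)) {
            return false;
        }
        if (shardRouting.relocatingNodeId() != null) {
            DiscoveryNode relocating = nodes.get(shardRouting.relocatingNodeId());
            if (relocating == null || relocating.getVersion().before(minVersion)) {
                return false;
            }
        }
        return true;
    }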

        List<Tuple<DiscoveryNode, ShardActiveRequest>> requests = new ArrayList<>(indexShardRoutingTable.size());
        String indexUUID = state.getMetaData().index(indexShardRoutingTable.shardId().getIndex()).getUUID();
        ClusterName clusterName = state.getClusterName();
        for (ShardRouting shardRouting : indexShardRoutingTable) {
            // Node can't be null, because otherwise shardCanBeDeleted() would have returned false
            DiscoveryNode currentNode = state.nodes().get(shardRouting.currentNodeId());
            assert currentNode != null;

            requests.add(new Tuple<>(currentNode, new ShardActiveRequest(clusterName, indexUUID, shardRouting.shardId())));
            if (shardRouting.relocatingNodeId() != null) {
                DiscoveryNode relocatingNode = state.nodes().get(shardRouting.relocatingNodeId());
                assert relocatingNode != null;
                requests.add(new Tuple<>(relocatingNode, new ShardActiveRequest(clusterName, indexUUID, shardRouting.shardId())));
            }
        }
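
The Tuple pairs built above bind each request to its target node so the two travel together to the send loop. A sketch of how such a list is typically consumed (sendShardActiveRequest is an illustrative helper name, not from the snippet):

    // Each Tuple carries the target node in v1() and the request in v2(),
    // so the send loop needs no further lookups into the cluster state.
    for (Tuple<DiscoveryNode, ShardActiveRequest> request : requests) {
        sendShardActiveRequest(request.v1(), request.v2()); // illustrative helper
    }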
