Package org.elasticsearch.index.shard

Examples of org.elasticsearch.index.shard.ShardId
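The excerpts below come from the Elasticsearch code base and show the typical uses of ShardId: as the map key when fanning per-item requests out to shards, as part of transport-stream serialization, as the shard identity attached to shard-level exceptions, and for attributing cache memory to the owning shard. As a minimal sketch (the index name and shard number are made-up example values; the constructor and accessors match the ones used in the excerpts below), a ShardId simply pairs an index name with a shard number:

import org.elasticsearch.index.shard.ShardId;

public class ShardIdSketch {
    public static void main(String[] args) {
        // "my-index" and 3 are arbitrary example values
        ShardId shardId = new ShardId("my-index", 3);
        // both accessor styles appear in the excerpts below
        System.out.println(shardId.getIndex() + "/" + shardId.getId());   // my-index/3
        System.out.println(shardId.index().name() + "/" + shardId.id());  // my-index/3
        // ShardId implements equals()/hashCode(), which is why it is routinely used as a map key
    }
}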


                    // Multi-percolate fan-out: resolve each item's shards, then group the per-item
                    // requests by ShardId so that every shard receives a single shard-level request.
                    responsesByItemAndShard.set(slot, new AtomicReferenceArray(shards.size()));
                    expectedOperationsPerItem.set(slot, new AtomicInteger(shards.size()));
                    for (ShardIterator shard : shards) {
                        ShardId shardId = shard.shardId();
                        TransportShardMultiPercolateAction.Request requests = requestsByShard.get(shardId);
                        if (requests == null) {
                            requests = new TransportShardMultiPercolateAction.Request(
                                    multiPercolateRequest, shardId.getIndex(), shardId.getId(), percolateRequest.preference());
                            requestsByShard.put(shardId, requests);
                        }
                        logger.trace("Adding shard[{}] percolate request for item[{}]", shardId, slot);
                        requests.add(new TransportShardMultiPercolateAction.Request.Item(slot, new PercolateShardRequest(shardId, percolateRequest)));

                        IntArrayList items = shardToSlots.get(shardId);


            // Multi-percolate execution: one shard-level request is dispatched per ShardId collected
            // above; the ShardId stays available to the response handler as the map key.
            logger.trace("mpercolate executing for shards {}", requestsByShard.keySet());
            for (Map.Entry<ShardId, TransportShardMultiPercolateAction.Request> entry : requestsByShard.entrySet()) {
                final ShardId shardId = entry.getKey();
                TransportShardMultiPercolateAction.Request shardRequest = entry.getValue();
                shardMultiPercolateAction.execute(shardRequest, new ActionListener<TransportShardMultiPercolateAction.Response>() {

                    @Override
                    public void onResponse(TransportShardMultiPercolateAction.Response response) {

            // Multi term vectors: fail items whose index requires routing when none was given;
            // otherwise resolve the target ShardId via OperationRouting and group the items per shard.
            if (termVectorRequest.routing() == null && clusterState.getMetaData().routingRequired(concreteSingleIndex, termVectorRequest.type())) {
                responses.set(i, new MultiTermVectorsItemResponse(null, new MultiTermVectorsResponse.Failure(concreteSingleIndex, termVectorRequest.type(), termVectorRequest.id(),
                        "routing is required for [" + concreteSingleIndex + "]/[" + termVectorRequest.type() + "]/[" + termVectorRequest.id() + "]")));
                continue;
            }
            ShardId shardId = clusterService.operationRouting().getShards(clusterState, concreteSingleIndex,
                    termVectorRequest.type(), termVectorRequest.id(), termVectorRequest.routing(), null).shardId();
            MultiTermVectorsShardRequest shardRequest = shardRequests.get(shardId);
            if (shardRequest == null) {
                shardRequest = new MultiTermVectorsShardRequest(request, shardId.index().name(), shardId.id());
                shardRequest.preference(request.preference);

                shardRequests.put(shardId, shardRequest);
            }
            shardRequest.add(i, termVectorRequest);

        // Shard restore: if the index was renamed on restore, a second ShardId is built that points
        // at the shard's original index name inside the snapshot.
        if (logger.isTraceEnabled()) {
            logger.trace("[{}] restoring shard [{}]", restoreSource.snapshotId(), shardId);
        }
        try {
            IndexShardRepository indexShardRepository = repositoriesService.indexShardRepository(restoreSource.snapshotId().getRepository());
            ShardId snapshotShardId = shardId;
            if (!shardId.getIndex().equals(restoreSource.index())) {
                snapshotShardId = new ShardId(restoreSource.index(), shardId.id());
            }
            indexShardRepository.restore(restoreSource.snapshotId(), shardId, snapshotShardId, recoveryState);
            restoreService.indexShardRestoreCompleted(restoreSource.snapshotId(), shardId);
        } catch (Throwable t) {
            if (Lucene.isCorruptionException(t)) {

            // Multi get: the same per-shard grouping, keyed by the ShardId that OperationRouting
            // resolves for each item.
            if (item.routing() == null && clusterState.getMetaData().routingRequired(concreteSingleIndex, item.type())) {
                responses.set(i, new MultiGetItemResponse(null, new MultiGetResponse.Failure(concreteSingleIndex, item.type(), item.id(),
                        "routing is required for [" + concreteSingleIndex + "]/[" + item.type() + "]/[" + item.id() + "]")));
                continue;
            }
            ShardId shardId = clusterService.operationRouting()
                    .getShards(clusterState, concreteSingleIndex, item.type(), item.id(), item.routing(), null).shardId();
            MultiGetShardRequest shardRequest = shardRequests.get(shardId);
            if (shardRequest == null) {
                shardRequest = new MultiGetShardRequest(request, shardId.index().name(), shardId.id());
                shardRequests.put(shardId, shardRequest);
            }
            shardRequest.add(i, item);
        }
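The multi-percolate, multi-term-vectors, and multi-get excerpts above all follow the same fan-out pattern: resolve each item to a ShardId, then use that ShardId as a map key so that every shard receives exactly one shard-level request instead of one request per item. A condensed, hypothetical sketch of the pattern (the ItemRequest type parameter and the resolveShard function stand in for the concrete request types and OperationRouting calls above):

import org.elasticsearch.index.shard.ShardId;

import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Function;

// Hypothetical helper; ItemRequest stands in for a get, term-vector, or percolate item request.
final class ShardFanOut {
    static <ItemRequest> Map<ShardId, List<ItemRequest>> groupByShard(
            Collection<ItemRequest> items, Function<ItemRequest, ShardId> resolveShard) {
        Map<ShardId, List<ItemRequest>> requestsByShard = new HashMap<>();
        for (ItemRequest item : items) {
            ShardId shardId = resolveShard.apply(item);   // e.g. operationRouting().getShards(...).shardId()
            // ShardId implements equals()/hashCode(), so it works directly as a map key
            requestsByShard.computeIfAbsent(shardId, id -> new ArrayList<>()).add(item);
        }
        return requestsByShard;   // afterwards: one transport call per map entry, i.e. per shard
    }
}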

                    indexBuilder.add(in.readString());
                }
                // Snapshot metadata deserialization: every shard entry is keyed by a ShardId read back
                // from the stream via the static ShardId.readShardId(in) factory.
                ImmutableMap.Builder<ShardId, ShardSnapshotStatus> builder = ImmutableMap.<ShardId, ShardSnapshotStatus>builder();
                int shards = in.readVInt();
                for (int j = 0; j < shards; j++) {
                    ShardId shardId = ShardId.readShardId(in);
                    String nodeId = in.readOptionalString();
                    State shardState = State.fromValue(in.readByte());
                    builder.put(shardId, new ShardSnapshotStatus(nodeId, shardState));
                }
                entries[i] = new Entry(snapshotId, includeGlobalState, state, indexBuilder.build(), builder.build());
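The excerpt above is the read side: ShardId.readShardId(in) reconstructs the shard identity from a transport stream. The matching write side is a sketch only, assuming the Streamable-style writeTo(StreamOutput) that ShardId exposes in this code base and a value() accessor on the shard state enum (the counterpart of State.fromValue); here `shards` stands for the ImmutableMap<ShardId, ShardSnapshotStatus> that the read loop builds:

// Sketch of the write side mirroring the read loop above (writeTo and state().value()
// are inferred from the read side, not copied from the original class).
out.writeVInt(shards.size());
for (Map.Entry<ShardId, ShardSnapshotStatus> shardEntry : shards.entrySet()) {
    shardEntry.getKey().writeTo(out);                         // writes the shard's index name + number
    out.writeOptionalString(shardEntry.getValue().nodeId());  // mirrors in.readOptionalString()
    out.writeByte(shardEntry.getValue().state().value());     // mirrors State.fromValue(in.readByte())
}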

            }
            builder.endArray();
            builder.startArray("shards");
            {
                for (Map.Entry<ShardId, ShardSnapshotStatus> shardEntry : entry.shards.entrySet()) {
                    ShardId shardId = shardEntry.getKey();
                    ShardSnapshotStatus status = shardEntry.getValue();
                    builder.startObject();
                    {
                        builder.field("index", shardId.getIndex());
                        builder.field("shard", shardId.getId());
                        builder.field("state", status.state());
                        builder.field("node", status.nodeId());
                    }
                    builder.endObject();
                }

        loadedFilters.cleanUp();
    }

    // Bitset filter cache: the ShardId is recovered from the segment reader up front so that the
    // memory used by the cached bitset can later be attributed to the owning shard.
    private BitDocIdSet getAndLoadIfNotPresent(final Filter filter, final LeafReaderContext context) throws IOException, ExecutionException {
        final Object coreCacheReader = context.reader().getCoreCacheKey();
        final ShardId shardId = ShardUtils.extractShardId(context.reader());
        Cache<Filter, Value> filterToFbs = loadedFilters.get(coreCacheReader, new Callable<Cache<Filter, Value>>() {
            @Override
            public Cache<Filter, Value> call() throws Exception {
                SegmentReaderUtils.registerCoreListener(context.reader(), BitsetFilterCache.this);
                return CacheBuilder.newBuilder().build();
            }
        });
        return filterToFbs.get(filter, new Callable<Value>() {
            @Override
            public Value call() throws Exception {
                DocIdSet docIdSet = filter.getDocIdSet(context, null);
                final BitDocIdSet bitSet;
                if (docIdSet instanceof BitDocIdSet) {
                    bitSet = (BitDocIdSet) docIdSet;
                } else {
                    BitDocIdSet.Builder builder = new BitDocIdSet.Builder(context.reader().maxDoc());
                    if (docIdSet != null && docIdSet != DocIdSet.EMPTY) {
                        builder.or(docIdSet.iterator());
                    }
                    BitDocIdSet bits = builder.build();
                    // code expects this to be non-null
                    if (bits == null) {
                        bits = new BitDocIdSet(new SparseFixedBitSet(context.reader().maxDoc()), 0);
                    }
                    bitSet = bits;
                }

                Value value = new Value(bitSet, shardId);
                if (shardId != null) { // null when the reader does not belong to an index shard
                    IndexShard shard = indexService.shard(shardId.id());
                    if (shard != null) {
                        shard.shardBitsetFilterCache().onCached(value.bitset.ramBytesUsed());
                    }
                }
                return value;

                // might be specific to a query. We don't pass the live docs either because a cache built for a specific
                // generation of a segment might be reused by an older generation which has fewer deleted documents
                cacheValue = DocIdSets.toCacheable(context.reader(), filter.getDocIdSet(context, null));
                // we might put the same one concurrently, that's fine, it will be replaced and the removal
                // will be called
                // Filter cache: attribute the cached DocIdSet's memory to the owning shard, again
                // recovering the ShardId from the segment reader.
                ShardId shardId = ShardUtils.extractShardId(context.reader());
                if (shardId != null) {
                    IndexShard shard = cache.indexService.shard(shardId.id());
                    if (shard != null) {
                        cacheKey.removalListener = shard.filterCache();
                        shard.filterCache().onCached(DocIdSets.sizeInBytes(cacheValue));
                    }
                }
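Both cache excerpts rely on the same idiom: recover the owning shard from a Lucene segment reader with ShardUtils.extractShardId, treat a null result as "this reader does not belong to an index shard", and only then charge the cached bytes to that shard's statistics. Condensed into a hypothetical helper (recordCachedBytes and the surrounding indexService field are assumptions standing in for the onCached(...) call sites above):

// Hypothetical condensation of the per-shard cache accounting idiom above.
private void recordCachedBytes(LeafReaderContext context, long ramBytesUsed) {
    ShardId shardId = ShardUtils.extractShardId(context.reader());
    if (shardId == null) {
        return; // the reader is not owned by an index shard (e.g. a standalone reader)
    }
    IndexShard shard = indexService.shard(shardId.id());
    if (shard != null) {            // the shard may have been closed or relocated meanwhile
        shard.shardBitsetFilterCache().onCached(ramBytesUsed);
    }
}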

                new String[]{RoutingFieldMapper.NAME, ParentFieldMapper.NAME, TTLFieldMapper.NAME},
                true, request.version(), request.versionType(), FetchSourceContext.FETCH_SOURCE, false);

        // Update path: a missing document is reported with a DocumentMissingException that carries
        // the ShardId (index name + shard number) of the shard that was consulted.
        if (!getResult.isExists()) {
            if (request.upsertRequest() == null && !request.docAsUpsert()) {
                throw new DocumentMissingException(new ShardId(indexShard.indexService().index().name(), request.shardId()), request.type(), request.id());
            }
            Long ttl = null;
            IndexRequest indexRequest = request.docAsUpsert() ? request.doc() : request.upsertRequest();
            if (request.scriptedUpsert() && (request.script() != null)) {
                // Run the script to perform the create logic
                IndexRequest upsert = request.upsertRequest();
                Map<String, Object> upsertDoc = upsert.sourceAsMap();
                Map<String, Object> ctx = new HashMap<>(2);
                // Tell the script that this is a create and not an update
                ctx.put("op", "create");
                ctx.put("_source", upsertDoc);
                try {
                    ExecutableScript script = scriptService.executable(request.scriptLang, request.script, request.scriptType, request.scriptParams);
                    script.setNextVar("ctx", ctx);
                    script.run();
                    // we need to unwrap the ctx...
                    ctx = (Map<String, Object>) script.unwrap(ctx);
                } catch (Exception e) {
                    throw new ElasticsearchIllegalArgumentException("failed to execute script", e);
                }
                // Allow the script to set TTL using ctx._ttl
                ttl = getTTLFromScriptContext(ctx);
                // Allow the script to abort the create by setting "op" to "none"
                String scriptOpChoice = (String) ctx.get("op");
               
                // Only valid options for an upsert script are "create"
                // (the default) or "none", meaning abort upsert
                if (!"create".equals(scriptOpChoice)) {
                    if (!"none".equals(scriptOpChoice)) {
                        logger.warn("Used upsert operation [{}] for script [{}], doing nothing...", scriptOpChoice, request.script);
                    }
                    UpdateResponse update = new UpdateResponse(getResult.getIndex(), getResult.getType(), getResult.getId(),
                            getResult.getVersion(), false);
                    update.setGetResult(getResult);
                    return new Result(update, Operation.NONE, upsertDoc, XContentType.JSON);
                }
                indexRequest.source((Map)ctx.get("_source"));
            }

            indexRequest.index(request.index()).type(request.type()).id(request.id())
                    // it has to be a "create!"
                    .create(true)
                    .routing(request.routing())
                    .ttl(ttl)
                    .refresh(request.refresh())
                    .replicationType(request.replicationType()).consistencyLevel(request.consistencyLevel());
            indexRequest.operationThreaded(false);
            if (request.versionType() != VersionType.INTERNAL) {
                // in all but the internal versioning mode, we want to create the new document using the given version.
                indexRequest.version(request.version()).versionType(request.versionType());
            }
            return new Result(indexRequest, Operation.UPSERT, null, null);
        }

        long updateVersion = getResult.getVersion();

        if (request.versionType() != VersionType.INTERNAL) {
            assert request.versionType() == VersionType.FORCE;
            updateVersion = request.version(); // remember, match_any is excluded by the conflict test
        }

        if (getResult.internalSourceRef() == null) {
            // no source, we can't do anything, throw a failure...
            throw new DocumentSourceMissingException(new ShardId(indexShard.indexService().index().name(), request.shardId()), request.type(), request.id());
        }

        Tuple<XContentType, Map<String, Object>> sourceAndContent = XContentHelper.convertToMap(getResult.internalSourceRef(), true);
        String operation = null;
        String timestamp;
