Examples of CallCompletion


Examples of com.linkedin.util.degrader.CallCompletion

    dcClient2Default.setMinCallCount(1);
    dcClient2Default.setOverrideMinCallCount(1);
    dcClient2Default.setMaxDropRate(1d);
    dcClient2Default.setUpStep(1d);
    dcClient2Default.setHighErrorRate(0);
    CallCompletion cc = client2.getCallTracker().startCall();
    clock2.addMs(10000);
    cc.endCallWithError();

    clock1.addMs(15000);
    clock2.addMs(5000);

    System.err.println(dcClient2Default.getCurrentComputedDropRate());
    System.err.println(dcClient2Default.getCurrentComputedDropRate());

    // now verify that we only get client1
    for (int i = 0; i < 1000; ++i)
    {
      assertEquals(getTrackerClient(strategy, null, new RequestContext(), 0, clients), client1);
    }

    // now force client1 to be disabled
    DegraderControl dcClient1Default = client1.getDegraderControl(DEFAULT_PARTITION_ID);
    dcClient1Default.setMinCallCount(1);
    dcClient1Default.setOverrideMinCallCount(1);
    dcClient1Default.setMaxDropRate(1d);
    dcClient1Default.setUpStep(1d);
    dcClient1Default.setHighErrorRate(0);
    cc = client1.getCallTracker().startCall();
    clock1.addMs(10000);
    cc.endCallWithError();

    clock1.addMs(5000);

    // now verify that we never get a client back
    for (int i = 0; i < 1000; ++i)
    {
      // assumed continuation of the truncated excerpt: no client should be returned here
      assertNull(getTrackerClient(strategy, null, new RequestContext(), 0, clients));
    }
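The excerpt above disables a client by shrinking the degrader's minimum call count to 1 and recording a single failed call through CallCompletion. As a hedged sketch, the same pattern can be folded into a small test helper; the helper name forceFullDegrade, the TestClock parameter type, and the partitionId argument are assumptions, while every call the body makes is taken from the excerpt.

    // Hypothetical helper (a sketch, not part of the excerpted test): push a client's
    // computed drop rate to 100% with a single failed call.
    private static void forceFullDegrade(TrackerClient client, TestClock clock, int partitionId)
    {
      DegraderControl control = client.getDegraderControl(partitionId);
      control.setMinCallCount(1);            // one call is enough to recompute the drop rate
      control.setOverrideMinCallCount(1);
      control.setMaxDropRate(1d);            // allow the drop rate to reach 100%
      control.setUpStep(1d);                 // jump to the maximum in a single step
      control.setHighErrorRate(0);           // any error crosses the error threshold

      CallCompletion cc = client.getCallTracker().startCall();
      clock.addMs(10000);                    // simulate a slow call
      cc.endCallWithError();                 // record it as an error
      clock.addMs(5000);                     // move past the sampling interval so stats roll over
    }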

Examples of com.linkedin.util.degrader.CallCompletion

    dcClient2Default.setOverrideMinCallCount(1);
    dcClient2Default.setMinCallCount(1);
    dcClient2Default.setMaxDropRate(1d);
    dcClient2Default.setUpStep(0.4d);
    dcClient2Default.setHighErrorRate(0);
    CallCompletion cc = client2.getCallTracker().startCall();
    clock2.addMs(1);
    cc.endCallWithError();

    clock1.addMs(15000);
    clock2.addMs(5000);

    System.err.println(dcClient2Default.getCurrentComputedDropRate());
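With upStep at 0.4, highErrorRate at 0, and a single error call in the interval, the first degrade step is expected to move the computed drop rate from 0.0 to 0.4, which is what the println above is inspecting. A minimal hedged check using the excerpt's own variables:

    // expected: once the tracking interval rolls over, the single error call bumps the
    // computed drop rate by one upStep, i.e. from 0.0 to 0.4
    double firstStep = dcClient2Default.getCurrentComputedDropRate();
    System.err.println(firstStep);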

Examples of com.linkedin.util.degrader.CallCompletion

    dcClient2Partition1.setOverrideMinCallCount(1);
    dcClient2Partition1.setMinCallCount(1);
    dcClient2Partition1.setMaxDropRate(1d);
    dcClient2Partition1.setUpStep(0.4d);
    dcClient2Partition1.setHighErrorRate(0);
    CallCompletion cc = client2.getCallTracker().startCall();
    clock2.addMs(1);
    cc.endCallWithError();

    // force client3 to be disabled
    DegraderControl dcClient3Partition1 = client3.getDegraderControl(1);
    dcClient3Partition1.setOverrideMinCallCount(1);
    dcClient3Partition1.setMinCallCount(1);
    dcClient3Partition1.setMaxDropRate(1d);
    dcClient3Partition1.setHighErrorRate(0);
    dcClient3Partition1.setUpStep(0.2d);
    CallCompletion cc3 = client3.getCallTracker().startCall();
    clock3.addMs(1);
    cc3.endCallWithError();

    clock1.addMs(15000);
    clock2.addMs(5000);
    clock3.addMs(5000);
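This excerpt is the per-partition variant: getDegraderControl takes a partition id, so client2 and client3 are only degraded in partition 1. A short hedged sketch of what that separation means, assuming client2 also serves partition 0 (the partition ids and the println style are taken from the excerpts):

    // degrader state is kept per partition, so the same TrackerClient can be degraded
    // in partition 1 while remaining untouched in partition 0
    DegraderControl partition0 = client2.getDegraderControl(0);
    DegraderControl partition1 = client2.getDegraderControl(1);
    System.err.println(partition0.getCurrentComputedDropRate()); // partition that saw no errors
    System.err.println(partition1.getCurrentComputedDropRate()); // partition that saw the error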


Examples of com.linkedin.util.degrader.CallCompletion

      dcClient1Default.setMinCallCount(5);
      dcClient1Default.setMaxDropRate(1d);
      dcClient1Default.setUpStep(1.0d);

      List<CallCompletion> ccList = new ArrayList<CallCompletion>();
      CallCompletion cc;
      for (int j = 0; j < NUM_CHECKS; j++)
      {
        cc = client1.getCallTracker().startCall();

        ccList.add(cc);
      }

      // add high latency and errors to shut off traffic to this tracker client.
      clock.addMs(3500);

      for (Iterator<CallCompletion> iter = ccList.listIterator(); iter.hasNext();)
      {
        cc = iter.next();
        cc.endCallWithError();
        iter.remove();
      }

      // go to next time interval.
      clock.addMs(TIME_INTERVAL);

      Assert.assertEquals(dcClient1Default.getCurrentComputedDropRate(), 1.0);

      // trigger a state update
      TrackerClient resultTC = getTrackerClient(strategy, request, new RequestContext(), 1, clients);

      // now we mimic the high latency and force the state to drop all calls so that
      // the overrideClusterDropRate goes to 1.0
      ccList = new ArrayList<CallCompletion>();
      for (int j = 0; j < NUM_CHECKS; j++)
      {
        cc = client1.getCallTracker().startCall();

        ccList.add(cc);
      }

      //make sure that the latency is really high
      clock.addMs(3500);

      for (Iterator<CallCompletion> iter = ccList.listIterator(); iter.hasNext();)
      {
        cc = iter.next();
        cc.endCallWithError();
        iter.remove();
      }

      // go to next time interval.
      clock.addMs(TIME_INTERVAL);
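The start-a-batch, advance-the-clock, end-everything-with-errors sequence appears twice in the excerpt above. A hedged helper sketch of that pattern; the name simulateErrorBurst and the java.util.List/ArrayList imports are assumptions, and every CallTracker/CallCompletion call is taken from the excerpt.

    // Hypothetical helper sketch: make numCalls calls that all take latencyMs and fail,
    // then roll into the next tracking interval so the degrader sees the new stats.
    private static void simulateErrorBurst(TrackerClient client, TestClock clock,
                                           int numCalls, long latencyMs, long timeIntervalMs)
    {
      List<CallCompletion> completions = new ArrayList<CallCompletion>();
      for (int i = 0; i < numCalls; i++)
      {
        completions.add(client.getCallTracker().startCall());
      }
      clock.addMs(latencyMs);                // every call in the batch sees this latency
      for (CallCompletion cc : completions)
      {
        cc.endCallWithError();               // mark each one as an error
      }
      clock.addMs(timeIntervalMs);           // go to the next time interval
    }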

Examples of com.linkedin.util.degrader.CallCompletion

    dcClient1Default.setMinCallCount(5);
    dcClient1Default.setMaxDropRate(1d);
    dcClient1Default.setUpStep(1.0d);

    List<CallCompletion> ccList = new ArrayList<CallCompletion>();
    CallCompletion cc;
    for (int j = 0; j < NUM_CHECKS; j++)
    {
      cc = client1.getCallTracker().startCall();

      ccList.add(cc);
    }

    // add high latency and errors to shut off traffic to this tracker client.
    // note: the default values for highError and lowError in the degrader are 1.1,
    // which means we don't use errorRates when deciding when to lb/degrade.
    clock.addMs(3500);
    for (Iterator<CallCompletion> iter = ccList.listIterator(); iter.hasNext();)
    {
      cc = iter.next();
      cc.endCallWithError();
      iter.remove();
    }

    // go to next time interval.
    clock.addMs(TIME_INTERVAL);

    Assert.assertEquals(dcClient1Default.getCurrentComputedDropRate(), 1.0);

    // trigger a state update
    TrackerClient resultTC = getTrackerClient(strategy, request, new RequestContext(), 1, clients);
    if (config.getInitialRecoveryLevel() < 0.01)
    {
      //the returned TrackerClient should be null
      assertNull(resultTC,"expected null trackerclient");

      // In the next time interval, the load balancer should reintroduce the TC
      // back into the ring because there was an entire time interval where no calls went to this
      // tracker client, so it's time to try it out. We need to enter this code at least once.
      do
      {
        // go to next time interval.
        clock.addMs(TIME_INTERVAL);
        // try adjusting the hash ring on this updateState
        if (strategyV3 != null)
        {
          strategy.setStrategyV3(DEFAULT_PARTITION_ID, strategyV3);
        }
        else if (strategyV2 != null)
        {
          strategy.setStrategyV2(strategyV2);
        }
        else
        {
          fail("should set strategy (either LoadBalance or Degrader");
        }
        resultTC = getTrackerClient(strategy, request, new RequestContext(), 1, clients);
        localStepsToFullRecovery--;
      }
      while (localStepsToFullRecovery > 0);
    }
    assertNotNull(resultTC,"expected non-null trackerclient");

    // make calls to the tracker client to verify that it's on the road to healthy status.
    for (int j = 0; j < NUM_CHECKS; j++)
    {
      cc = resultTC.getCallTracker().startCall();
      ccList.add(cc);
    }

    clock.addMs(10);

    for (Iterator<CallCompletion> iter = ccList.listIterator(); iter.hasNext();)
    {
      cc = iter.next();
      cc.endCall();
      iter.remove();
    }

    // go to next time interval.
    clock.addMs(TIME_INTERVAL);
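The recovery phase above relies on the success half of the CallCompletion lifecycle. A minimal hedged sketch of one fast, successful call, reusing the excerpt's variable names:

    // one quick successful call: startCall, a little simulated latency, then endCall
    CallCompletion ok = resultTC.getCallTracker().startCall();
    clock.addMs(10);     // low latency keeps the computed drop rate heading down
    ok.endCall();        // success, as opposed to endCallWithError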

Examples of com.linkedin.util.degrader.CallCompletion

    int callHowManyTimes = (int)((qps * timeInterval) / 1000);
    for (int i = 0; i < callHowManyTimes; i++)
    {
      for (TrackerClient client : clients)
      {
        CallCompletion cc = client.getCallTracker().startCall();
        callCompletions.add(cc);
      }
    }
    Random random = new Random();
    clock.addMs(milliseconds);

    for (CallCompletion cc : callCompletions)
    {
      if (withError)
      {
        cc.endCallWithError();
      }
      else if (withQualifiedDegraderError)
      {
        //choose a random error type
        if (random.nextBoolean())
        {
          cc.endCallWithError(ErrorType.CLOSED_CHANNEL_EXCEPTION);
        }
        else
        {
          cc.endCallWithError(ErrorType.CONNECT_EXCEPTION);
        }
      }
      else
      {
        cc.endCall();
      }
    }
    //complete a full interval cycle
    clock.addMs(timeInterval - (milliseconds % timeInterval));
  }
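The distinctive call in this excerpt is the overload endCallWithError(ErrorType), which records which kind of failure ended the call; the excerpt chooses randomly between CLOSED_CHANNEL_EXCEPTION and CONNECT_EXCEPTION. A minimal hedged sketch of a single qualified-error call, reusing one of the excerpt's clients and its clock (the 50 ms latency is arbitrary):

    // record one call that fails with a specific, qualified error type
    CallCompletion cc = client.getCallTracker().startCall();
    clock.addMs(50);
    cc.endCallWithError(ErrorType.CONNECT_EXCEPTION);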

Examples of com.linkedin.util.degrader.CallCompletion

    DegraderControl dcClient1Default = client1.getDegraderControl(DEFAULT_PARTITION_ID);
    dcClient1Default.setOverrideMinCallCount(5);
    dcClient1Default.setMinCallCount(5);

    List<CallCompletion> ccList = new ArrayList<CallCompletion>();
    CallCompletion cc;

    TrackerClient resultTC = getTrackerClient(strategy, request, new RequestContext(), 1, clients);
    // The override drop rate should be zero at this point.
    assertEquals(dcClient1Default.getOverrideDropRate(),0.0);

    // make high latency calls to the tracker client, verify the override drop rate doesn't change
    for (int j = 0; j < NUM_CHECKS; j++)
    {
      cc = client1.getCallTracker().startCall();
      ccList.add(cc);
    }

    clock.addMs((long)highWaterMark);

    for (Iterator<CallCompletion> iter = ccList.listIterator(); iter.hasNext();)
    {
      cc = iter.next();
      cc.endCall();
      iter.remove();
    }

    // go to next time interval.
    clock.addMs(timeInterval);

    // try call dropping on the next updateState
    strategy.setStrategy(DEFAULT_PARTITION_ID,
        DegraderLoadBalancerStrategyV3.PartitionDegraderLoadBalancerState.Strategy.CALL_DROPPING);
    resultTC = getTrackerClient(strategy, request, new RequestContext(), 1, clients);
    // we now expect that the override drop rate stepped up because updateState
    // made that decision.
    assertEquals(dcClient1Default.getOverrideDropRate(), globalStepUp);

    // make mid latency calls to the tracker client, verify the override drop rate doesn't change
    for (int j = 0; j < NUM_CHECKS; j++)
    {
      // need to use client1 because the resultTC may be null
      cc = client1.getCallTracker().startCall();
      ccList.add(cc);
    }

    clock.addMs((long)highWaterMark - 1);

    for (Iterator<CallCompletion> iter = ccList.listIterator(); iter.hasNext();)
    {
      cc = iter.next();
      cc.endCall();
      iter.remove();
    }

    // go to next time interval.
    clock.addMs(timeInterval);

    double previousOverrideDropRate = dcClient1Default.getOverrideDropRate();

    // try call dropping on the next updateState
    strategy.setStrategy(DEFAULT_PARTITION_ID,
        DegraderLoadBalancerStrategyV3.PartitionDegraderLoadBalancerState.Strategy.CALL_DROPPING);
    resultTC = getTrackerClient(strategy, request, new RequestContext(), 1, clients);
    assertEquals(dcClient1Default.getOverrideDropRate(), previousOverrideDropRate );

    // make low latency calls to the tracker client, verify the override drop rate decreases
    for (int j = 0; j < NUM_CHECKS; j++)
    {
      cc = client1.getCallTracker().startCall();
      ccList.add(cc);
    }

    clock.addMs((long)lowWaterMark);

    for (Iterator<CallCompletion> iter = ccList.listIterator(); iter.hasNext();)
    {
      cc = iter.next();
      cc.endCall();
      iter.remove();
    }

    // go to next time interval.
    clock.addMs(timeInterval);
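The excerpt above checks the cluster-level override drop rate against the high and low watermarks, and that rate only changes when updateState runs with the CALL_DROPPING strategy. A hedged sketch of the step that presumably follows the low-latency interval at the end of the excerpt, mirroring its earlier CALL_DROPPING blocks and reusing its variables:

    // force call dropping on the next updateState, then read the override drop rate;
    // after an interval of low-latency calls it is expected to have stepped back down
    strategy.setStrategy(DEFAULT_PARTITION_ID,
        DegraderLoadBalancerStrategyV3.PartitionDegraderLoadBalancerState.Strategy.CALL_DROPPING);
    resultTC = getTrackerClient(strategy, request, new RequestContext(), 1, clients);
    assertTrue(dcClient1Default.getOverrideDropRate() < previousOverrideDropRate);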

Examples of com.linkedin.util.degrader.CallCompletion

    URI uri1 = URI.create("http://test.linkedin.com:3242/fdsaf");
    URI uri2 = URI.create("http://test.linkedin.com:3243/fdsaf");
    URIRequest request = new URIRequest(uri1);

    List<CallCompletion> ccList = new ArrayList<CallCompletion>();
    CallCompletion cc;

    TrackerClient client1 =
            new TrackerClient(uri1, getDefaultPartitionData(1d), new TestLoadBalancerClient(uri1), clock, null);
    TrackerClient client2 =
            new TrackerClient(uri2, getDefaultPartitionData(1d), new TestLoadBalancerClient(uri2), clock, null);

    clients.add(client1);
    clients.add(client2);

    // force client1 to be disabled if we encounter errors/high latency
    DegraderControl dcClient1Default = client1.getDegraderControl(DEFAULT_PARTITION_ID);
    dcClient1Default.setMinCallCount(5);
    dcClient1Default.setOverrideMinCallCount(5);
    dcClient1Default.setUpStep(1.0);
    // force client2 to be disabled if we encounter errors/high latency
    DegraderControl dcClient2Default = client2.getDegraderControl(DEFAULT_PARTITION_ID);
    dcClient2Default.setOverrideMinCallCount(5);
    dcClient2Default.setMinCallCount(5);
    dcClient2Default.setUpStep(0.4);

    // Have one cycle of successful calls to verify valid tracker clients returned.
    // try load balancing on this updateState, need to updateState before forcing the strategy.
    TrackerClient resultTC = getTrackerClient(strategy, request, new RequestContext(), 1, clients);
    strategy.setStrategy(DEFAULT_PARTITION_ID,
        DegraderLoadBalancerStrategyV3.PartitionDegraderLoadBalancerState.Strategy.LOAD_BALANCE);
    resultTC = getTrackerClient(strategy, request, new RequestContext(), 1, clients);
    assertNotNull(resultTC, "expected non-null trackerclient");
    for (int j = 0; j < NUM_CHECKS; j++)
    {
      ccList.add(client1.getCallTracker().startCall());
      ccList.add(client2.getCallTracker().startCall());
    }

    clock.addMs(1);
    for (Iterator<CallCompletion> iter = ccList.listIterator(); iter.hasNext();)
    {
      cc = iter.next();
      cc.endCall();
    }

    // bump to next interval, and get stats.
    clock.addMs(5000);

    // try Load balancing on this updateState
    strategy.setStrategy(DEFAULT_PARTITION_ID,
        DegraderLoadBalancerStrategyV3.PartitionDegraderLoadBalancerState.Strategy.LOAD_BALANCE);
    resultTC = getTrackerClient(strategy, request, new RequestContext(), 1, clients);
    assertNotNull(resultTC,"expected non-null trackerclient");

    Assert.assertEquals(dcClient1Default.getCurrentComputedDropRate(), 0.0);
    Assert.assertEquals(dcClient2Default.getCurrentComputedDropRate(), 0.0);

    // now simulate a bad cluster state with high error and high latency
    for (int j = 0; j < NUM_CHECKS; j++)
    {
      ccList.add(client1.getCallTracker().startCall());
      ccList.add(client2.getCallTracker().startCall());
    }

    clock.addMs(3500);
    for (Iterator<CallCompletion> iter = ccList.listIterator(); iter.hasNext();)
    {
      cc = iter.next();
      cc.endCallWithError();
    }

    // go to next interval
    clock.addMs(5000);

    Assert.assertEquals(dcClient1Default.getCurrentComputedDropRate(), 1.0);
    Assert.assertEquals(dcClient2Default.getCurrentComputedDropRate(), 0.4);

    // trigger a state update, the returned TrackerClient should be client2
    // because client 1 should have gone up to a 1.0 drop rate, and the cluster should
    // be unhealthy
    strategy.setStrategy(DEFAULT_PARTITION_ID,
        DegraderLoadBalancerStrategyV3.PartitionDegraderLoadBalancerState.Strategy.LOAD_BALANCE);
    resultTC = getTrackerClient(strategy, request, new RequestContext(), 1, clients);
    assertEquals(resultTC, client2);


    // Simulate several time cycles without any calls. The ring recovery mechanism should bump
    // client1 up to full weight in an attempt to route some calls to it. Client2 will stay at
    // its current drop rate.
    do
    {
      // go to next time interval.
      clock.addMs(TIME_INTERVAL);
      // adjust the hash ring this time.
      strategy.setStrategy(DEFAULT_PARTITION_ID,
          DegraderLoadBalancerStrategyV3.PartitionDegraderLoadBalancerState.Strategy.LOAD_BALANCE);
      resultTC = getTrackerClient(strategy, request, new RequestContext(), 1, clients);
      localStepsToFullRecovery--;
    }
    while (localStepsToFullRecovery > 0);
    assertNotNull(resultTC,"expected non-null trackerclient");

    assertTrue(strategy.getState().getPartitionState(DEFAULT_PARTITION_ID).getPointsMap().get(client1.getUri()) ==
                       client1.getPartitionWeight(DEFAULT_PARTITION_ID) * config.getPointsPerWeight(),
               "client1 did not recover to full weight in hash map.");
    Assert.assertEquals(dcClient2Default.getCurrentComputedDropRate(), 0.4,
                        "client2 drop rate not as expected");


    cc = client1.getCallTracker().startCall();
    clock.addMs(10);
    cc.endCall();
    clock.addMs(TIME_INTERVAL);

    resultTC = getTrackerClient(strategy, request, new RequestContext(), 1, clients);
    assertNotNull(resultTC,"expected non-null trackerclient");
  }
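This excerpt also shows where CallCompletion objects come from in the first place: a TrackerClient is built around a transport client and a clock, and its CallTracker hands out one CallCompletion per call. A stripped-down hedged sketch of that setup; TestLoadBalancerClient and getDefaultPartitionData are the excerpt's own test helpers, and the clock is the excerpt's test clock.

    // build a tracker client and record one successful call against it
    URI uri = URI.create("http://test.linkedin.com:3242/fdsaf");
    TrackerClient client =
        new TrackerClient(uri, getDefaultPartitionData(1d), new TestLoadBalancerClient(uri), clock, null);
    CallCompletion cc = client.getCallTracker().startCall();
    clock.addMs(1);
    cc.endCall();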

Examples of com.linkedin.util.degrader.CallCompletion

    List<TrackerClient> clients = new ArrayList<TrackerClient>();
    URI uri1 = URI.create("http://test.linkedin.com:3242/fdsaf");
    URIRequest request = new URIRequest(uri1);

    List<CallCompletion> ccList = new ArrayList<CallCompletion>();
    CallCompletion cc;

    TrackerClient client1 =
            new TrackerClient(uri1, getDefaultPartitionData(1d), new TestLoadBalancerClient(uri1), clock, null);

    clients.add(client1);

    // force client1 to be disabled if we encounter errors/high latency
    DegraderControl dcClient1Default = client1.getDegraderControl(DEFAULT_PARTITION_ID);
    dcClient1Default.setOverrideMinCallCount(5);
    dcClient1Default.setMinCallCount(5);
    dcClient1Default.setUpStep(1.0);
    dcClient1Default.setHighErrorRate(0);

    // Issue high latency calls to reduce client1 to the minimum number of hash points allowed.
    // (1 in this case)
    TrackerClient resultTC = getTrackerClient(strategy, request, new RequestContext(), 1, clients);
    assertNotNull(resultTC, "expected non-null trackerclient");
    for (int j = 0; j < NUM_CHECKS; j++)
    {
      cc = resultTC.getCallTracker().startCall();

      ccList.add(cc);
    }

    clock.addMs(3500);
    for (int j = 0; j < NUM_CHECKS; j++)
    {
      cc = ccList.get(j);
      cc.endCall();
    }
    // bump to next interval, and get stats.
    clock.addMs(5000);

    // because we want to test out the adjusted min drop rate, force the hash ring adjustment now.
    strategy.setStrategy(DEFAULT_PARTITION_ID,
        DegraderLoadBalancerStrategyV3.PartitionDegraderLoadBalancerState.Strategy.LOAD_BALANCE);
    resultTC = getTrackerClient(strategy, request, new RequestContext(), 1, clients);
    // client1 should be reduced to 1 hash point, but since it is the only TC, it should be the
    // TC returned.
    assertEquals(resultTC, client1, "expected client1 to be returned");

    assertEquals((int)(strategy.getState().getPartitionState(DEFAULT_PARTITION_ID).getPointsMap().get(client1.getUri())), 1,
                 "expected client1 to have only 1 point in hash map");

    // make low latency call, we expect the computedDropRate to be adjusted because the minimum
    // call count was also scaled down.
    cc = client1.getCallTracker().startCall();
    clock.addMs(10);
    cc.endCall();
    clock.addMs(TIME_INTERVAL);

    Assert.assertTrue(dcClient1Default.getCurrentComputedDropRate() < 1.0,
                      "client1 drop rate not less than 1.");
  }