Package org.hivedb.meta

Examples of org.hivedb.meta.Node
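
The snippets below come from HiveDB's test suite and exercise org.hivedb.meta.Node: constructing nodes, persisting them through NodeDao, registering them with a Hive, and migrating keys between nodes. As a quick orientation, here is a minimal sketch of how a Node is typically built and registered, assuming only the constructors and setters that appear in the snippets on this page; the connect string and names are placeholders rather than values from the original tests.

  // Minimal sketch, assuming the Node constructor and setters used in the tests below.
  // The connect string and names are placeholders, not values from the original tests.
  Hive hive = Hive.load("jdbc:mysql://localhost/hive_metadata", CachingDataSourceProvider.getInstance());

  // Short form: name, database name, host, dialect (as in installASchemaOnAnExistingNode).
  Node node = new Node("exampleNode", "example_db", "localhost", HiveDbDialect.MySql);

  // Optional connection details, mirroring createFullyPopulatedNode().
  node.setUsername("test");
  node.setPassword("test");
  node.setPort(3306);
  node.setCapacity(101);
  node.setStatus(Status.writable);
  node.setOptions("&works=true");

  // Register the node so the hive's directory can assign keys to it.
  hive.addNode(node);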


  public void testUpdate() {
    int count = getHive().getNodes().size();
    NodeDao dao = new NodeDao(getDataSource(getConnectString(getHiveDatabaseName())));
    assertEquals(count, dao.loadAll().size());

    Node full = createFullyPopulatedNode();
    Node minimal = createMinimalNode();

    dao.create(full);
    dao.create(minimal);

    full.setDatabaseName("notBlahDatabase");

    minimal.setUsername("minimus");
    minimal.setPassword("maximus");

    dao.update(full);
    dao.update(minimal);

    List<Node> nodes = dao.loadAll();
    assertEquals(2 + count, nodes.size());

    Node fetchedFull = null;
    Node fetchedMinimal = null;

    for (Node n : nodes)
      if (n.getName().equals(full.getName()))
        fetchedFull = n;
      else if (n.getName().equals(minimal.getName()))
        fetchedMinimal = n;

    assertNotNull(fetchedFull);
    assertNotNull(fetchedMinimal);

    assertEquals(full, fetchedFull);
    assertEquals(minimal, fetchedMinimal);

    assertNotNull(fetchedMinimal.getUsername());
    assertNotNull(fetchedMinimal.getPassword());
  }


  public void testDelete() {
    int count = getHive().getNodes().size();
    NodeDao dao = new NodeDao(getDataSource(getConnectString(getHiveDatabaseName())));
    assertEquals(count, dao.loadAll().size());

    Node full = createFullyPopulatedNode();
    Node minimal = createMinimalNode();

    dao.create(full);
    dao.create(minimal);

    List<Node> nodes = dao.loadAll();
    // ... (deletion steps truncated in this listing)

    assertEquals(0, dao.loadAll().size());
  }

  public Node createFullyPopulatedNode() {
    Node node = createMinimalNode();
    node.setName("full node");
    node.setStatus(Status.writable);
    node.setUsername("test");
    node.setPassword("test");
    node.setPort(3306);
    node.setCapacity(101);
    node.setOptions("&works=true");
    return node;
  }


  public Node createMinimalNode() {
    return new Node(
      Hive.NEW_OBJECT_ID,
      "minimal node",
      "blahbase",
      "localhost",
      HiveDbDialect.MySql
    );
  }

  @Test
  public void installASchemaOnAnExistingNode() throws Exception {
    Hive hive = Hive.load(uri(), CachingDataSourceProvider.getInstance());
    String nodeName = "anExistingNode";
    Node node = new Node(nodeName, H2TestCase.TEST_DB, "unecessary", HiveDbDialect.H2);
    hive.addNode(node);
    WeatherSchema weatherSchema = WeatherSchema.getInstance();
    getService().install(weatherSchema.getName(), nodeName);
    validateSchema(weatherSchema, node);
  }

      numIndex = resource.getSecondaryIndex("num");
      nameIndex = resource.getSecondaryIndex("name");
      for (SecondaryIndex secondaryIndex : resource.getSecondaryIndexes()) {
        hive.addSecondaryIndex(resource, secondaryIndex);
      }
      hive.addNode(new Node("node", H2TestCase.TEST_DB, "", HiveDbDialect.H2));
      SimpleJdbcDaoSupport dao = new SimpleJdbcDaoSupport();
      dao.setDataSource(hive.getDataSourceProvider().getDataSource(getConnectString(H2TestCase.TEST_DB)));
      //    dao.getJdbcTemplate().update("SET TRACE_LEVEL_SYSTEM_OUT 3");
    }
    catch (Exception e) {
      // ... (remainder of this snippet truncated in this listing)

  @Test
  public void testInsertPrimaryIndexKey() throws Exception {
    DbDirectory d = getDirectory();
    Integer key = new Integer(43);
    Hive hive = getHive();
    Node firstNode = Atom.getFirst(hive.getNodes());
    d.insertPrimaryIndexKey(Atom.getFirst(hive.getNodes()), key);
    for (Integer id : Transform.map(semaphoreToId(), d.getKeySemamphoresOfPrimaryIndexKey(key)))
      assertEquals((Integer) firstNode.getId(), id);
  }

    Hive hive = Hive.load(getConnectString(getHiveDatabaseName()), CachingDataSourceProvider.getInstance());
    String primaryKey = new String("Asia");
    Integer secondaryKey = new Integer(7);

    Pair<Node, Node> nodes = initializeTestData(hive, primaryKey, secondaryKey);
    Node origin = nodes.getKey();
    Node destination = nodes.getValue();
    NodeResolver dir = new DbDirectory(hive.getPartitionDimension(), CachingDataSourceProvider.getInstance().getDataSource(getConnectString(getHiveDatabaseName())));
    PartitionKeyMover<String> pMover = new PrimaryMover(origin.getUri());
    Mover<Integer> secMover = new SecondaryMover();

    //Do the actual migration
    Migrator m = new HiveMigrator(hive);
    assertNotNull(Filter.grepItemAgainstList(origin.getId(), Transform.map(DirectoryWrapper.semaphoreToId(), dir.getKeySemamphoresOfPrimaryIndexKey(primaryKey))));
    m.migrate(primaryKey, Arrays.asList(new String[]{destination.getName()}), pMover);
    //Directory points to the destination node
    assertNotNull(Filter.grepItemAgainstList(destination.getId(), Transform.map(DirectoryWrapper.semaphoreToId(), dir.getKeySemamphoresOfPrimaryIndexKey(primaryKey))));
    //Records exist and are identical on the destination node
    assertEquals(primaryKey, pMover.get(primaryKey, destination));
    assertEquals(secondaryKey, secMover.get(secondaryKey, destination));
  }

    Hive hive = Hive.load(getConnectString(getHiveDatabaseName()), CachingDataSourceProvider.getInstance());
    String primaryKey = new String("Oceana");
    Integer secondaryKey = new Integer(7);

    Pair<Node, Node> nodes = initializeTestData(hive, primaryKey, secondaryKey);
    Node origin = nodes.getKey();
    Node destination = nodes.getValue();
    NodeResolver dir = new DbDirectory(hive.getPartitionDimension(), getDataSource(getConnectString(getHiveDatabaseName())));
    PartitionKeyMover<String> pMover = new PrimaryMover(origin.getUri());
    // This mover intentionally fails during copy
    Mover<Integer> failingMover = new Mover<Integer>() {
      public void copy(Integer item, Node node) {
        throw new RuntimeException("");
      }

      public void delete(Integer item, Node node) {
      }

      public Integer get(Object id, Node node) {
        return null;
      }
    };

    Migrator m = new HiveMigrator(hive);
    pMover.getDependentMovers().clear();
    pMover.getDependentMovers().add(new Pair<Mover, KeyLocator>(failingMover, new SecondaryKeyLocator(origin.getUri())));
    try {
      m.migrate(primaryKey, Arrays.asList(new String[]{destination.getName()}), pMover);
    } catch (Exception e) {
      //Quash
    }
    //Directory still points to the origin node
    assertNotNull(Filter.grepItemAgainstList(origin.getId(), Transform.map(DirectoryWrapper.semaphoreToId(), dir.getKeySemamphoresOfPrimaryIndexKey(primaryKey))));
    // ... (remaining assertions truncated in this listing)
  }

    Hive hive = Hive.load(getConnectString(getHiveDatabaseName()), CachingDataSourceProvider.getInstance());
    String primaryKey = new String("Asia");
    Integer secondaryKey = new Integer(7);

    Pair<Node, Node> nodes = initializeTestData(hive, primaryKey, secondaryKey);
    Node origin = nodes.getKey();
    Node destination = nodes.getValue();

    NodeResolver dir = new DbDirectory(hive.getPartitionDimension(), getDataSource(getConnectString(getHiveDatabaseName())));
    PartitionKeyMover<String> pMover = new PrimaryMover(origin.getUri());
    // This mover intentionally fails during delete
    Mover<Integer> failingMover = new Mover<Integer>() {
      public void copy(Integer item, Node node) {
        SimpleJdbcDaoSupport dao = new SimpleJdbcDaoSupport();
        dao.setDataSource(CachingDataSourceProvider.getInstance().getDataSource(node.getUri()));
        dao.getJdbcTemplate().update("insert into secondary_table values (?)", new Object[]{item});
      }

      public void delete(Integer item, Node node) {
        throw new RuntimeException("Ach!");
      }

      public Integer get(Object id, Node node) {
        SimpleJdbcDaoSupport dao = new SimpleJdbcDaoSupport();
        dao.setDataSource(CachingDataSourceProvider.getInstance().getDataSource(node.getUri()));
        return dao.getJdbcTemplate().queryForInt("select id from secondary_table where id = ?", new Object[]{id});
      }
    };

    Migrator m = new HiveMigrator(hive);
    pMover.getDependentMovers().clear();
    pMover.getDependentMovers().add(new Pair<Mover, KeyLocator>(failingMover, new SecondaryKeyLocator(origin.getUri())));
    try {
      m.migrate(primaryKey, Arrays.asList(new String[]{destination.getName()}), pMover);
    } catch (Exception e) {
      //Quash
    }
    // Directory still points to the destination node
    assertNotNull(Filter.grepItemAgainstList(destination.getId(), Transform.map(DirectoryWrapper.semaphoreToId(), dir.getKeySemamphoresOfPrimaryIndexKey(primaryKey))));
    // Records exist on the destination node
    assertEquals(primaryKey, pMover.get(primaryKey, destination));
    assertEquals(secondaryKey, new SecondaryMover().get(secondaryKey, destination));
  }
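
The migration tests above all rely on a SecondaryMover whose implementation is not shown on this page. Based on the anonymous movers in the failure tests, which write to and read from a secondary_table, a working mover might look roughly like the following sketch. This is an assumption-laden illustration: the class name is hypothetical, Mover is assumed to be an interface (as the anonymous implementations suggest), and the delete statement is inferred, since only the copy and get bodies appear above.

  // Hypothetical sketch of a non-failing Mover, modeled on the anonymous movers above.
  // Assumes the same secondary_table used by the failure tests; the delete SQL is an assumption.
  public static class ExampleSecondaryMover implements Mover<Integer> {
    public void copy(Integer item, Node node) {
      SimpleJdbcDaoSupport dao = new SimpleJdbcDaoSupport();
      dao.setDataSource(CachingDataSourceProvider.getInstance().getDataSource(node.getUri()));
      dao.getJdbcTemplate().update("insert into secondary_table values (?)", new Object[]{item});
    }

    public void delete(Integer item, Node node) {
      SimpleJdbcDaoSupport dao = new SimpleJdbcDaoSupport();
      dao.setDataSource(CachingDataSourceProvider.getInstance().getDataSource(node.getUri()));
      dao.getJdbcTemplate().update("delete from secondary_table where id = ?", new Object[]{item});
    }

    public Integer get(Object id, Node node) {
      SimpleJdbcDaoSupport dao = new SimpleJdbcDaoSupport();
      dao.setDataSource(CachingDataSourceProvider.getInstance().getDataSource(node.getUri()));
      return dao.getJdbcTemplate().queryForInt("select id from secondary_table where id = ?", new Object[]{id});
    }
  }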
