Package com.cloudera.sqoop.manager

Examples of com.cloudera.sqoop.manager.ConnManager

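ConnManager is the abstract base class behind Sqoop's database connectors: concrete managers such as GenericJdbcManager, NetezzaManager and OraOopConnManager wrap a JDBC connection and expose table metadata (listTables(), getColumnNames(), getPrimaryKey(), identifier escaping) as well as import/export hooks to the rest of Sqoop. The excerpts below are taken from the Sqoop code base and its connectors. As a minimal, self-contained sketch of the call pattern they share (not part of any excerpt; the connect string, driver class, credentials and table name are placeholders):

    import java.sql.SQLException;

    import com.cloudera.sqoop.SqoopOptions;
    import com.cloudera.sqoop.manager.ConnManager;
    import com.cloudera.sqoop.manager.GenericJdbcManager;

    public class ConnManagerSketch {
      public static void main(String[] args) throws SQLException {
        // Connection settings below are placeholders.
        SqoopOptions options =
            new SqoopOptions("jdbc:mysql://localhost/demo", "demo_table");
        options.setUsername("demo_user");
        options.setPassword("demo_password");

        // GenericJdbcManager takes an explicit JDBC driver class plus the options.
        ConnManager mgr = new GenericJdbcManager("com.mysql.jdbc.Driver", options);
        try {
          String[] tables = mgr.listTables();                   // enumerate tables
          String[] columns = mgr.getColumnNames("demo_table");  // column metadata
          String pk = mgr.getPrimaryKey("demo_table");          // primary-key lookup
          System.out.println(tables.length + " tables, "
              + columns.length + " columns, primary key: " + pk);
        } finally {
          mgr.close(); // release the underlying JDBC connection
        }
      }
    }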

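Listing tables through a NetezzaManager in a test: SqoopOptions is built from the Netezza connect string and credentials, and the test asserts that the expected table shows up in ConnManager.listTables().
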
    SqoopOptions options = new SqoopOptions(
        NetezzaTestUtils.getNZConnectString(), getTableName());
    options.setUsername(NetezzaTestUtils.getNZUser());
    options.setPassword(NetezzaTestUtils.getNZPassword());

    ConnManager mgr = new NetezzaManager(options);
    String[] tables = mgr.listTables();
    Arrays.sort(tables);
    assertTrue(getTableName() + " is not found!",
        Arrays.binarySearch(tables, getTableName()) >= 0);
  }


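The OraOop connection-manager factory deciding whether to accept a Sqoop job: it checks the connect string and job type, connects to Oracle to verify the target table, and, if it accepts the job, returns an OraOopConnManager and records the desired mapper count, session action name and Oracle database version in the job configuration.
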
        .getConf());

    LOG.debug(String.format("%s can be called by Sqoop!",
        OraOopConstants.ORAOOP_PRODUCT_NAME));

    ConnManager result = null;

    if (jobData != null) {

      SqoopOptions sqoopOptions = jobData.getSqoopOptions();

      String connectString = sqoopOptions.getConnectString();
      if (connectString != null
          && connectString.toLowerCase().trim().startsWith("jdbc:oracle")) {

        if (!isOraOopEnabled(sqoopOptions)) {
          return result;
        }

        OraOopConnManager oraOopConnManager = null;

        OraOopConstants.Sqoop.Tool jobType = getSqoopJobType(jobData);
        OraOopUtilities.rememberSqoopJobType(jobType, jobData.getSqoopOptions()
            .getConf());

        List<OraOopLogMessage> messagesToDisplayAfterWelcome =
            new ArrayList<OraOopLogMessage>();

        switch (jobType) {

          case IMPORT:
            if (isNumberOfImportMappersOkay(sqoopOptions)
                && !isSqoopImportIncremental(jobData)
                && isSqoopImportJobTableBased(sqoopOptions)) {

              // At this stage, the Sqoop import job appears to be one we're
              // interested in accepting. We now need to connect to
              // the Oracle database to perform more tests...

              oraOopConnManager = new OraOopConnManager(sqoopOptions);

              try {
                Connection connection = oraOopConnManager.getConnection();

                if (isSqoopTableAnOracleTable(connection, sqoopOptions
                    .getUsername(),
                    oraOopConnManager.getOracleTableContext())) {

                  // OraOop will not accept responsibility for an Index
                  // Organized Table (IOT)...
                  if (!isSqoopTableAnIndexOrganizedTable(connection,
                      oraOopConnManager.getOracleTableContext())) {
                    result = oraOopConnManager; // <- OraOop accepts
                                                // responsibility for this Sqoop
                                                // job!
                  }
                }
              } catch (SQLException ex) {
                throw new RuntimeException(String.format(
                    "Unable to connect to the Oracle database at %s\n"
                        + "Error:%s", sqoopOptions.getConnectString(), ex
                        .getMessage()), ex);
              }
            }
            break;

          case EXPORT:
            if (isNumberOfExportMappersOkay(sqoopOptions)) {

              // At this stage, the Sqoop export job appears to be one we're
              // interested in accepting. We now need to connect to
              // the Oracle database to perform more tests...

              oraOopConnManager = new OraOopConnManager(sqoopOptions);

              Connection connection = null;
              try {
                connection = oraOopConnManager.getConnection();
              } catch (SQLException ex) {
                throw new RuntimeException(String.format(
                    "Unable to connect to the Oracle database at %s\n"
                        + "Error:%s", sqoopOptions.getConnectString(), ex
                        .getMessage()), ex);
              }

              try {

                createAnyRequiredOracleObjects(sqoopOptions, connection,
                    oraOopConnManager, messagesToDisplayAfterWelcome);

                if (isSqoopTableAnOracleTable(connection, sqoopOptions
                    .getUsername(),
                    oraOopConnManager.getOracleTableContext())) {

                  result = oraOopConnManager; // <- OraOop accepts
                                              // responsibility for this Sqoop
                                              // job!
                }

              } catch (SQLException ex) {
                LOG.error(OraOopUtilities.getFullExceptionMessage(ex));
              }
            }

            break;
          default:
            // OraOop doesn't know how to handle other types of jobs - so won't
            // accept them.
            break;
        }

        // If OraOop has accepted this Sqoop job...
        if (result != null) {

          showUserTheOraOopWelcomeMessage();

          for (OraOopLogMessage message : messagesToDisplayAfterWelcome) {
            message.log(LOG);
          }

          // By the time we get into getSplits(), the number of mappers
          // stored in the config can be either 4 or 1 - so it seems
          // a bit unreliable. We'll use our own property name to ensure
          // getSplits() gets the correct value...
          sqoopOptions.getConf().setInt(
              OraOopConstants.ORAOOP_DESIRED_NUMBER_OF_MAPPERS,
              sqoopOptions.getNumMappers());

          // Generate the "action" name that we'll assign to our Oracle sessions
          // so that the user knows which Oracle sessions belong to OraOop...
          sqoopOptions.getConf().set(
              OraOopConstants.ORACLE_SESSION_ACTION_NAME,
              getOracleSessionActionName(jobData));

          OraOopUtilities.appendJavaSecurityEgd(sqoopOptions.getConf());

          // Get the Oracle database version...
          try {
            OracleVersion oracleVersion =
                OraOopOracleQueries.getOracleVersion(result.getConnection());
            LOG.info(String.format("Oracle Database version: %s",
                oracleVersion.getBanner()));
            sqoopOptions.getConf().setInt(
                OraOopConstants.ORAOOP_ORACLE_DATABASE_VERSION_MAJOR,
                oracleVersion.getMajor());
            sqoopOptions.getConf().setInt(
                OraOopConstants.ORAOOP_ORACLE_DATABASE_VERSION_MINOR,
                oracleVersion.getMinor());
          } catch (SQLException ex) {
            LOG.error("Unable to obtain the Oracle database version.", ex);
          }

          try {
            if (sqoopOptions.getConf().getBoolean(
                OraOopConstants.ORAOOP_IMPORT_CONSISTENT_READ, false)) {
              long scn =
                  sqoopOptions.getConf().getLong(
                      OraOopConstants.ORAOOP_IMPORT_CONSISTENT_READ_SCN, 0);
              if (scn == 0) {
                scn = OraOopOracleQueries.getCurrentScn(result.getConnection());
              }
              sqoopOptions.getConf().setLong(
                  OraOopConstants.ORAOOP_IMPORT_CONSISTENT_READ_SCN, scn);
              LOG.info("Performing a consistent read using SCN: " + scn);
            }

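Falling back to the table's primary key when no split-by/row-key column was configured: ConnManager.getPrimaryKey() supplies the column, and the job fails with an IOException if none can be determined.
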
    }

    if (null == rowKeyCol) {
      // No split-by column is explicitly set.
      // If the table has a primary key, use that.
      ConnManager manager = getContext().getConnManager();
      rowKeyCol = manager.getPrimaryKey(tableName);
    }

    if (null == rowKeyCol) {
      // Give up here if this is still unset.
      throw new IOException("Could not determine the row-key column. "

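A second, nearly identical occurrence of the same primary-key fallback from another import job.
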
    }

    if (null == rowKeyCol) {
      // No split-by column is explicitly set.
      // If the table has a primary key, use that.
      ConnManager manager = getContext().getConnManager();
      rowKeyCol = manager.getPrimaryKey(tableName);
    }

    if (null == rowKeyCol) {
      // Give up here if this is still unset.
      throw new IOException(

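Configuring the input format of an import job: the ConnManager supplies the JDBC driver class for DBConfiguration, the column names, and identifier escaping before DataDrivenDBInputFormat.setInput() is called.
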
  @Override
  protected void configureInputFormat(Job job, String tableName,
      String tableClassName, String splitByCol) throws ClassNotFoundException,
      IOException {

    ConnManager mgr = getContext().getConnManager();
    String username = options.getUsername();
    if (null == username || username.length() == 0) {
      DBConfiguration.configureDB(job.getConfiguration(), mgr.getDriverClass(),
          options.getConnectString());
    } else {
      DBConfiguration.configureDB(job.getConfiguration(), mgr.getDriverClass(),
          options.getConnectString(), username, options.getPassword());
    }

    String[] colNames = options.getColumns();
    if (null == colNames) {
      colNames = mgr.getColumnNames(tableName);
    }

    String[] sqlColNames = null;
    if (null != colNames) {
      sqlColNames = new String[colNames.length];
      for (int i = 0; i < colNames.length; i++) {
        sqlColNames[i] = mgr.escapeColName(colNames[i]);
      }
    }

    // It's ok if the where clause is null in DBInputFormat.setInput.
    String whereClause = options.getWhereClause();

    // We can't set the class properly in here, because we may not have the
    // jar loaded in this JVM. So we start by calling setInput() with
    // DBWritable and then overriding the string manually.

    // Note that mysqldump also does *not* want a quoted table name.
    DataDrivenDBInputFormat.setInput(job, DBWritable.class, tableName,
        whereClause, mgr.escapeColName(splitByCol), sqlColNames);

    LOG.debug("Using InputFormat: " + inputFormatClass);
    job.setInputFormatClass(getInputFormatClass());

    if (isHCatJob) {

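Configuring the output format of an export job: the ConnManager again supplies the driver class and column names, and decides whether the table name must be escaped before DBOutputFormat.setOutput() is called.
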
  }
  @Override
  protected void configureOutputFormat(Job job, String tableName,
                                       String tableClassName)
      throws ClassNotFoundException, IOException {
    ConnManager mgr = context.getConnManager();
    try {
      String username = options.getUsername();
      if (null == username || username.length() == 0) {
        DBConfiguration.configureDB(job.getConfiguration(),
            mgr.getDriverClass(),
            options.getConnectString(),
            options.getConnectionParams());
      } else {
        DBConfiguration.configureDB(job.getConfiguration(),
            mgr.getDriverClass(),
            options.getConnectString(),
            username, options.getPassword(),
            options.getConnectionParams());
      }

      String [] colNames = options.getColumns();
      if (null == colNames) {
        colNames = mgr.getColumnNames(tableName);
      }

      if (mgr.escapeTableNameOnExport()) {
        DBOutputFormat.setOutput(job, mgr.escapeTableName(tableName), colNames);
      } else {
        DBOutputFormat.setOutput(job, tableName, colNames);
      }

      job.setOutputFormatClass(getOutputFormatClass());

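runExport(): the ConnManager drives staging-table handling around the MapReduce export job, via supportsStagingForExport(), deleteAllRecords(), getTableRowCount() and migrateData().
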
   * @throws IOException if the export job encounters an IO error
   * @throws ExportException if the job fails unexpectedly or is misconfigured.
   */
  public void runExport() throws ExportException, IOException {

    ConnManager cmgr = context.getConnManager();
    SqoopOptions options = context.getOptions();
    Configuration conf = options.getConf();

    String outputTableName = context.getTableName();
    String stagingTableName = context.getOptions().getStagingTableName();

    String tableName = outputTableName;
    boolean stagingEnabled = false;

    // Check if there are runtime error checks to do
    if (isHCatJob && options.isDirect()
        && !context.getConnManager().isDirectModeHCatSupported()) {
      throw new IOException("Direct export is not compatible with "
        + "HCatalog operations using the connection manager "
        + context.getConnManager().getClass().getName()
        + ". Please remove the parameter --direct");
    }

    if (stagingTableName != null) { // user has specified the staging table
      if (cmgr.supportsStagingForExport()) {
        LOG.info("Data will be staged in the table: " + stagingTableName);
        tableName = stagingTableName;
        stagingEnabled = true;
      } else {
        throw new ExportException("The active connection manager ("
            + cmgr.getClass().getCanonicalName()
            + ") does not support staging of data for export. "
            + "Please retry without specifying the --staging-table option.");
      }
    }


    String tableClassName = null;
    if (!cmgr.isORMFacilitySelfManaged()) {
        tableClassName =
            new TableClassName(options).getClassForTable(outputTableName);
    }
    // For ORM self managed, we leave the tableClassName to null so that
    // we don't check for non-existing classes.
    String ormJarFile = context.getJarFile();

    LOG.info("Beginning export of " + outputTableName);
    loadJars(conf, ormJarFile, tableClassName);

    if (stagingEnabled) {
      // Prepare the staging table
      if (options.doClearStagingTable()) {
        try {
          // Delete all records from staging table
          cmgr.deleteAllRecords(stagingTableName);
        } catch (SQLException ex) {
          throw new ExportException(
              "Failed to empty staging table before export run", ex);
        }
      } else {
        // User has not explicitly specified the clear staging table option.
        // Assert that the staging table is empty.
        try {
          long rowCount = cmgr.getTableRowCount(stagingTableName);
          if (rowCount != 0L) {
            throw new ExportException("The specified staging table ("
                + stagingTableName + ") is not empty. To force deletion of "
                + "its data, please retry with --clear-staging-table option.");
          }
        } catch (SQLException ex) {
          throw new ExportException(
              "Failed to count data rows in staging table: "
                  + stagingTableName, ex);
        }
      }
    }

    Job job = createJob(conf);
    try {
      // Set the external jar to use for the job.
      job.getConfiguration().set("mapred.jar", ormJarFile);
      if (options.getMapreduceJobName() != null) {
        job.setJobName(options.getMapreduceJobName());
      }

      propagateOptionsToJob(job);
      if (isHCatJob) {
        LOG.info("Configuring HCatalog for export job");
        SqoopHCatUtilities hCatUtils = SqoopHCatUtilities.instance();
        hCatUtils.configureHCat(options, job, cmgr, tableName,
          job.getConfiguration());
      }
      configureInputFormat(job, tableName, tableClassName, null);
      configureOutputFormat(job, tableName, tableClassName);
      configureMapper(job, tableName, tableClassName);
      configureNumTasks(job);
      cacheJars(job, context.getConnManager());

      jobSetup(job);
      setJob(job);
      boolean success = runJob(job);
      if (!success) {
        throw new ExportException("Export job failed!");
      }

      if (options.isValidationEnabled()) {
        validateExport(tableName, conf, job);
      }
    } catch (InterruptedException ie) {
      throw new IOException(ie);
    } catch (ClassNotFoundException cnfe) {
      throw new IOException(cnfe);
    } finally {
      unloadJars();
      jobTeardown(job);
    }

    // Unstage the data if needed
    if (stagingEnabled) {
      // Migrate data from staging table to the output table
      try {
        LOG.info("Starting to migrate data from staging table to destination.");
        cmgr.migrateData(stagingTableName, outputTableName);
      } catch (SQLException ex) {
        LoggingUtils.logAll(LOG, "Failed to move data from staging table ("
          + stagingTableName + ") to target table ("
          + outputTableName + ")", ex);
        throw new ExportException(

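Choosing the ConnManager in the connection factory: a manually specified driver yields a GenericJdbcManager, an explicitly requested manager class is instantiated by reflection (with or without the driver argument), and otherwise each registered ManagerFactory is asked to accept the job.
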
      return new GenericJdbcManager(manualDriver, options);
    }

    // If the user specified an explicit connection manager, use it.
    if (managerClassName != null){
      ConnManager connManager = null;

      try {
        Class<ConnManager> cls = (Class<ConnManager>)
          Class.forName(managerClassName);

        // There are two constructor variants: one that takes an explicit driver
        // class and one that does not. In most cases the --driver argument is
        // not allowed, as connectors force the use of their built-in class names.
        if (manualDriver == null) {
          Constructor<ConnManager> constructor =
            cls.getDeclaredConstructor(com.cloudera.sqoop.SqoopOptions.class);
          connManager = constructor.newInstance(options);
        } else {
          Constructor<ConnManager> constructor =
            cls.getDeclaredConstructor(String.class,
                                       com.cloudera.sqoop.SqoopOptions.class);
          connManager = constructor.newInstance(manualDriver, options);
        }
      } catch (ClassNotFoundException e) {
        LOG.error("Sqoop could not found specified connection manager class "
          + managerClassName  + ". Please check that you've specified the "
          + "class correctly.");
        throw new IOException(e);
      } catch (NoSuchMethodException e) {
        LOG.error("Sqoop wasn't able to create connnection manager properly. "
          + "Some of the connectors supports explicit --driver and some "
          + "do not. Please try to either specify --driver or leave it out.");
        throw new IOException(e);
      } catch (Exception e) {
        LOG.error("Problem with bootstrapping connector manager:"
          + managerClassName);
        LOG.error(e);
        throw new IOException(e);
      }
      return connManager;
    }

    // Try all the available manager factories.
    for (ManagerFactory factory : factories) {
      LOG.debug("Trying ManagerFactory: " + factory.getClass().getName());
      ConnManager mgr = factory.accept(data);
      if (null != mgr) {
        LOG.debug("Instantiated ConnManager " + mgr.toString());
        return mgr;
      }
    }

    throw new IOException("No manager for connect string: "
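
The loop above asks each registered ManagerFactory to accept() the job; the OraOop factory in the second excerpt implements exactly this hook. As a minimal sketch of such a factory, assuming a hypothetical "mydb" connector (the class name, URL scheme and driver class are invented for illustration):

    import com.cloudera.sqoop.SqoopOptions;
    import com.cloudera.sqoop.manager.ConnManager;
    import com.cloudera.sqoop.manager.GenericJdbcManager;
    import com.cloudera.sqoop.manager.ManagerFactory;
    import com.cloudera.sqoop.metastore.JobData;

    public class MyDbManagerFactory extends ManagerFactory {
      @Override
      public ConnManager accept(JobData data) {
        SqoopOptions options = data.getSqoopOptions();
        String connectString = options.getConnectString();
        if (connectString != null
            && connectString.toLowerCase().trim().startsWith("jdbc:mydb")) {
          // Hypothetical driver class; a real connector would return its own manager.
          return new GenericJdbcManager("com.example.mydb.Driver", options);
        }
        return null; // not ours; let the next factory claim the job
      }
    }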

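Generating an Avro schema for an import: the ConnManager is passed to AvroSchemaGenerator so that the table's column types can be mapped to an Avro schema.
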
      // other types, we just use the defaults.
      job.setOutputKeyClass(Text.class);
      job.setOutputValueClass(NullWritable.class);
    } else if (options.getFileLayout()
        == SqoopOptions.FileLayout.AvroDataFile) {
      ConnManager connManager = getContext().getConnManager();
      AvroSchemaGenerator generator = new AvroSchemaGenerator(options,
          connManager, tableName);
      Schema schema = generator.generate();

      try {

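Building the bounding-values query for a split column: ConnManager.getInputBoundsQuery() may return a database-specific query, otherwise a generic SELECT MIN/MAX statement is built.
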
    // including Oracle and MySQL.
    String alias = "t1";
    int dot = col.lastIndexOf('.');
    String qualifiedName = (dot == -1) ? col : alias + col.substring(dot);

    ConnManager mgr = getContext().getConnManager();
    String ret = mgr.getInputBoundsQuery(qualifiedName, query);
    if (ret != null) {
      return ret;
    }

    return "SELECT MIN(" + qualifiedName + "), MAX(" + qualifiedName + ") "
