// Copyright 2011 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.enterprise.connector.notes;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Strings;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.google.enterprise.connector.notes.client.NotesACL;
import com.google.enterprise.connector.notes.client.NotesACLEntry;
import com.google.enterprise.connector.notes.client.NotesDatabase;
import com.google.enterprise.connector.notes.client.NotesDateTime;
import com.google.enterprise.connector.notes.client.NotesDocument;
import com.google.enterprise.connector.notes.client.NotesName;
import com.google.enterprise.connector.notes.client.NotesSession;
import com.google.enterprise.connector.notes.client.NotesView;
import com.google.enterprise.connector.notes.client.NotesViewEntry;
import com.google.enterprise.connector.notes.client.NotesViewNavigator;
import com.google.enterprise.connector.spi.RepositoryException;
import com.google.enterprise.connector.util.database.DatabaseConnectionPool;
import com.google.enterprise.connector.util.database.JdbcDatabase;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.Vector;
import java.util.logging.Level;
import java.util.logging.Logger;
/**
* Creates a cache of user, group, and role data.
*
* Tables (note that the actual table names are generated and are
* not exactly the names used here):
*
* users: userid (primary key),
* notesname (Notes username, in canonical format),
* gsaname (username used for GSA authentication;
* generated by the connector)
*
* groups: groupid (primary key)
* groupname (Notes group name)
* pseudogroup (boolean used to flag the groups constructed
* from the Notes usernames)
*
* roles: roleid (primary key)
* rolename (name of the role from database ACL)
*
* usergroups: userid (key from users table)
* groupid (key from groups table)
*
* userroles: userid (key from users table)
* roleid (key from roles table)
*
* grouproles: groupid (key from groups table)
* roleid (key from roles table)
*
* groupchildren: parentgroupid (key from groups table)
* childgroupid (key from groups table)
*
* The users table is used to map Notes usernames to the GSA
* usernames. Not every Notes user in the configured directory
* will be included here; a Notes selection formula is used to
* determine which users are included. The GSA usernames are
* generated in the connector. Notes usernames are taken from the
* FULLNAME field in the ($PeopleGroupsFlat) view of the Domino
* directory; the Notes Name class's getCanonical method is used
* to generate the cached value.
*
* The groups table is used to maintain a list of group names. In
* addition to groups from the Notes directory, this table caches
* pseudo-groups generated from the Notes usernames. For example,
* a user with a name of "cn=Z/ou=X/o=Y" will trigger the
* creation of pseudo-groups "ou=X/o=Y" and "o=Y".
*
* The roles table is used to cache a list of roles configured in
* the databases being crawled. Each database's ACL will be
* examined for existing roles. Role names will be prefixed with
* the database replica id to allow the connector to distinguish
* the same role name in different databases. Example: 12345/[role].
*
* The usergroups table represents user group memberships. For
* each entry, the given user is a member of the given group.
*
* The userroles table represents user roles. For each entry, the
* given user has been assigned the indicated role in a database ACL.
*
* The grouproles table represents group roles. For each entry, the
* given group has been assigned the indicated role in a database ACL.
*
* The groupchildren table represents nested group
* membership. For each entry, the group indicated by
* childgroupid is a member of the group indicated by
* parentgroupid.
*/
/*
* TODO: move SQL into properties file
* TODO: decide on where to handle locking; can we depend on the
* database, or do we need synchronization here?
*/
class NotesUserGroupManager {
// Source class name used for java.util.logging method-level records.
private static final String CLASS_NAME =
    NotesUserGroupManager.class.getName();
private static final Logger LOGGER = Logger.getLogger(CLASS_NAME);
// Session providing connector configuration and Notes/JDBC access.
private final NotesConnectorSession connectorSession;
// Notes resources held only while a cache update is in progress;
// acquired in setUpResources() and released in releaseResources().
private NotesSession notesSession;
private NotesDatabase connectorDatabase;
private NotesDatabase directoryDatabase;
private NotesView peopleGroupsView;
// JDBC resources for the cache database, likewise scoped to an update.
private DatabaseConnectionPool connectionPool;
// Connection settings saved in setUpResources() and restored in
// releaseResources().
private boolean originalAutoCommit;
private boolean cacheInitialized = false;
private int originalTransactionIsolation;
// In-memory cache of Notes domain/OU names and their group ids.
private final NotesDomainNames notesDomainNames;
private Connection conn;
// Generated, connector-specific table names; see the class comment
// for the schema each table holds.
@VisibleForTesting final String userTableName;
@VisibleForTesting final String groupTableName;
@VisibleForTesting final String roleTableName;
@VisibleForTesting final String userGroupsTableName;
@VisibleForTesting final String userRolesTableName;
@VisibleForTesting final String groupRolesTableName;
@VisibleForTesting final String groupChildrenTableName;
/**
 * Builds the manager, generating the connector-specific cache table
 * names and initializing the cache tables.
 */
NotesUserGroupManager(NotesConnectorSession connectorSession)
    throws RepositoryException {
  this.connectorSession = connectorSession;
  this.notesDomainNames = new NotesDomainNames();
  JdbcDatabase database = connectorSession.getConnector().getJdbcDatabase();
  String name = connectorSession.getConnector().getGoogleConnectorName();
  userTableName = database.makeTableName("users_", name);
  groupTableName = database.makeTableName("groups_", name);
  roleTableName = database.makeTableName("roles_", name);
  userGroupsTableName = database.makeTableName("usergroups_", name);
  userRolesTableName = database.makeTableName("userroles_", name);
  groupRolesTableName = database.makeTableName("grouproles_", name);
  groupChildrenTableName = database.makeTableName("groupchildren_", name);
  StringBuilder tables = new StringBuilder("Tables:");
  tables.append("\nuser: ").append(userTableName);
  tables.append("\ngroup: ").append(groupTableName);
  tables.append("\nrole: ").append(roleTableName);
  tables.append("\nuserGroups: ").append(userGroupsTableName);
  tables.append("\nuserRoles: ").append(userRolesTableName);
  tables.append("\ngroupRoles: ").append(groupRolesTableName);
  tables.append("\ngroupChildren: ").append(groupChildrenTableName);
  LOGGER.logp(Level.FINEST, CLASS_NAME, "<init>", tables.toString());
  initializeUserCache();
}
/**
 * Maps the given Notes names to the GSA names cached for them.
 *
 * @param notesSession session used to canonicalize Notes names
 * @param notesUsers candidate Notes names; when removeUsers is true,
 *     names that were successfully mapped are removed from this
 *     collection (the collection must then be mutable)
 * @param removeUsers whether to remove mapped names from notesUsers
 * @return the GSA names for the users found in the cache; an empty
 *     list when notesUsers is null or empty; null on lookup failure
 */
public Collection<String> mapNotesNamesToGsaNames(NotesSession notesSession,
    Collection<?> notesUsers, boolean removeUsers) {
  final String METHOD = "mapNotesNamesToGsaNames";
  LOGGER.entering(CLASS_NAME, METHOD);
  if (notesUsers == null || notesUsers.size() == 0) {
    return Collections.emptyList();
  }
  DatabaseConnectionPool connectionPool = null;
  Connection lookupConn = null;
  boolean isReadOnly = false;
  try {
    connectionPool = connectorSession.getConnector().getJdbcDatabase()
        .getConnectionPool();
    lookupConn = connectionPool.getConnection();
    isReadOnly = lookupConn.isReadOnly();
    lookupConn.setReadOnly(true);
    LinkedHashSet<String> gsaNames =
        new LinkedHashSet<String>(notesUsers.size());
    List<Object> verifiedUsers = new ArrayList<Object>();
    Map<Object, User> users = getSimpleUsers(notesSession, lookupConn,
        notesUsers);
    for (Map.Entry<Object, User> userData : users.entrySet()) {
      gsaNames.add(userData.getValue().getGsaName());
      verifiedUsers.add(userData.getKey());
    }
    if (removeUsers) {
      notesUsers.removeAll(verifiedUsers);
    }
    if (LOGGER.isLoggable(Level.FINEST)) {
      LOGGER.logp(Level.FINEST, CLASS_NAME, METHOD,
          "Mapped Notes names: " + notesUsers + " to GSA names: "
          + gsaNames);
    }
    return gsaNames;
  } catch (Exception e) {
    LOGGER.logp(Level.SEVERE, CLASS_NAME, METHOD,
        "Failed to map users", e);
    return null;
  } finally {
    // Guard against NPE: lookupConn is null if getConnection() threw.
    if (lookupConn != null) {
      try {
        lookupConn.setReadOnly(isReadOnly);
      } catch (SQLException e) {
        LOGGER.logp(Level.WARNING, CLASS_NAME, METHOD,
            "Failure releasing connection", e);
      }
      // Release even if restoring the read-only flag failed.
      connectionPool.releaseConnection(lookupConn);
    }
    LOGGER.exiting(CLASS_NAME, METHOD);
  }
}
/**
 * Executes the given one-parameter lookup statement with the name
 * (lower-cased) and returns the "gsaname" column of the first row,
 * or null when nothing matches or the query fails.
 * NOTE(review): the conn parameter is unused here; kept for
 * signature compatibility.
 */
private String getGsaName(Connection conn, PreparedStatement pstmt,
    String name) {
  final String METHOD = "getGsaName";
  ResultSet results = null;
  try {
    pstmt.setString(1, name.toLowerCase());
    results = pstmt.executeQuery();
    return results.next() ? results.getString("gsaname") : null;
  } catch (SQLException e) {
    LOGGER.logp(Level.WARNING, CLASS_NAME, METHOD,
        "Lookup error for name: " + name, e);
    return null;
  } finally {
    Util.close(results);
  }
}
// TODO: this method isn't currently used in the connector. It
// should use the lookup method in getSimpleUsers to find the
// user record.
/**
 * Looks up a cached user record by canonical Notes name.
 *
 * @param notesName the Notes username, in canonical format
 * @return the populated User, or null if not found or on error
 */
public User getUserByNotesName(String notesName) {
  return getUser("notesname", notesName);
}
/**
 * Looks up a cached user record by GSA (connector-generated) name.
 *
 * @param gsaName the GSA username
 * @return the populated User, or null if not found or on error
 */
public User getUserByGsaName(String gsaName) {
  return getUser("gsaname", gsaName);
}
/**
 * Looks up a single cached user record by the given users-table
 * column and assembles the user's complete group and role
 * membership (direct groups, parent groups, the implicit
 * "-default-" group, and roles acquired directly or via groups).
 *
 * @param field the users-table column to match ("notesname" or "gsaname")
 * @param value the value to look up; compared in lower case
 * @return the populated User, or null when the value is empty, no
 *     record exists, or an error occurs
 */
private User getUser(String field, String value) {
  final String METHOD = "getUser";
  LOGGER.entering(CLASS_NAME, METHOD);
  if (Strings.isNullOrEmpty(value)) {
    return null;
  }
  DatabaseConnectionPool connectionPool = null;
  Connection lookupConn = null;
  boolean isReadOnly = false;
  try {
    connectionPool = connectorSession.getConnector().getJdbcDatabase()
        .getConnectionPool();
    lookupConn = connectionPool.getConnection();
    isReadOnly = lookupConn.isReadOnly();
    lookupConn.setReadOnly(true);
    // Find the user record itself.
    PreparedStatement pstmt = lookupConn.prepareStatement("select * from "
        + userTableName + " where " + field + " = ?",
        ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY);
    ResultSet rs = null;
    long userId = -1L;
    String notesName;
    String gsaName;
    try {
      pstmt.setString(1, value.toLowerCase());
      rs = pstmt.executeQuery();
      if (!rs.next()) {
        return null;
      }
      userId = rs.getLong("userid");
      notesName = rs.getString("notesname");
      gsaName = rs.getString("gsaname");
    } finally {
      Util.close(rs);
      Util.close(pstmt);
    }
    User user = new User(userId, notesName, gsaName);
    // User is authenticated and should be a member of "-default-" group.
    user.addGroup("-default-");
    // Find user groups and nested groups.
    pstmt = lookupConn.prepareStatement(
        Util.buildString(
            "select groupname from ", groupTableName,
            " where groupid in ",
            "(select groupid from ", userGroupsTableName,
            " where userid = ?)"),
        ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY);
    try {
      pstmt.setLong(1, userId);
      rs = pstmt.executeQuery();
      while (rs.next()) {
        user.addGroup(rs.getString(1));
      }
    } finally {
      Util.close(rs);
      Util.close(pstmt);
    }
    // Find user's parent and great grand-parent groups.
    // This query queries for user's groups. From user's groups, it looks up
    // all parent ids. From parent ids, it looks up for group names.
    pstmt = lookupConn.prepareStatement(
        Util.buildString(
            "select groupname from ", groupTableName,
            " where groupid in ",
            "(select parentgroupid from ", groupChildrenTableName,
            " where childgroupid in ",
            "(select groupid from ", userGroupsTableName,
            " where userid = ?)", ")"),
        ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY);
    try {
      pstmt.setLong(1, userId);
      rs = pstmt.executeQuery();
      while (rs.next()) {
        user.addGroup(rs.getString(1));
      }
    } finally {
      Util.close(rs);
      Util.close(pstmt);
    }
    // Find their roles.
    pstmt = lookupConn.prepareStatement("select replicaid, rolename from "
        + roleTableName + " where roleid in (select roleid from "
        + userRolesTableName + " where userid = ?)",
        ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY);
    try {
      pstmt.setLong(1, userId);
      rs = pstmt.executeQuery();
      while (rs.next()) {
        user.addRole(rs.getString(1), rs.getString(2));
      }
    } finally {
      Util.close(rs);
      Util.close(pstmt);
    }
    // Find any roles they acquire because of direct group membership and
    // via parent groups.
    pstmt = lookupConn.prepareStatement("select replicaid, rolename from "
        + roleTableName + " where roleid in (select roleid from "
        + groupRolesTableName + " where groupid in "
        + "(select groupid from " + userGroupsTableName
        + " where userid = ? union select parentgroupid from "
        + groupChildrenTableName
        + " where childgroupid in (select groupid from "
        + userGroupsTableName + " where userid = ?)))",
        ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY);
    try {
      pstmt.setLong(1, userId);
      pstmt.setLong(2, userId);
      rs = pstmt.executeQuery();
      while (rs.next()) {
        user.addRole(rs.getString(1), rs.getString(2));
      }
    } finally {
      Util.close(rs);
      Util.close(pstmt);
    }
    return user;
  } catch (Exception e) {
    LOGGER.logp(Level.SEVERE, CLASS_NAME, METHOD,
        "Failed to find user record for: " + field
        + " = " + value, e);
    return null;
  } finally {
    // Guard against NPE: lookupConn is null if getConnection() threw.
    if (lookupConn != null) {
      try {
        lookupConn.setReadOnly(isReadOnly);
      } catch (SQLException e) {
        LOGGER.logp(Level.WARNING, CLASS_NAME, METHOD,
            "Failure releasing connection", e);
      }
      // Release even if restoring the read-only flag failed.
      connectionPool.releaseConnection(lookupConn);
    }
    LOGGER.exiting(CLASS_NAME, METHOD);
  }
}
/**
 * Attempts to map the strings in the userNames parameter to
 * cached user records. First attempts an exact match, then
 * uses the Name class to convert the string to a canonical
 * name, then tries to treat the string as the common name
 * portion of a username.
 *
 * @param notesSession session used to canonicalize Notes names
 * @param connection open cache-database connection (not closed here)
 * @param userNames candidate user names; null elements are skipped
 * @return map from the original name object to its matched User;
 *     names with no match are simply absent from the map
 */
private Map<Object, User> getSimpleUsers(NotesSession notesSession,
    Connection connection, Collection<?> userNames) {
  final String METHOD = "getSimpleUsers";
  PreparedStatement exactMatchStmt = null;
  PreparedStatement commonNameStmt = null;
  Map<Object, User> users = new HashMap<Object, User>();
  try {
    exactMatchStmt = connection.prepareStatement("select * from "
        + userTableName + " where notesname = ?",
        ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY);
    commonNameStmt = connection.prepareStatement("select * from "
        + userTableName + " where notesname like ?",
        ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY);
    for (Object userObj : userNames) {
      if (userObj == null) {
        continue;
      }
      String userName = userObj.toString();
      try {
        // Try an exact match.
        String lookupString = userName;
        User user = getSimpleUser(connection, exactMatchStmt, lookupString);
        if (user != null) {
          users.put(userObj, user);
          continue;
        }
        LOGGER.logp(Level.FINEST, CLASS_NAME, METHOD,
            "User not found using: " + lookupString);
        // Try converting to canonical format.
        NotesName notesName = notesSession.createName(userName);
        if (notesName != null) {
          lookupString = notesName.getCanonical();
          user = getSimpleUser(connection, exactMatchStmt, lookupString);
          if (user != null) {
            users.put(userObj, user);
            continue;
          }
          LOGGER.logp(Level.FINEST, CLASS_NAME, METHOD,
              "User not found using: " + lookupString);
        }
        // If an exact match failed, see if we have a common name.
        if (!userName.toLowerCase().startsWith("cn=")) {
          lookupString = "cn=" + userName + "/%";
          user = getSimpleUser(connection, commonNameStmt, lookupString);
          if (user != null) {
            users.put(userObj, user);
            continue;
          }
          LOGGER.logp(Level.FINEST, CLASS_NAME, METHOD,
              "User not found using: " + lookupString);
        }
        LOGGER.logp(Level.FINEST, CLASS_NAME, METHOD,
            "No record found for user: " + userName);
      } catch (Exception e) {
        LOGGER.logp(Level.SEVERE, CLASS_NAME, METHOD,
            "Failed to lookup user: " + userName, e);
      }
    }
  } catch (Exception e) {
    LOGGER.logp(Level.SEVERE, CLASS_NAME, METHOD,
        "Failed to complete user lookup: " + userNames, e);
  } finally {
    // Close the prepared statements; they were previously leaked.
    Util.close(exactMatchStmt);
    Util.close(commonNameStmt);
  }
  return users;
}
/**
 * Runs the given one-parameter lookup statement with the name
 * (lower-cased) and converts the first matching row to a User,
 * logging a warning when more than one row matches. Returns null
 * when nothing matches or the lookup fails.
 * NOTE(review): the conn parameter is unused here; kept for
 * signature compatibility.
 */
private User getSimpleUser(Connection conn, PreparedStatement pstmt,
    String name) {
  final String METHOD = "getSimpleUser";
  ResultSet results = null;
  try {
    pstmt.setString(1, name.toLowerCase());
    results = pstmt.executeQuery();
    if (!results.next()) {
      return null;
    }
    User match = new User(results.getLong("userid"),
        results.getString("notesname"), results.getString("gsaname"));
    if (results.next()) {
      LOGGER.logp(Level.WARNING, CLASS_NAME, METHOD,
          "Found more than one match for " + name);
    }
    return match;
  } catch (Exception e) {
    LOGGER.logp(Level.WARNING, CLASS_NAME, METHOD,
        "Lookup error for name: " + name, e);
    return null;
  } finally {
    Util.close(results);
  }
}
/**
 * Collects the universal IDs of all documents in the named view.
 * Returns an empty list when the database cannot be opened or the
 * view cannot be read.
 */
@VisibleForTesting
List<String> getViewUnids(NotesDatabase db, String viewName) {
  long startMillis = System.currentTimeMillis();
  List<String> unids = new LinkedList<String>();
  NotesView view = null;
  try {
    if (!db.isOpen()) {
      LOGGER.warning("Cannot open database: " + getDatabaseFilePath(db));
      return unids;
    }
    LOGGER.log(Level.FINEST,
        "Initialize UNID cache for {0} view in database {1}",
        new Object[] {viewName, db.getFilePath()});
    view = db.getView(viewName);
    view.refresh();
    // Walk the view, recycling each document once its successor
    // has been fetched.
    NotesDocument current = view.getFirstDocument();
    while (current != null) {
      unids.add(current.getUniversalID());
      NotesDocument next = view.getNextDocument(current);
      current.recycle();
      current = next;
    }
  } catch (RepositoryException e) {
    LOGGER.log(Level.WARNING, "Unable to build the UNID cache for "
        + viewName + " view", e);
  } finally {
    Util.recycle(view);
  }
  LOGGER.log(Level.FINEST,
      "UNID cache is initialized for {0} view [{1}]: {2}ms",
      new Object[] {viewName, unids.size(),
          (System.currentTimeMillis() - startMillis)});
  return unids;
}
/**
 * Updates the cached user/group data, forcing the update when the
 * cache has never been successfully initialized.
 */
public void updateUsersGroups() {
  boolean forceUpdate = !isCacheInitialized();
  updateUsersGroups(forceUpdate);
}
/**
* Updates the cached lists of people and groups. When force is
* true, the configured cache update interval is ignored and the
* user and group cache is updated.
*
* @param force if true, force an update
*/
public synchronized void updateUsersGroups(boolean force) {
  final String METHOD = "updateUsersGroups";
  LOGGER.entering(CLASS_NAME, METHOD);
  try {
    LOGGER.fine("Forcing cache update: " + force);
    // setUpResources returns false when force is false and the
    // update-interval check (shouldUpdate) says not to run yet.
    if (!setUpResources(force)) {
      return;
    }
    // Pass 0 - Reset domain cache
    List<String> userUnids =
        getViewUnids(directoryDatabase, NCCONST.DIRVIEW_VIMUSERS);
    updateNotesDomainNames(userUnids);
    // Pass 1 - Update groups
    List<String> groupUnids =
        getViewUnids(directoryDatabase, NCCONST.DIRVIEW_VIMGROUPS);
    updateGroups(groupUnids);
    // Pass 2 - Update people (reuses the UNIDs fetched in pass 0)
    updateUsers(userUnids);
    // Pass 3 - Update roles
    // Role update is moved from the maintenance thread to the traversal
    // thread so that the update only occurs when the database ACL is updated.
    // Pass 4 - Delete any users that no longer exist
    checkUserDeletions();
    // Pass 5 - Delete any groups that no longer exist
    checkGroupDeletions();
    setLastCacheUpdate();
    setCacheInitialized();
  } catch (Exception e) {
    LOGGER.logp(Level.SEVERE, CLASS_NAME, METHOD,
        "Failure updating user/group cache", e);
  } finally {
    // Always release Notes/JDBC resources, even when a pass failed.
    releaseResources();
    LOGGER.exiting(CLASS_NAME, METHOD);
  }
}
/**
 * Acquires the Notes and JDBC resources needed for a cache update.
 * Callers must invoke releaseResources() when finished.
 *
 * @param force when true, skip the update-interval check
 * @return true if resources were acquired and the update should
 *     proceed; false when the update interval has not elapsed
 * @throws RepositoryException on failure to obtain or configure the
 *     database connection
 */
@VisibleForTesting
synchronized boolean setUpResources(boolean force)
    throws RepositoryException {
  notesSession = connectorSession.createNotesSession();
  connectorDatabase = notesSession.getDatabase(
      connectorSession.getServer(), connectorSession.getDatabase());
  // Check our update interval.
  if (!force && !shouldUpdate(connectorDatabase)) {
    return false;
  }
  directoryDatabase = notesSession.getDatabase(
      connectorSession.getServer(), connectorSession.getDirectory());
  peopleGroupsView = directoryDatabase.getView(
      NCCONST.DIRVIEW_PEOPLEGROUPFLAT);
  peopleGroupsView.refresh();
  try {
    connectionPool = connectorSession.getConnector().getJdbcDatabase()
        .getConnectionPool();
    conn = connectionPool.getConnection();
    originalTransactionIsolation = conn.getTransactionIsolation();
    originalAutoCommit = conn.getAutoCommit();
    conn.setTransactionIsolation(Connection.TRANSACTION_SERIALIZABLE);
  } catch (SQLException e) {
    // Don't leave a checked-out connection behind if configuring it
    // failed; releaseResources would otherwise see stale settings.
    if (conn != null) {
      connectionPool.releaseConnection(conn);
      conn = null;
    }
    throw new RepositoryException(
        "Failure obtaining database connection for user cache", e);
  }
  return true;
}
/**
 * Releases the Notes and JDBC resources acquired by
 * setUpResources(), restoring the connection's saved auto-commit
 * and isolation settings before returning it to the pool.
 * NOTE(review): assumes Util.recycle and closeNotesSession tolerate
 * null arguments when setup failed partway — confirm.
 */
synchronized void releaseResources() {
  final String METHOD = "releaseResources";
  // Recycle Notes handles in reverse order of acquisition.
  Util.recycle(peopleGroupsView);
  peopleGroupsView = null;
  Util.recycle(directoryDatabase);
  directoryDatabase = null;
  Util.recycle(connectorDatabase);
  connectorDatabase = null;
  connectorSession.closeNotesSession(notesSession);
  notesSession = null;
  if (conn != null) {
    try {
      // Restore the settings saved in setUpResources().
      conn.setAutoCommit(originalAutoCommit);
      conn.setTransactionIsolation(originalTransactionIsolation);
    } catch (SQLException e) {
      LOGGER.logp(Level.WARNING, CLASS_NAME, METHOD,
          "Error releasing database connection", e);
    } finally {
      // Return the connection to the pool even if the restore failed.
      connectionPool.releaseConnection(conn);
      conn = null;
    }
  }
  connectionPool = null;
}
/**
 * Returns the JDBC connection currently held for a cache update;
 * null outside of an update cycle. Exposed for tests.
 */
@VisibleForTesting
Connection getConnection() {
  return conn;
}
/**
 * Returns the Notes session currently held for a cache update;
 * null outside of an update cycle. Exposed for tests.
 */
@VisibleForTesting
NotesSession getNotesSession() {
  return notesSession;
}
// Update groups
/**
* Loop over the groups in NCCONST.DIRVIEW_PEOPLEGROUPFLAT. For
* each group, create a group record and compute nested group
* membership (all groups which are contained within this group
* via 0 or more intermediate groups).
*/
@VisibleForTesting
void updateGroups(List<String> groupUnids) {
  final String METHOD = "updateGroups";
  LOGGER.entering(CLASS_NAME, METHOD);
  long startMillis = System.currentTimeMillis();
  int processed = 0;
  for (String unid : groupUnids) {
    // Periodically nudge garbage collection of Notes handles.
    if (processed++ % NCCONST.GC_INVOCATION_INTERVAL == 0) {
      Util.invokeGC();
    }
    NotesDocument groupDoc = getDocumentByUnid(directoryDatabase, unid);
    if (groupDoc == null) {
      LOGGER.log(Level.FINEST, "Group document [{0}] is not found", unid);
      continue;
    }
    String groupName = null;
    try {
      groupName = groupDoc.getItemValueString(NCCONST.GITM_LISTNAME);
      if (Strings.isNullOrEmpty(groupName)) {
        continue;
      }
      // Only process groups
      if (!isAccessControlGroup(groupDoc)) {
        LOGGER.logp(Level.FINEST, CLASS_NAME, METHOD,
            "Not a group/access control group: '" + groupName + "'");
        continue;
      }
      updateGroup(groupDoc, groupName);
    } catch (RepositoryException e) {
      LOGGER.logp(Level.WARNING, CLASS_NAME, METHOD,
          "Failed to update group cache" +
          (groupName != null ? " for " + groupName : ""), e);
    } finally {
      Util.recycle(groupDoc);
    }
  }
  LOGGER.log(Level.FINE,
      "Update groups: " + (System.currentTimeMillis() - startMillis) + "ms");
}
/**
 * Refreshes the cache record for a single group: computes the
 * group's nested (descendant) groups and rewrites its parent/child
 * rows in one transaction. Rolls back on any failure.
 *
 * @param groupDoc the group's directory document
 * @param groupName the group's list name
 */
private void updateGroup(NotesDocument groupDoc, String groupName) {
  final String METHOD = "updateGroup";
  LOGGER.entering(CLASS_NAME, METHOD);
  if (LOGGER.isLoggable(Level.FINEST)) {
    LOGGER.logp(Level.FINEST, CLASS_NAME, METHOD,
        "Processing group " + groupName);
  }
  // Update the group record and the parent/descendent group records.
  LinkedHashSet<String> processedGroups = new LinkedHashSet<String>();
  LinkedHashSet<String> nestedGroups = new LinkedHashSet<String>();
  try {
    // Find the nested groups.
    getNestedGroups(groupDoc, groupName, processedGroups, nestedGroups);
    if (LOGGER.isLoggable(Level.FINEST)) {
      LOGGER.logp(Level.FINEST, CLASS_NAME, METHOD,
          "Nested groups for " + groupName + " are: " + nestedGroups);
    }
  } catch (Exception e) {
    LOGGER.logp(Level.WARNING, CLASS_NAME, METHOD,
        "Failed to find nested group information for group: " + groupName
        + "; not updating", e);
    return;
  }
  PreparedStatement pstmt = null;
  try {
    conn.setAutoCommit(false);
    long groupId = verifyGroupExists(groupName, true);
    if (groupId != -1L) {
      // Delete previous parent/child records for this group.
      pstmt = conn.prepareStatement(
          "delete from " + groupChildrenTableName
          + " where parentgroupid = ?");
      pstmt.setLong(1, groupId);
      pstmt.executeUpdate();
      // Close before reassigning; both statements were previously leaked.
      Util.close(pstmt);
      pstmt = null;
      // Add new parent/child records for this group.
      if (nestedGroups.size() > 0) {
        pstmt = conn.prepareStatement("insert into "
            + groupChildrenTableName + " (parentgroupid, childgroupid)"
            + " values (?, ?)");
        pstmt.setLong(1, groupId);
        for (String childGroupName : nestedGroups) {
          long childGroupId = verifyGroupExists(childGroupName, true);
          if (childGroupId == -1L) {
            throw new RepositoryException("Missing group record for "
                + "child group: " + childGroupName);
          }
          pstmt.setLong(2, childGroupId);
          pstmt.executeUpdate();
        }
      }
      conn.commit();
    }
  } catch (Exception e) {
    LOGGER.logp(Level.WARNING, CLASS_NAME, METHOD,
        "Failed to update group information for group: " + groupName
        + "; not updating", e);
    try {
      conn.rollback();
    } catch (SQLException e1) {
      LOGGER.logp(Level.WARNING, CLASS_NAME, METHOD,
          "Rollback failed", e1);
    }
  } finally {
    Util.close(pstmt);  // Util.close is null-safe.
    try {
      conn.setAutoCommit(true);
    } catch (SQLException e1) {
      LOGGER.logp(Level.WARNING, CLASS_NAME, METHOD,
          "AutoCommit reset failed", e1);
    }
  }
}
/**
 * Recursively collects (into nestedGroups, lower-cased) the names
 * of all groups reachable from the given group's member list,
 * expanding wildcard ("*&#47;...") entries to their subdomains.
 * processedGroups records visited groups to break membership cycles.
 */
@SuppressWarnings("unchecked")
private void getNestedGroups(NotesDocument groupDoc, String groupName,
    Set<String> processedGroups, Set<String> nestedGroups)
    throws RepositoryException {
  final String METHOD = "getNestedGroups";
  LOGGER.entering(CLASS_NAME, METHOD);
  // Check for already processed groups to avoid cycles.
  if (!processedGroups.add(groupName.toLowerCase())) {
    return;
  }
  Vector<String> members = groupDoc.getItemValue(NCCONST.GITM_MEMBERS);
  for (String member : members) {
    String memberKey = member.toLowerCase();
    // Check for wildcard configuration in group membership.
    if (member.startsWith("*/")) {
      nestedGroups.add(memberKey);
      nestedGroups.addAll(
          notesDomainNames.getSubDomainNames(memberKey.substring(1)).keySet());
    } else {
      NotesDocument memberDoc = peopleGroupsView.getDocumentByKey(member);
      if (memberDoc != null) {
        try {
          if (isAccessControlGroup(memberDoc)) {
            nestedGroups.add(memberKey);
            getNestedGroups(memberDoc, member, processedGroups, nestedGroups);
          }
        } finally {
          Util.recycle(memberDoc);
        }
      }
    }
  }
  LOGGER.exiting(CLASS_NAME, METHOD);
}
/**
 * Store all wildcard domains in H2 and build a domain cache.
 * For each person document, the first FULLNAME value is expanded
 * into its wildcard domain names, which are created as groups (if
 * missing) and added to the in-memory domain cache.
 *
 * @param userUnids UNIDs of person documents in the directory
 */
private void updateNotesDomainNames(List<String> userUnids) {
  final String METHOD = "updateNotesDomainNames";
  LOGGER.entering(CLASS_NAME, METHOD);
  long timeStart = System.currentTimeMillis();
  int count = 0;
  for (String unid : userUnids) {
    // Periodically nudge garbage collection of Notes handles.
    if (count++ % NCCONST.GC_INVOCATION_INTERVAL == 0) {
      Util.invokeGC();
    }
    NotesDocument doc = getDocumentByUnid(directoryDatabase, unid);
    if (doc == null) {
      LOGGER.log(Level.FINEST,
          "Document [{0}] is not found in {1} database",
          new Object[] {unid, getDatabaseFilePath(directoryDatabase)});
    } else {
      try {
        // Avoid the raw Vector type; elements are only read as Object.
        Vector<?> fullNames = doc.getItemValue(NCCONST.PITM_FULLNAME);
        if (fullNames.isEmpty()) {
          continue;
        }
        // Create domains/OUs as groups in H2 if not existed and
        // update domain cache
        List<String> canonicalOUs = notesDomainNames
            .computeExpandedWildcardDomainNames((String) fullNames.get(0));
        verifyMultiDomainsExist(canonicalOUs, true);
      } catch (RepositoryException re) {
        LOGGER.log(Level.WARNING,
            "Failed to update Notes domain names for person document ["
            + unid + "]", re);
      } finally {
        Util.recycle(doc);
      }
    }
  }
  long timeFinish = System.currentTimeMillis();
  LOGGER.log(Level.FINEST, "Update Notes domain cache [{0}ms]: {1}",
      new Object[] {(timeFinish - timeStart), notesDomainNames.toString()});
  LOGGER.exiting(CLASS_NAME, METHOD);
}
/*
 * Helper method to lookup document by UNID. Returns null (after
 * logging a warning) when the document cannot be retrieved.
 */
private NotesDocument getDocumentByUnid(NotesDatabase db, String unid) {
  NotesDocument doc = null;
  try {
    doc = db.getDocumentByUNID(unid);
  } catch (RepositoryException e) {
    LOGGER.warning("Cannot find document [" + unid + "] in "
        + getDatabaseFilePath(db) + " database");
  }
  return doc;
}
/*
 * Helper method to lookup database's file path. Returns null (after
 * logging a warning) when the path cannot be retrieved.
 */
private String getDatabaseFilePath(NotesDatabase db) {
  String filePath = null;
  try {
    filePath = db.getFilePath();
  } catch (RepositoryException ex) {
    LOGGER.log(Level.WARNING, "Unable to retrieve database's file path", ex);
  }
  return filePath;
}
/**
 * Returns the cached subdomain names of the given domain.
 */
public Set<String> getSubDomains(String domainName) {
  LinkedHashMap<String, Long> subDomains =
      notesDomainNames.getSubDomainNames(domainName);
  return subDomains.keySet();
}
/**
 * Returns the group id for the given domain/OU, consulting the
 * in-memory domain cache first and falling back to the groups
 * table (optionally creating the record).
 *
 * @param ou the canonical domain/OU name
 * @param createIfNotExists passed through to verifyGroupExists
 * @return the group id, or -1 when absent and not created
 */
private long verifyDomainExists(String ou, boolean createIfNotExists) {
  final String METHOD = "verifyDomainExists";
  LOGGER.entering(CLASS_NAME, METHOD);
  try {
    Long groupId = this.notesDomainNames.get(ou);
    // A plain null check replaces the old "instanceof Long" test.
    if (groupId != null) {
      LOGGER.logp(Level.FINE, CLASS_NAME, METHOD, "Found " + ou +
          " domain in cache [ID# " + groupId + "]");
      return groupId.longValue();
    }
    long id = verifyGroupExists(ou, createIfNotExists);
    if (id != -1L) {
      LOGGER.logp(Level.FINE, CLASS_NAME, METHOD, "Verify " + ou +
          " domain in database [ID# " + id + "]");
      // Long.valueOf avoids the deprecated Long constructor.
      this.notesDomainNames.add(ou, Long.valueOf(id));
    }
    return id;
  } finally {
    // Previously skipped on the cache-hit early return.
    LOGGER.exiting(CLASS_NAME, METHOD);
  }
}
/**
 * Ensures a group record exists for each of the given OUs and adds
 * them to the notesDomainNames cache.
 *
 * Previously this method always released {@code conn} back to the
 * pool, even when the connection had been acquired by
 * setUpResources() and was still in use by the surrounding update
 * cycle (and would be released again by releaseResources()). Now a
 * connection is released here only if this method acquired it.
 *
 * @param personOUs canonical OU names to verify
 * @param createIfNotExists whether to create missing group records
 * @throws RepositoryException on failure to obtain a connection
 */
private void verifyMultiDomainsExist(List<String> personOUs,
    boolean createIfNotExists) throws RepositoryException {
  final String METHOD = "verifyMultiDomainsExist";
  LOGGER.entering(CLASS_NAME, METHOD);
  boolean origAutoCommit = true;
  boolean localConnection = false;
  // Verify database connection which is used within verifyGroupExists
  try {
    if (conn == null) {
      connectionPool = connectorSession.getConnector().getJdbcDatabase()
          .getConnectionPool();
      conn = connectionPool.getConnection();
      localConnection = true;
    }
    origAutoCommit = conn.getAutoCommit();
    conn.setAutoCommit(true);
  } catch (SQLException e) {
    LOGGER.logp(Level.SEVERE, CLASS_NAME, METHOD,
        "Failed to connect to H2 database");
    throw new RepositoryException(e);
  }
  try {
    // Loop thru each person's OU and add to notesDomainNames cache
    for (String ou : personOUs) {
      verifyDomainExists(ou, createIfNotExists);
    }
  } finally {
    try {
      conn.setAutoCommit(origAutoCommit);
    } catch (SQLException e) {
      LOGGER.logp(Level.FINE, CLASS_NAME, METHOD,
          "Failed to reset original auto commit");
    }
    // Only release a connection this method checked out itself.
    if (localConnection) {
      connectionPool.releaseConnection(conn);
      conn = null;
    }
  }
  LOGGER.exiting(CLASS_NAME, METHOD);
}
/**
 * Looks up the id for the given group name (compared in lower
 * case), optionally inserting a new group record when none exists.
 *
 * @param groupName the group (or pseudo-group/domain) name
 * @param createIfNotExists when true, insert a missing group record
 * @return the group id, or -1 when the group is absent and
 *     createIfNotExists is false, or on any lookup/creation failure
 */
private long verifyGroupExists(String groupName,
    boolean createIfNotExists) {
  final String METHOD = "verifyGroupExists";
  LOGGER.entering(CLASS_NAME, METHOD);
  PreparedStatement pstmt = null;
  ResultSet generatedKeys = null;
  try {
    pstmt = conn.prepareStatement(
        "select * from " + groupTableName + " where groupname = ?",
        ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE);
    pstmt.setString(1, groupName.toLowerCase());
    ResultSet rs = pstmt.executeQuery();
    if (rs.next()) {
      // Existing group. Closing pstmt in the finally block also
      // closes this ResultSet.
      return rs.getLong("groupid");
    }
    if (!createIfNotExists) {
      return -1L;
    }
    rs.close();
    pstmt.close();
    // Insert a new (non-pseudo) group and return its generated key.
    pstmt = conn.prepareStatement("insert into " + groupTableName
        + "(groupname, pseudogroup) values (?, false)",
        Statement.RETURN_GENERATED_KEYS);
    pstmt.setString(1, groupName.toLowerCase());
    int rows = pstmt.executeUpdate();
    if (rows == 0) {
      throw new RepositoryException(
          "Failed to create group record for " + groupName);
    }
    generatedKeys = pstmt.getGeneratedKeys();
    if (generatedKeys.next()) {
      LOGGER.logp(Level.FINE, CLASS_NAME, METHOD,
          Util.buildString("New ", groupName, " group is added to cache"));
      return generatedKeys.getLong(1);
    } else {
      throw new RepositoryException(
          "Failed to retrieve key for " + groupName);
    }
  } catch (Exception e) {
    // Failures (including the RepositoryExceptions thrown above) are
    // logged and reported to the caller as -1.
    LOGGER.logp(Level.WARNING, CLASS_NAME, METHOD,
        "Failed group lookup/creation: " + groupName, e);
    return -1L;
  } finally {
    Util.close(generatedKeys);
    Util.close(pstmt);
    LOGGER.exiting(CLASS_NAME, METHOD);
  }
}
// Update users
/**
* Loop over the users in NCCONST.DIRVIEW_PEOPLEGROUPFLAT. For
* each user, retrieve their Notes user name, construct their
* GSA user name, and verify that they should be included using
* the configured selection formula. Construct a user
* record. Compute group membership and create user/group
* records.
*/
@VisibleForTesting
void updateUsers(List<String> userUnids) {
  final String METHOD = "updateUsers";
  LOGGER.entering(CLASS_NAME, METHOD);
  long timeStart = System.currentTimeMillis();
  NotesView serverAccessView = null;
  try {
    // Formulas are configured per-connector: one selects which persons
    // to cache, the other computes the GSA (PVI) user name.
    String userSelectionFormula = connectorSession.getUserSelectionFormula();
    String userNameFormula = connectorSession.getUserNameFormula();
    if (LOGGER.isLoggable(Level.FINEST)) {
      LOGGER.logp(Level.FINEST, CLASS_NAME, METHOD,
          "User selection formula is: " + userSelectionFormula
          + "\nUser name formula is: " + userNameFormula);
    }
    serverAccessView = directoryDatabase.getView(
        NCCONST.DIRVIEW_SERVERACCESS);
    serverAccessView.refresh();
    int count = 0;
    for (String unid : userUnids) {
      NotesDocument personDoc = getDocumentByUnid(directoryDatabase, unid);
      if (personDoc == null) {
        LOGGER.log(Level.FINEST, "Person document [{0}] is not found", unid);
      } else {
        String notesName = null;
        try {
          // Periodically trigger a GC so Notes backend handles are
          // released during long runs.
          if (count++ % NCCONST.GC_INVOCATION_INTERVAL == 0) {
            Util.invokeGC();
          }
          // Only Person documents are cached; skip everything else.
          if (!personDoc.getItemValueString(NCCONST.ITMFORM).contentEquals(
              NCCONST.DIRFORM_PERSON)) {
            continue;
          }
          // The first value in this field is the Notes name; other
          // names may be present.
          Vector nameVector = personDoc.getItemValue(NCCONST.PITM_FULLNAME);
          if (nameVector.size() == 0) {
            continue;
          }
          String storedName = nameVector.firstElement().toString();
          // Canonicalize and lower-case so lookups are case-insensitive.
          notesName = notesSession.createName(storedName)
              .getCanonical().toLowerCase();
          if (LOGGER.isLoggable(Level.FINEST)) {
            LOGGER.logp(Level.FINEST, CLASS_NAME, METHOD,
                "Processing user: " + notesName + "; name from directory was: "
                + storedName);
          }
          // Get their PVI
          String pvi = evaluatePvi(userNameFormula, personDoc);
          if (0 == pvi.length()) {
            LOGGER.logp(Level.WARNING, CLASS_NAME, METHOD,
                "Could not evaluate PVI username for: " + notesName);
            continue;
          }
          if (LOGGER.isLoggable(Level.FINEST)) {
            LOGGER.logp(Level.FINEST, CLASS_NAME, METHOD, "PVI: " + pvi);
          }
          // Does this person match the selection formula?
          boolean selected = checkPersonSelectionFormula(userSelectionFormula,
              personDoc);
          if (!selected) {
            if (LOGGER.isLoggable(Level.FINEST)) {
              LOGGER.logp(Level.FINEST, CLASS_NAME, METHOD,
                  "User not selected: " + notesName);
            }
            continue;
          }
          // Create/refresh the cache records for this user.
          updateUser(personDoc, notesName, pvi, serverAccessView);
          // Log user info
          if (LOGGER.isLoggable(Level.FINE)) {
            String delim = "";
            // NOTE(review): getUser may return null if the update above
            // failed; the NPE would be caught by the catch below — confirm.
            User user = getUser("gsaname", pvi);
            Collection<String> userGroups = user.getGroups();
            StringBuilder buf = new StringBuilder();
            buf.append("All groups for ").append(user).append(": ");
            for (String grpName : userGroups) {
              buf.append(delim).append(grpName);
              delim = ", ";
            }
            LOGGER.logp(Level.FINE, CLASS_NAME, METHOD, buf.toString());
          }
        } catch (Exception e) {
          // Per-user failures are logged and skipped so one bad record
          // doesn't abort the whole batch.
          LOGGER.logp(Level.WARNING, CLASS_NAME, METHOD,
              "Failed to update user cache" +
              (notesName != null ? " for " + notesName : ""), e);
        } finally {
          Util.recycle(personDoc);
        }
      }
    }
  } catch (Exception e) {
    LOGGER.logp(Level.WARNING, CLASS_NAME, METHOD,
        "Error processing users", e);
  } finally {
    Util.recycle(serverAccessView);
    LOGGER.exiting(CLASS_NAME, METHOD);
  }
  long timeFinish = System.currentTimeMillis();
  LOGGER.log(Level.FINE, "Update users: " + (timeFinish - timeStart) + "ms");
}
/**
 * Updates the cache records for a single user inside one JDBC
 * transaction: creates the user record if needed, recomputes the user's
 * group membership, and replaces the user/group rows.
 *
 * @param personDoc the Notes person document for the user
 * @param notesName canonical, lower-cased Notes name of the user
 * @param pvi the GSA user name computed from the user name formula
 * @param serverAccessView the NCCONST.DIRVIEW_SERVERACCESS view used
 *     for group-membership lookups
 */
private void updateUser(NotesDocument personDoc,
    String notesName, String pvi, NotesView serverAccessView) {
  final String METHOD = "updateUser";
  LOGGER.entering(CLASS_NAME, METHOD);
  PreparedStatement pstmt = null;
  try {
    conn.setAutoCommit(false);
    // Create the user record (if needed).
    long userId = verifyUserExists(notesName, pvi, true);
    Set<Long> parentGroups = new LinkedHashSet<Long>();
    getParentGroupsForUser(notesName, parentGroups, serverAccessView);
    getParentGroupsForUser(NotesAuthorizationManager.getCommonName(notesName),
        parentGroups, serverAccessView);
    // getGroupsFromDN will add the DN-based groups to the
    // group cache if they don't exist, so we want it in the
    // transaction.
    getGroupsFromDN(notesName, parentGroups, serverAccessView);
    // Delete previous user/group records for this user.
    pstmt = conn.prepareStatement(
        "delete from " + userGroupsTableName
        + " where userid = ?");
    pstmt.setLong(1, userId);
    pstmt.executeUpdate();
    // Bug fix: close the delete statement before preparing the insert;
    // the original reassigned pstmt and leaked the first statement.
    Util.close(pstmt);
    pstmt = null;
    // Update user/group records.
    pstmt = conn.prepareStatement("insert into " + userGroupsTableName
        + " (userid, groupid) values(?, ?)");
    pstmt.setLong(1, userId);
    for (Long groupId : parentGroups) {
      pstmt.setLong(2, groupId);
      pstmt.executeUpdate();
    }
    conn.commit();
  } catch (Exception e) {
    LOGGER.logp(Level.WARNING, CLASS_NAME, METHOD,
        "Error updating user data for: " + notesName, e);
    try {
      conn.rollback();
    } catch (SQLException e1) {
      LOGGER.logp(Level.WARNING, CLASS_NAME, METHOD,
          "Rollback failed", e1);
    }
  } finally {
    // Bug fix: close the statement on every path; the original closed
    // it only on the error path, leaking it after a successful commit.
    Util.close(pstmt);
    try {
      conn.setAutoCommit(true);
    } catch (SQLException e1) {
      LOGGER.logp(Level.WARNING, CLASS_NAME, METHOD,
          "Reset AutoCommit failed", e1);
    }
    LOGGER.exiting(CLASS_NAME, METHOD);
  }
}
/**
 * Adds to {@code parentGroups} the cache ids of all groups that list
 * the given user name directly, plus (recursively) the parents of
 * those groups. Groups seen in the view but missing from the cache are
 * logged and skipped. A null/empty user name is a no-op.
 *
 * @param userName Notes name (or common name) used as the category key
 *     in the server-access view
 * @param parentGroups output set of group ids; existing contents kept
 * @param serverAccessView the NCCONST.DIRVIEW_SERVERACCESS view
 * @throws RepositoryException on Notes API failures while navigating
 *     the view
 */
@SuppressWarnings("unchecked")
private void getParentGroupsForUser(String userName,
    Set<Long> parentGroups, NotesView serverAccessView)
    throws RepositoryException {
  final String METHOD = "getParentGroupsForUser";
  LOGGER.entering(CLASS_NAME, METHOD);
  if (Strings.isNullOrEmpty(userName)) {
    return;
  }
  NotesViewNavigator nvnAccess = null;
  NotesViewEntry nveAccessEntry = null;
  try {
    // Walk every view entry categorized under this user name.
    nvnAccess = serverAccessView.createViewNavFromCategory(userName);
    for (nveAccessEntry = nvnAccess.getFirst();
        nveAccessEntry != null;
        nveAccessEntry = getNextViewEntry(nvnAccess, nveAccessEntry)) {
      NotesDocument accessdoc = null;
      try {
        accessdoc = nveAccessEntry.getDocument();
        String groupName =
            accessdoc.getItemValueString(NCCONST.GITM_LISTNAME);
        PreparedStatement pstmt = null;
        try {
          // Map the group name to its cache id (names are stored
          // lower-cased).
          pstmt = conn.prepareStatement("select groupId from "
              + groupTableName + " where groupName = ?");
          pstmt.setString(1, groupName.toLowerCase());
          ResultSet rs = pstmt.executeQuery();
          if (rs.next()) {
            long id = rs.getLong(1);
            parentGroups.add(id);
            // Pull in the transitive ancestors of this group too.
            getParentGroupsForGroup(id, parentGroups);
          } else {
            LOGGER.logp(Level.WARNING, CLASS_NAME, METHOD,
                "No group record for group: " + groupName);
          }
        } catch (Exception e) {
          // A single failed lookup shouldn't abort the whole walk.
          LOGGER.logp(Level.WARNING, CLASS_NAME, METHOD,
              "Failure looking up group record for " + groupName, e);
        } finally {
          // Closing the statement also closes its result set.
          Util.close(pstmt);
        }
      } finally {
        Util.recycle(accessdoc);
      }
    }
  } finally {
    Util.recycle(nveAccessEntry);
    Util.recycle(nvnAccess);
    LOGGER.exiting(CLASS_NAME, METHOD);
  }
}
/**
 * Recursively adds the ids of all ancestor groups of {@code groupId}
 * to {@code parentGroups}, walking the group-children table depth-first.
 *
 * @param groupId the child group whose ancestors are wanted
 * @param parentGroups output set; also serves as the visited set that
 *     terminates the recursion on cycles
 * @throws RepositoryException wrapping any SQLException
 */
private void getParentGroupsForGroup(long groupId,
    Set<Long> parentGroups) throws RepositoryException {
  final String METHOD = "getParentGroupsForGroup";
  LOGGER.entering(CLASS_NAME, METHOD);
  PreparedStatement lookup = null;
  try {
    lookup = conn.prepareStatement("select parentgroupid from "
        + groupChildrenTableName + " where childgroupid = ?");
    lookup.setLong(1, groupId);
    ResultSet results = lookup.executeQuery();
    while (results.next()) {
      long parentId = results.getLong(1);
      // Recurse only on first sight of a parent; a repeat means we have
      // already explored it (and guards against membership cycles).
      boolean firstVisit = parentGroups.add(parentId);
      if (firstVisit) {
        getParentGroupsForGroup(parentId, parentGroups);
      }
    }
  } catch (SQLException e) {
    LOGGER.logp(Level.WARNING, CLASS_NAME, METHOD,
        "Failure getting parent groups for " + groupId, e);
    throw new RepositoryException(e);
  } finally {
    Util.close(lookup);
    LOGGER.exiting(CLASS_NAME, METHOD);
  }
}
/**
 * Derives pseudo-groups from the organizational units in a Notes
 * distinguished name. For a name like {@code cn=x/ou=a/o=b} this adds
 * a pseudo-group for each suffix ("ou=a/o=b", "o=b"), a wildcard group
 * for each ("*&#47;ou=a/o=b", ...), and finally the global "*" group.
 *
 * @param dn the user's distinguished name; consumed left to right
 * @param groups output set of group ids
 * @param serverAccessView view used for wildcard-membership lookups
 */
private void getGroupsFromDN(String dn, Set<Long> groups,
    NotesView serverAccessView) {
  final String METHOD = "getGroupsFromDN";
  LOGGER.entering(CLASS_NAME, METHOD);
  // TODO: use the Name class to parse the name?
  int slash = dn.indexOf('/');
  while (slash != -1) {
    try {
      // Everything after the first '/' is the next-broader OU suffix.
      String ou = dn.substring(slash + 1);
      LOGGER.logp(Level.FINER, CLASS_NAME, METHOD,
          "Group list adding OU " + ou);
      long ouGroupId = verifyDomainExists(ou, true);
      if (ouGroupId != -1L) {
        markAsPseudoGroup(ouGroupId, ou);
        groups.add(ouGroupId);
      }
      // Prepend wildcard to each OU
      mapWildcardGroup("*/" + ou, groups, serverAccessView);
      dn = ou;
    } catch (Exception e) {
      LOGGER.logp(Level.WARNING, CLASS_NAME, METHOD,
          "Error creating group from dn: " + dn, e);
    }
    slash = dn.indexOf('/');
  }
  mapWildcardGroup("*", groups, serverAccessView);
  LOGGER.exiting(CLASS_NAME, METHOD);
}
/**
 * Ensures a pseudo-group exists for the given wildcard name, adds it to
 * {@code groups}, and pulls in any real groups that list the wildcard
 * as a member.
 *
 * @param wildcardName wildcard name such as "*&#47;ou=a" or "*"
 * @param groups output set of group ids
 * @param serverAccessView view used for wildcard-membership lookups
 */
private void mapWildcardGroup(String wildcardName, Set<Long> groups,
    NotesView serverAccessView) {
  long wildcardId = verifyDomainExists(wildcardName, true);
  if (wildcardId == -1L) {
    // Lookup/creation failed; nothing to map.
    return;
  }
  markAsPseudoGroup(wildcardId, wildcardName);
  groups.add(wildcardId);
  getGroupsWithWildcardMembers(wildcardName.toLowerCase(), groups,
      serverAccessView);
}
/**
 * Adds to {@code groups} the cache ids of all groups that list the
 * given wildcard name as a member, creating group records as needed.
 * A null/empty wildcard name is a no-op.
 *
 * @param wildcardName lower-cased wildcard name used as the category key
 * @param groups output set of group ids
 * @param serverAccessView the NCCONST.DIRVIEW_SERVERACCESS view
 */
private void getGroupsWithWildcardMembers(String wildcardName,
    Set<Long> groups, NotesView serverAccessView) {
  final String METHOD = "getGroupsWithWildcardMembers";
  LOGGER.entering(CLASS_NAME, METHOD);
  if (Strings.isNullOrEmpty(wildcardName)) {
    return;
  }
  NotesViewNavigator viewNav = null;
  NotesViewEntry entry = null;
  try {
    viewNav = serverAccessView.createViewNavFromCategory(wildcardName);
    entry = viewNav.getFirst();
    while (entry != null) {
      NotesDocument entryDoc = null;
      try {
        entryDoc = entry.getDocument();
        String groupName =
            entryDoc.getItemValueString(NCCONST.GITM_LISTNAME);
        // Verify group in H2 and add group
        long groupId = verifyGroupExists(groupName, true);
        if (groupId != -1L) {
          // Autoboxing replaces the deprecated new Long(...) constructor.
          groups.add(groupId);
        }
      } finally {
        // Bug fix: the document obtained from entry.getDocument() was
        // never recycled, leaking a Notes backend handle per entry.
        Util.recycle(entryDoc);
      }
      NotesViewEntry nextEntry = viewNav.getNext(entry);
      entry.recycle();
      entry = nextEntry;
    }
  } catch (RepositoryException e) {
    LOGGER.logp(Level.WARNING, CLASS_NAME, METHOD,
        Util.buildString("Failed to lookup groups for ", wildcardName, " in ",
            NCCONST.DIRVIEW_SERVERACCESS, " view"), e);
  } finally {
    Util.recycle(entry);
    Util.recycle(viewNav);
  }
  LOGGER.exiting(CLASS_NAME, METHOD);
}
/**
 * Flags a cached group as a pseudo-group (one synthesized from a DN
 * component or wildcard rather than read from the directory), so that
 * deletion checks skip it.
 *
 * @param groupId cache id of the group to flag
 * @param groupName group name, used for logging only
 */
private void markAsPseudoGroup(long groupId, String groupName) {
  final String METHOD = "markAsPseudoGroup";
  LOGGER.entering(CLASS_NAME, METHOD);
  PreparedStatement pstmt = null;
  try {
    if (LOGGER.isLoggable(Level.FINEST)) {
      LOGGER.logp(Level.FINEST, CLASS_NAME, METHOD,
          "Marking group as pseudo-group: " + groupName + "/" + groupId);
    }
    // Use a parameterized statement instead of concatenating the id
    // into the SQL text, consistent with the rest of this class.
    pstmt = conn.prepareStatement("update " + groupTableName
        + " set pseudogroup = true where groupid = ?");
    pstmt.setLong(1, groupId);
    int result = pstmt.executeUpdate();
    if (result != 1) {
      // Exactly one row should match; anything else means the group
      // record is missing or duplicated.
      LOGGER.logp(Level.WARNING, CLASS_NAME, METHOD,
          "Failed to flag group as pseudo-group: " + groupName);
    }
  } catch (SQLException e) {
    LOGGER.logp(Level.WARNING, CLASS_NAME, METHOD,
        "Failed to flag group as pseudo-group: " + groupName, e);
  } finally {
    Util.close(pstmt);
    LOGGER.exiting(CLASS_NAME, METHOD);
  }
}
/**
 * Looks up a user record by Notes name (preferred) or, failing that,
 * by GSA name (pvi), optionally creating the record when absent. When
 * an existing record is found and {@code pvi} differs from the stored
 * gsaname, the stored value is updated in place.
 *
 * NOTE(review): the create path below dereferences {@code notesName}
 * unconditionally; calling this with a null notesName, a non-null pvi,
 * and createIfNotExists == true would throw NPE — confirm callers
 * never do that.
 *
 * @param notesName canonical Notes name, or null to look up by pvi
 * @param pvi GSA user name, or null to look up by notesName only
 * @param createIfNotExists whether to insert a new record when no
 *     match is found
 * @return the user's cache id, or -1 when not found and creation was
 *     not requested
 * @throws SQLException on database errors
 * @throws RepositoryException when both keys are null, or the insert
 *     fails to create/return a key
 */
private long verifyUserExists(String notesName, String pvi,
    boolean createIfNotExists) throws SQLException, RepositoryException {
  final String METHOD = "verifyUserExists";
  LOGGER.entering(CLASS_NAME, METHOD);
  try {
    String userLookupSql;
    String key;
    if (notesName != null) {
      userLookupSql = "select * from " + userTableName
          + " where notesname = ?";
      key = notesName;
    } else if (pvi != null) {
      userLookupSql = "select * from " + userTableName
          + " where gsaname = ?";
      key = pvi;
    } else {
      throw new RepositoryException("Attempted user lookup without a key");
    }
    if (LOGGER.isLoggable(Level.FINEST)) {
      LOGGER.logp(Level.FINEST, CLASS_NAME, METHOD,
          "Looking up user with SQL: [" + userLookupSql + "] and key: "
          + key);
    }
    // Updatable result set so the gsaname column can be corrected
    // in place when the pvi has changed.
    PreparedStatement pstmt = conn.prepareStatement(userLookupSql,
        ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE);
    ResultSet rs = null;
    ResultSet generatedKeys = null;
    try {
      pstmt.setString(1, key.toLowerCase());
      rs = pstmt.executeQuery();
      if (rs.next()) {
        // See if we need to update the pvi
        if (pvi != null && !pvi.equals(rs.getString("gsaname"))) {
          rs.updateString("gsaname", pvi.toLowerCase());
          rs.updateRow();
        }
        LOGGER.logp(Level.FINEST, CLASS_NAME, METHOD,
            Util.buildString("Found user ", key, " from cache"));
        return rs.getLong("userid");
      }
      if (!createIfNotExists) {
        return -1L;
      }
      // Closing the lookup statement also closes rs; null rs so the
      // finally block doesn't touch the stale reference.
      Util.close(pstmt);
      rs = null;
      pstmt = conn.prepareStatement(
          "insert into " + userTableName
          + "(notesname, gsaname) values (?, ?)",
          Statement.RETURN_GENERATED_KEYS);
      pstmt.setString(1, notesName.toLowerCase());
      pstmt.setString(2, pvi.toLowerCase());
      int rows = pstmt.executeUpdate();
      if (rows == 0) {
        throw new RepositoryException(
            "Failed to create user record for " + notesName);
      }
      // Return the auto-generated userid of the new record.
      generatedKeys = pstmt.getGeneratedKeys();
      if (generatedKeys.next()) {
        LOGGER.logp(Level.FINE, CLASS_NAME, METHOD,
            Util.buildString("New user ", notesName.toLowerCase(),
                " [", pvi.toLowerCase(), "] is added to cache"));
        return generatedKeys.getLong(1);
      } else {
        throw new RepositoryException(
            "Failed to retrieve key for " + notesName);
      }
    } finally {
      Util.close(generatedKeys);
      Util.close(rs);
      Util.close(pstmt);
    }
  } finally {
    LOGGER.exiting(CLASS_NAME, METHOD);
  }
}
// Update roles
/**
* Loop over the databases configured for crawling. For each
* database, cache information about roles assigned to users or
* groups in the ACL.
*/
@VisibleForTesting
void updateRoles() {
  final String METHOD = "updateRoles";
  LOGGER.entering(CLASS_NAME, METHOD);
  NotesView connectorCrawlDatabaseView = null;
  NotesDocument connectorCrawlDatabaseDoc = null;
  try {
    connectorCrawlDatabaseView =
        connectorDatabase.getView(NCCONST.VIEWDATABASES);
    if (connectorCrawlDatabaseView == null) {
      return;
    }
    connectorCrawlDatabaseView.refresh();
    // Track the replica ids seen this pass so stale role data for
    // databases no longer crawled can be purged afterwards.
    Set<String> replicaIds = new LinkedHashSet<String>();
    // getNextDocument recycles the current document as it advances.
    for (connectorCrawlDatabaseDoc =
            connectorCrawlDatabaseView.getFirstDocument();
        connectorCrawlDatabaseDoc != null;
        connectorCrawlDatabaseDoc = getNextDocument(
            connectorCrawlDatabaseView, connectorCrawlDatabaseDoc)) {
      NotesDatabase crawlDatabase = null;
      String databaseName = null;
      try {
        databaseName = connectorCrawlDatabaseDoc.getItemValueString(
            NCCONST.DITM_DBNAME);
        String replicaId = connectorCrawlDatabaseDoc.getItemValueString(
            NCCONST.DITM_REPLICAID);
        LOGGER.logp(Level.FINE, CLASS_NAME, METHOD,
            "Updating roles for database: " + databaseName);
        replicaIds.add(replicaId);
        // TODO: is there anything that would cause us to skip
        // checking roles for this database? Or remove all
        // role-related records for this database?
        crawlDatabase = notesSession.getDatabase(null, null);
        crawlDatabase.openByReplicaID(
            connectorCrawlDatabaseDoc.getItemValueString(NCCONST.DITM_SERVER),
            replicaId);
        if (!crawlDatabase.isOpen()) {
          LOGGER.logp(Level.FINE, CLASS_NAME, METHOD,
              "Database could not be opened: " + databaseName);
          continue;
        }
        updateRolesForDatabase(crawlDatabase, replicaId);
      } catch (Exception e) {
        // Failures on one database don't stop the scan of the rest.
        LOGGER.logp(Level.WARNING, CLASS_NAME, METHOD,
            "Error updating roles for database: " + databaseName, e);
      } finally {
        Util.recycle(crawlDatabase);
      }
    }
    // Purge role data for replica ids not seen in this pass.
    checkDatabaseDeletions(replicaIds);
  } catch (Exception e) {
    LOGGER.logp(Level.WARNING, CLASS_NAME, METHOD,
        "Error updating roles", e);
  } finally {
    Util.recycle(connectorCrawlDatabaseDoc);
    Util.recycle(connectorCrawlDatabaseView);
    LOGGER.exiting(CLASS_NAME, METHOD);
  }
}
/**
 * Refreshes the cached role data for a single database, acquiring and
 * releasing the cache resources around the update.
 *
 * @param db the database whose ACL roles should be cached
 * @throws RepositoryException on Notes or resource-setup failures
 */
synchronized void updateRoles(NotesDatabase db) throws RepositoryException {
  try {
    setUpResources(true);
    String replicaId = db.getReplicaID();
    updateRolesForDatabase(db, replicaId);
  } finally {
    releaseResources();
  }
}
/**
 * Replaces the cached role data for one database: deletes the existing
 * records for its replica id, then walks the database ACL and records
 * the roles held by each person or group entry. All changes happen in
 * a single transaction that is rolled back on any failure.
 *
 * @param crawlDatabase the opened database whose ACL is read
 * @param databaseReplicaId the database's replica id, used as the key
 *     for the role records
 */
private void updateRolesForDatabase(NotesDatabase crawlDatabase,
    String databaseReplicaId) {
  final String METHOD = "updateRolesForDatabase";
  LOGGER.entering(CLASS_NAME, METHOD);
  NotesACL acl = null;
  NotesACLEntry aclEntry = null;
  try {
    conn.setAutoCommit(false);
    // Clear out old role data before re-reading the ACL.
    deleteRolesForDatabase(databaseReplicaId);
    acl = crawlDatabase.getACL();
    // getNextAclEntry recycles the current entry as it advances.
    for (aclEntry = acl.getFirstEntry();
        aclEntry != null;
        aclEntry = getNextAclEntry(acl, aclEntry)) {
      Vector roles = aclEntry.getRoles();
      if (LOGGER.isLoggable(Level.FINEST)) {
        LOGGER.logp(Level.FINEST, CLASS_NAME, METHOD,
            "Acl entry: " + aclEntry.getName() + "; with roles: " + roles);
      }
      int roleType = aclEntry.getUserType();
      switch (roleType) {
        case NotesACLEntry.TYPE_PERSON:
          updateRolesForUser(aclEntry.getName(), databaseReplicaId, roles);
          break;
        case NotesACLEntry.TYPE_SERVER:
        case NotesACLEntry.TYPE_SERVER_GROUP:
          // Server entries don't map to searchable principals; skip.
          break;
        default: // Treat all other cases as groups.
          updateRolesForGroup(aclEntry.getName(), databaseReplicaId, roles);
          break;
      }
    }
    conn.commit();
  } catch (Exception e) {
    try {
      conn.rollback();
    } catch (SQLException e1) {
      LOGGER.logp(Level.WARNING, CLASS_NAME, METHOD,
          "Rollback failed", e1);
    }
    LOGGER.logp(Level.WARNING, CLASS_NAME, METHOD,
        "Failed to update role data for database: " + databaseReplicaId, e);
  } finally {
    try {
      conn.setAutoCommit(true);
    } catch (SQLException e) {
      LOGGER.logp(Level.WARNING, CLASS_NAME, METHOD,
          "Failed to reset autocommit", e);
    }
    Util.recycle(aclEntry);
    Util.recycle(acl);
    LOGGER.exiting(CLASS_NAME, METHOD);
  }
}
/**
 * Deletes all cached role data for one database replica: the user-role
 * and group-role assignments first, then the role records themselves.
 *
 * @param databaseReplicaId replica id whose role data is purged
 * @throws SQLException on database errors (caller manages transactions)
 */
private void deleteRolesForDatabase(String databaseReplicaId)
    throws SQLException {
  final String METHOD = "deleteRolesForDatabase";
  LOGGER.entering(CLASS_NAME, METHOD);
  PreparedStatement deleteUserRoles = null;
  PreparedStatement deleteGroupRoles = null;
  PreparedStatement deleteRoles = null;
  try {
    deleteUserRoles = conn.prepareStatement("delete from "
        + userRolesTableName
        + " where roleid in (select roleid from " + roleTableName
        + " where replicaid = ?)");
    deleteGroupRoles = conn.prepareStatement("delete from "
        + groupRolesTableName
        + " where roleid in (select roleid from " + roleTableName
        + " where replicaid = ?)");
    deleteRoles = conn.prepareStatement("delete from " + roleTableName
        + " where replicaid = ?");
    // Assignment rows must go before the role rows they reference.
    deleteUserRoles.setString(1, databaseReplicaId);
    deleteUserRoles.executeUpdate();
    deleteGroupRoles.setString(1, databaseReplicaId);
    deleteGroupRoles.executeUpdate();
    deleteRoles.setString(1, databaseReplicaId);
    deleteRoles.executeUpdate();
  } finally {
    Util.close(deleteUserRoles);
    Util.close(deleteGroupRoles);
    Util.close(deleteRoles);
    LOGGER.exiting(CLASS_NAME, METHOD);
  }
}
/**
 * Records the given ACL roles for a user. Users not present in the
 * connector's user cache are skipped silently (logged at FINEST).
 *
 * @param notesName the ACL entry name of the user
 * @param databaseReplicaId replica id owning the roles
 * @param roles role names from the ACL entry
 * @throws SQLException on database errors
 * @throws RepositoryException if a role record cannot be created
 */
private void updateRolesForUser(String notesName, String databaseReplicaId,
    Vector roles) throws SQLException, RepositoryException {
  final String METHOD = "updateRolesForUser";
  LOGGER.entering(CLASS_NAME, METHOD);
  try {
    Map<Object, User> users = getSimpleUsers(notesSession, conn,
        Lists.newArrayList(notesName));
    if (users.isEmpty()) {
      LOGGER.logp(Level.FINEST, CLASS_NAME, METHOD,
          "ACL user not in connector user cache: " + notesName);
      return;
    }
    long userId = users.get(notesName).getUserId();
    PreparedStatement insertRole = null;
    try {
      insertRole = conn.prepareStatement("insert into " + userRolesTableName
          + " (userid, roleid) values(?, ?)");
      insertRole.setLong(1, userId);
      for (Object role : roles) {
        String roleName = role.toString();
        // Role records are created on demand.
        long roleId = verifyRoleExists(roleName, databaseReplicaId, true);
        if (roleId == -1L) {
          throw new RepositoryException("Failed to update user role: "
              + databaseReplicaId + "/" + roleName + " for user "
              + notesName);
        }
        insertRole.setLong(2, roleId);
        insertRole.executeUpdate();
      }
    } finally {
      Util.close(insertRole);
    }
  } finally {
    LOGGER.exiting(CLASS_NAME, METHOD);
  }
}
/**
 * Records the given ACL roles for a group. Groups not present in the
 * connector's group cache are skipped silently (logged at FINEST).
 *
 * @param groupName the ACL entry name of the group
 * @param databaseReplicaId replica id owning the roles
 * @param roles role names from the ACL entry
 * @throws SQLException on database errors
 * @throws RepositoryException if a role record cannot be created
 */
private void updateRolesForGroup(String groupName, String databaseReplicaId,
    Vector roles) throws SQLException, RepositoryException {
  final String METHOD = "updateRolesForGroup";
  LOGGER.entering(CLASS_NAME, METHOD);
  try {
    long groupId = verifyGroupExists(groupName, false);
    if (groupId == -1L) {
      LOGGER.logp(Level.FINEST, CLASS_NAME, METHOD,
          "ACL group not in connector group cache: " + groupName);
      return;
    }
    PreparedStatement pstmt = null;
    try {
      pstmt = conn.prepareStatement("insert into " + groupRolesTableName
          + " (groupid, roleid) values(?, ?)");
      pstmt.setLong(1, groupId);
      for (Object role : roles) {
        String roleName = role.toString();
        long roleId = verifyRoleExists(roleName, databaseReplicaId, true);
        if (roleId != -1L) {
          pstmt.setLong(2, roleId);
          pstmt.executeUpdate();
        } else {
          // Bug fix: the message said "user role" although this is the
          // group path (copy/paste from updateRolesForUser).
          throw new RepositoryException("Failed to update group role: "
              + databaseReplicaId + "/" + roleName + " for group "
              + groupName);
        }
      }
    } finally {
      Util.close(pstmt);
    }
  } finally {
    LOGGER.exiting(CLASS_NAME, METHOD);
  }
}
/**
 * Looks up a role record by name and replica id, optionally creating
 * it when absent. All failures are logged and mapped to -1 rather than
 * propagated.
 *
 * @param roleName the role name (stored lower-cased)
 * @param databaseReplicaId replica id the role belongs to
 * @param createIfNotExists whether to insert a record when no match
 *     is found
 * @return the role's cache id, or -1 when not found (and creation was
 *     not requested) or on any error
 */
private long verifyRoleExists(String roleName, String databaseReplicaId,
    boolean createIfNotExists) {
  final String METHOD = "verifyRoleExists";
  LOGGER.entering(CLASS_NAME, METHOD);
  try {
    PreparedStatement pstmt = conn.prepareStatement(
        "select * from " + roleTableName + " where rolename = ?"
        + " and replicaid = ?",
        ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY);
    ResultSet generatedKeys = null;
    try {
      pstmt.setString(1, roleName.toLowerCase());
      pstmt.setString(2, databaseReplicaId);
      ResultSet rs = pstmt.executeQuery();
      if (rs.next()) {
        // Existing record; pstmt (and rs with it) closed in finally.
        return rs.getLong("roleid");
      }
      if (!createIfNotExists) {
        return -1L;
      }
      // Close the lookup statement before reusing the variable for the
      // insert; rs is closed along with its statement.
      rs.close();
      pstmt.close();
      pstmt = conn.prepareStatement("insert into " + roleTableName
          + "(rolename, replicaId) values (?, ?)",
          Statement.RETURN_GENERATED_KEYS);
      pstmt.setString(1, roleName.toLowerCase());
      pstmt.setString(2, databaseReplicaId);
      int rows = pstmt.executeUpdate();
      if (rows == 0) {
        throw new RepositoryException(
            "Failed to create role record for " + roleName);
      }
      // Return the auto-generated roleid of the new record.
      generatedKeys = pstmt.getGeneratedKeys();
      if (generatedKeys.next()) {
        return generatedKeys.getLong(1);
      } else {
        throw new RepositoryException(
            "Failed to retrieve key for " + roleName);
      }
    } finally {
      Util.close(generatedKeys);
      Util.close(pstmt);
    }
  } catch (Exception e) {
    LOGGER.logp(Level.WARNING, CLASS_NAME, METHOD,
        "Error in lookup/creation of role: " + databaseReplicaId
        + "/" + roleName, e);
    return -1L;
  } finally {
    LOGGER.exiting(CLASS_NAME, METHOD);
  }
}
/**
* Remove any role records with a replica id not in the
* parameter list.
*/
@VisibleForTesting
void checkDatabaseDeletions(Set<String> currentReplicaIds) {
  // Parameter generified from raw Set to Set<String>; erasure-identical,
  // so existing callers (which pass Set<String>) are unaffected.
  final String METHOD = "checkDatabaseDeletions";
  LOGGER.entering(CLASS_NAME, METHOD);
  Statement stmt = null;
  try {
    // Collect replica ids present in the role cache but absent from the
    // current crawl configuration.
    Set<String> replicaIdsToDelete = new LinkedHashSet<String>();
    stmt = conn.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE,
        ResultSet.CONCUR_READ_ONLY);
    ResultSet rs = stmt.executeQuery("select distinct(replicaid)"
        + " from " + roleTableName);
    while (rs.next()) {
      String replicaId = rs.getString(1);
      if (!currentReplicaIds.contains(replicaId)) {
        replicaIdsToDelete.add(replicaId);
      }
    }
    if (replicaIdsToDelete.size() > 0) {
      LOGGER.logp(Level.FINE, CLASS_NAME, METHOD,
          "Found role cache data for database(s) that aren't" +
          " in the connector's crawl list any more: " + replicaIdsToDelete);
      // Purge all stale role data in a single transaction.
      conn.setAutoCommit(false);
      try {
        for (String replicaId : replicaIdsToDelete) {
          deleteRolesForDatabase(replicaId);
        }
        conn.commit();
      } catch (SQLException e) {
        LOGGER.logp(Level.WARNING, CLASS_NAME, METHOD,
            "Failure deleting roles for database(s)", e);
        conn.rollback();
      } finally {
        conn.setAutoCommit(true);
      }
    }
  } catch (SQLException e) {
    LOGGER.logp(Level.WARNING, CLASS_NAME, METHOD,
        "Database error", e);
  } finally {
    Util.close(stmt);
    LOGGER.exiting(CLASS_NAME, METHOD);
  }
}
// Delete data
/**
* Check each user in the cache against the view
* NCCONST.DIRVIEW_VIMUSERS. If the cached user is not in the
* Notes view, delete them from the cache.
*/
@VisibleForTesting
void checkUserDeletions() {
  final String METHOD = "checkUserDeletions";
  LOGGER.entering(CLASS_NAME, METHOD);
  NotesView usersView = null;
  Statement stmt = null;
  ArrayList<Long> usersToDelete = new ArrayList<Long>();
  try {
    String userSelectionFormula = connectorSession.getUserSelectionFormula();
    // TODO: why do we have to use this view and an abbreviated
    // full name as opposed to just using the people/groups view
    // we use elsewhere?
    usersView = directoryDatabase.getView(NCCONST.DIRVIEW_VIMUSERS);
    usersView.refresh();
    stmt = conn.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE,
        ResultSet.CONCUR_READ_ONLY);
    ResultSet rs = stmt.executeQuery("select userid,notesname from "
        + userTableName);
    while (rs.next()) {
      long userId;
      String fullName;
      try {
        userId = rs.getLong(1);
        fullName = rs.getString(2);
      } catch (SQLException e) {
        LOGGER.logp(Level.WARNING, CLASS_NAME, METHOD,
            "Failure reading user table data", e);
        continue;
      }
      try {
        // The view is keyed by the abbreviated name, so convert the
        // stored canonical name before the lookup.
        String abbrevFormula = String.format("@Name([ABBREVIATE];\"%s\")",
            fullName);
        String key = notesSession.evaluate(abbrevFormula).elementAt(0)
            .toString();
        NotesDocument notesUserDoc = usersView.getDocumentByKey(key);
        try {
          if (notesUserDoc == null) {
            // This person or group no longer exists. Remove them
            LOGGER.logp(Level.INFO, CLASS_NAME, METHOD,
                "User no longer exists in source directory"
                + " and will be deleted: " + key);
            usersToDelete.add(userId);
          } else if (!checkPersonSelectionFormula(userSelectionFormula,
              notesUserDoc)) {
            // Do an additional check for Persons to make sure they
            // still meet the selection criteria
            LOGGER.logp(Level.INFO, CLASS_NAME, METHOD,
                "User no longer meets selection criteria"
                + " and will be deleted: " + key);
            usersToDelete.add(userId);
          }
        } finally {
          // Bug fix: recycle even when the selection check throws; the
          // original recycled only on the non-null, non-throwing path.
          Util.recycle(notesUserDoc);
        }
      } catch (Exception e) {
        // Bug fix: the original omitted the exception argument here,
        // silently discarding the stack trace.
        LOGGER.logp(Level.WARNING, CLASS_NAME, METHOD,
            "Failure checking user deletion: " + fullName, e);
      }
    }
  } catch (Exception e) {
    LOGGER.logp(Level.WARNING, CLASS_NAME, METHOD,
        "Error checking deletions", e);
  } finally {
    Util.recycle(usersView);
    Util.close(stmt);
  }
  // Deletions happen after the scan so the result set isn't invalidated
  // mid-iteration.
  for (Long userId : usersToDelete) {
    try {
      removeUser(userId);
    } catch (SQLException e) {
      LOGGER.logp(Level.WARNING, CLASS_NAME, METHOD,
          "Error removing user: " + userId, e);
    }
  }
  LOGGER.exiting(CLASS_NAME, METHOD);
}
/**
* Check each group in the cache against the view
* NCCONST.DIRVIEW_VIMGROUPS. If the cached group is not in the
* Notes view, delete it from the cache.
*/
@VisibleForTesting
void checkGroupDeletions() {
  final String METHOD = "checkGroupDeletions";
  LOGGER.entering(CLASS_NAME, METHOD);
  NotesView groupView = null;
  // Deletions are collected during the scan and applied afterwards so
  // the result set isn't invalidated mid-iteration.
  ArrayList<Long> groupsToDelete = new ArrayList<Long>();
  Statement stmt = null;
  try {
    groupView = directoryDatabase.getView(NCCONST.DIRVIEW_VIMGROUPS);
    groupView.refresh();
    stmt = conn.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE,
        ResultSet.CONCUR_READ_ONLY);
    ResultSet rs = stmt.executeQuery(
        "select groupid,groupname,pseudogroup from " + groupTableName);
    while (rs.next()) {
      long groupId;
      String groupName;
      boolean pseudoGroup = false;
      try {
        groupId = rs.getLong(1);
        groupName = rs.getString(2);
        pseudoGroup = rs.getBoolean(3);
      } catch (SQLException e) {
        LOGGER.logp(Level.WARNING, CLASS_NAME, METHOD,
            "Failure reading group table data", e);
        continue;
      }
      // Pseudo-groups are synthesized from DNs/wildcards, not read
      // from the directory, so they can never be found in the view.
      if (pseudoGroup) {
        if (LOGGER.isLoggable(Level.FINEST)) {
          LOGGER.logp(Level.FINEST, CLASS_NAME, METHOD,
              "Skipping deletion check for pseudo-group: " + groupName);
        }
        continue;
      }
      try {
        // The view is keyed by the abbreviated form of the name.
        if (Util.isCanonical(groupName)) {
          NotesName notesGroupName = notesSession.createName(groupName);
          groupName = notesGroupName.getAbbreviated();
        }
        NotesDocument notesGroupDoc = groupView.getDocumentByKey(groupName);
        if (notesGroupDoc == null) {
          // This group no longer exists.
          LOGGER.logp(Level.INFO, CLASS_NAME, METHOD,
              "Group no longer exists in source directory"
              + " and will be deleted: " + groupName);
          groupsToDelete.add(groupId);
        }
        Util.recycle(notesGroupDoc);
      } catch (Exception e) {
        LOGGER.logp(Level.WARNING, CLASS_NAME, METHOD,
            "Error checking deletions for group: " + groupName, e);
      }
    }
  } catch (Exception e) {
    LOGGER.logp(Level.WARNING, CLASS_NAME, METHOD,
        "Error checking deletions", e);
  } finally {
    Util.recycle(groupView);
    Util.close(stmt);
  }
  for (Long groupId : groupsToDelete) {
    try {
      removeGroup(groupId);
    } catch (SQLException e) {
      LOGGER.logp(Level.WARNING, CLASS_NAME, METHOD,
          "Error removing group: " + groupId, e);
    }
  }
  LOGGER.exiting(CLASS_NAME, METHOD);
}
/**
 * Removes all cached data for the user with the given Notes name, by
 * resolving the name to a cache id and delegating to
 * {@link #removeUser(long)}. Unknown names are logged and ignored.
 *
 * @param notesName the user's Notes name (matched lower-cased)
 * @throws SQLException on database errors during the id lookup
 */
private void removeUser(String notesName) throws SQLException {
  final String METHOD = "removeUser";
  LOGGER.entering(CLASS_NAME, METHOD);
  LOGGER.logp(Level.FINE, CLASS_NAME, METHOD,
      "Removing user: " + notesName);
  long userId = -1;
  PreparedStatement lookup = conn.prepareStatement("select userid from "
      + userTableName + " where notesname = ?",
      ResultSet.TYPE_SCROLL_INSENSITIVE,
      ResultSet.CONCUR_READ_ONLY);
  try {
    lookup.setString(1, notesName.toLowerCase());
    ResultSet rs = lookup.executeQuery();
    if (rs.next()) {
      userId = rs.getLong(1);
    }
  } finally {
    Util.close(lookup);
  }
  if (userId == -1) {
    LOGGER.logp(Level.FINE, CLASS_NAME, METHOD,
        "Unable to find user data to remove: " + notesName);
  } else {
    removeUser(userId);
  }
  LOGGER.exiting(CLASS_NAME, METHOD);
}
/**
 * Removes all cached data for the user with the given cache id: group
 * memberships, role assignments, and the user record itself, in one
 * transaction that is rolled back on failure.
 *
 * @param userId cache id of the user to remove
 * @throws SQLException if the rollback or autocommit reset fails
 */
private void removeUser(long userId) throws SQLException {
  final String METHOD = "removeUser";
  LOGGER.entering(CLASS_NAME, METHOD);
  LOGGER.logp(Level.FINE, CLASS_NAME, METHOD,
      "Removing user: " + userId);
  // Child tables first, then the user record they reference.
  String[] deletes = {
      "delete from " + userGroupsTableName + " where userid = ?",
      "delete from " + userRolesTableName + " where userid = ?",
      "delete from " + userTableName + " where userid = ?",
  };
  try {
    conn.setAutoCommit(false);
    for (String sql : deletes) {
      executeUpdateWithIds(sql, userId);
    }
    conn.commit();
  } catch (SQLException e) {
    LOGGER.logp(Level.WARNING, CLASS_NAME, METHOD,
        "Caught exception", e);
    conn.rollback();
  } finally {
    conn.setAutoCommit(true);
  }
  LOGGER.exiting(CLASS_NAME, METHOD);
}
/**
 * Removes all cached data for the group with the given cache id:
 * parent/child links, role assignments, user memberships, and the
 * group record itself, in one transaction that is rolled back on
 * failure.
 *
 * @param groupId cache id of the group to remove
 * @throws SQLException if the rollback or autocommit reset fails
 */
private void removeGroup(long groupId) throws SQLException {
  final String METHOD = "removeGroup";
  LOGGER.entering(CLASS_NAME, METHOD);
  try {
    conn.setAutoCommit(false);
    // Link and assignment tables first, then the group record.
    executeUpdateWithIds("delete from " + groupChildrenTableName
        + " where parentgroupid = ? or childgroupid = ?",
        groupId, groupId);
    executeUpdateWithIds("delete from " + groupRolesTableName
        + " where groupid = ?", groupId);
    executeUpdateWithIds("delete from " + userGroupsTableName
        + " where groupid = ?", groupId);
    executeUpdateWithIds("delete from " + groupTableName
        + " where groupid = ?", groupId);
    conn.commit();
  } catch (SQLException e) {
    LOGGER.logp(Level.WARNING, CLASS_NAME, METHOD,
        "Caught exception", e);
    conn.rollback();
  } finally {
    conn.setAutoCommit(true);
  }
  // Consistency fix: removeUser(long) logs method exit; this method
  // previously omitted it, leaving an unbalanced entering record.
  LOGGER.exiting(CLASS_NAME, METHOD);
}
/**
 * Prepares and runs a single update statement, binding each id to the
 * consecutive 1-based parameter positions of the SQL.
 *
 * @param sql update/delete statement with one {@code ?} per id
 * @param ids values bound in order to the statement's parameters
 * @return the JDBC update count
 * @throws SQLException on database errors
 */
private int executeUpdateWithIds(String sql, long... ids)
    throws SQLException {
  PreparedStatement update = conn.prepareStatement(sql);
  try {
    int position = 1;
    for (long id : ids) {
      update.setLong(position++, id);
    }
    return update.executeUpdate();
  } finally {
    Util.close(update);
  }
}
// Manage update interval.
/**
 * Resets the stored last-cache-update timestamp to 1/1/1970 (date
 * only), forcing the next {@code shouldUpdate} check to trigger a full
 * cache rebuild.
 *
 * @return true if the configuration document was updated, false on
 *     any failure
 */
public boolean resetLastCacheUpdate() {
  final String METHOD = "resetLastCacheUpdate";
  LOGGER.entering(CLASS_NAME, METHOD);
  boolean isReset = false;
  NotesSession nSession = null;
  NotesDatabase dbConfig = null;
  NotesView vwConfig = null;
  NotesDocument docConfig = null;
  NotesDateTime dtTarget = null;
  try {
    nSession = connectorSession.createNotesSession();
    dbConfig = nSession.getDatabase(
        connectorSession.getServer(), connectorSession.getDatabase());
    if (!dbConfig.isOpen()) {
      throw new RepositoryException(
          "GSA Configuration database is not opened");
    }
    // Date-only value: clearing the time component marks "any time".
    dtTarget = nSession.createDateTime("1/1/1970");
    dtTarget.setAnyTime();
    vwConfig = dbConfig.getView(NCCONST.VIEWSYSTEMSETUP);
    docConfig = vwConfig.getFirstDocument();
    if (docConfig == null) {
      LOGGER.logp(Level.SEVERE, CLASS_NAME, METHOD,
          "System configuration document not found.");
      return false;
    }
    docConfig.replaceItemValue(NCCONST.SITM_LASTCACHEUPDATE, dtTarget);
    docConfig.save(true);
    isReset = true;
  } catch (RepositoryException e) {
    // Bug fix: the original called LOGGER.log(SEVERE, CLASS_NAME, e),
    // logging the class name as the message with no context.
    LOGGER.logp(Level.SEVERE, CLASS_NAME, METHOD,
        "Error resetting last cache update time", e);
  } finally {
    Util.recycle(dtTarget);
    Util.recycle(docConfig);
    Util.recycle(vwConfig);
    Util.recycle(dbConfig);
    Util.recycle(nSession);
  }
  LOGGER.exiting(CLASS_NAME, METHOD);
  return isReset;
}
/**
 * Stamps the system configuration document with the current time as
 * the last-cache-update marker, so {@code shouldUpdate} can enforce
 * the configured poll interval.
 */
private void setLastCacheUpdate() {
  final String METHOD = "setLastCacheUpdate";
  LOGGER.entering(CLASS_NAME, METHOD);
  NotesDateTime now = null;
  NotesView systemView = null;
  NotesDocument systemDoc = null;
  try {
    // Seed with an arbitrary date, then overwrite with the current time.
    now = notesSession.createDateTime("1/1/1900");
    now.setNow();
    systemView = connectorDatabase.getView(NCCONST.VIEWSYSTEMSETUP);
    systemDoc = systemView.getFirstDocument();
    if (systemDoc == null) {
      LOGGER.logp(Level.SEVERE, CLASS_NAME, METHOD,
          "System configuration document not found.");
      return;
    }
    systemDoc.replaceItemValue(NCCONST.SITM_LASTCACHEUPDATE, now);
    systemDoc.save(true);
    LOGGER.logp(Level.INFO, CLASS_NAME, METHOD,
        "Directory Cache last update time set to " + now.toString());
  } catch (RepositoryException e) {
    // Bug fix: the original called LOGGER.log(SEVERE, CLASS_NAME, e),
    // logging the class name as the message with no context.
    LOGGER.logp(Level.SEVERE, CLASS_NAME, METHOD,
        "Error setting last cache update time", e);
  } finally {
    Util.recycle(systemDoc);
    Util.recycle(systemView);
    Util.recycle(now);
  }
  LOGGER.exiting(CLASS_NAME, METHOD);
}
/**
 * Decides whether the directory cache poll interval has elapsed since
 * the last recorded cache update. Defaults to updating when the system
 * configuration document carries no timestamp.
 *
 * @param connectorDatabase the connector configuration database
 * @return true if an update should run now; false if the interval has
 *     not elapsed or the configuration document is missing
 * @throws RepositoryException on Notes API failures
 */
private boolean shouldUpdate(NotesDatabase connectorDatabase)
    throws RepositoryException {
  final String METHOD = "shouldUpdate";
  LOGGER.entering(CLASS_NAME, METHOD);
  boolean needToUpdate = true;
  NotesDateTime lastCacheUpdate = null;
  NotesDateTime now = null;
  NotesView systemView = null;
  NotesDocument systemDoc = null;
  Vector<?> vecLastCacheUpdate = null;
  try {
    // Fallback timestamp used when no stored value exists.
    lastCacheUpdate = notesSession.createDateTime("1/1/2010");
    now = notesSession.createDateTime("1/1/1900");
    now.setNow();
    systemView = connectorDatabase.getView(NCCONST.VIEWSYSTEMSETUP);
    systemDoc = systemView.getFirstDocument();
    if (systemDoc == null) {
      LOGGER.logp(Level.SEVERE, CLASS_NAME, METHOD,
          "System configuration document not found.");
      return false;
    }
    // Get the update interval from the system configuration
    int cacheUpdateInterval = connectorSession.getCacheUpdateInterval();
    vecLastCacheUpdate = systemDoc
        .getItemValue(NCCONST.SITM_LASTCACHEUPDATE);
    if (vecLastCacheUpdate.size() > 0) {
      LOGGER.logp(Level.FINE, CLASS_NAME, METHOD,
          "vecLastCacheUpdate is " + vecLastCacheUpdate);
      lastCacheUpdate = (NotesDateTime) vecLastCacheUpdate.firstElement();
      LOGGER.logp(Level.FINE, CLASS_NAME, METHOD,
          "Last directory cache update time is: " + lastCacheUpdate);
    }
    // NOTE(review): if timeDifference returns an integral number of
    // seconds, "/ 60" is integer division and sub-minute precision is
    // truncated before the widening to double — confirm intended.
    double elapsedMinutes = now.timeDifference(lastCacheUpdate) / 60;
    LOGGER.logp(Level.FINE, CLASS_NAME, METHOD,
        "Time difference since last directory cache update is: "
        + elapsedMinutes);
    // Check poll interval
    if (cacheUpdateInterval > elapsedMinutes) {
      LOGGER.logp(Level.FINE, CLASS_NAME, METHOD,
          "Directory cache poll interval has not yet elapsed.");
      needToUpdate = false;
    }
  } finally {
    Util.recycle(now);
    // NOTE(review): presumably this overload recycles the vector's
    // Domino objects via the session rather than recycling the session
    // itself — confirm against Util.recycle.
    Util.recycle(notesSession, vecLastCacheUpdate);
    Util.recycle(lastCacheUpdate);
    Util.recycle(systemDoc);
    Util.recycle(systemView);
  }
  LOGGER.exiting(CLASS_NAME, METHOD);
  return needToUpdate;
}
/**
* Use the given formula to construct a PVI from the Notes user
* document.
*/
/**
 * Uses the given formula to construct a PVI from the Notes user
 * document. Returns the empty string (after logging a warning) when
 * the formula produces no result.
 */
private String evaluatePvi(String userNameFormula, NotesDocument doc)
    throws RepositoryException {
  final String METHOD = "evaluatePvi";
  Vector<?> results = notesSession.evaluate(userNameFormula, doc);
  // Guard against a null or empty evaluation result.
  if (results == null || results.isEmpty()) {
    LOGGER.logp(Level.WARNING, CLASS_NAME, METHOD,
        "Failed to evaluate formula: " + userNameFormula);
    return "";
  }
  return results.elementAt(0).toString();
}
/**
* Use the given formula to decide if a Notes user should be
* included in the connector's user cache. The Notes formula
* should return 1 or 0.
*/
/**
 * Uses the given formula to decide if a Notes user should be
 * included in the connector's user cache. The Notes formula
 * should return 1 or 0.
 *
 * @param userSelectionFormula Notes selection formula evaluating to 1 or 0
 * @param personDoc the person document to evaluate the formula against
 * @return true if the formula evaluates to 1 for the given document
 * @throws RepositoryException on Notes API failures
 */
@SuppressWarnings("unchecked")
private boolean checkPersonSelectionFormula(String userSelectionFormula,
    NotesDocument personDoc) throws RepositoryException {
  // A Selection formula will return a vector of doubles.
  Vector<Double> vecEvalResult = (Vector<Double>) notesSession.evaluate(
      userSelectionFormula, personDoc);
  // Treat a null or empty result (e.g. a malformed formula) as "not
  // selected" rather than throwing an unchecked exception; this
  // mirrors evaluatePvi's handling of failed evaluations.
  if (vecEvalResult == null || vecEvalResult.isEmpty()) {
    return false;
  }
  return 1 == vecEvalResult.elementAt(0).intValue();
}
/**
 * Returns true if the given directory document is a group document
 * whose type is one of the access-control group types.
 */
private boolean isAccessControlGroup(NotesDocument groupDoc)
    throws RepositoryException {
  String form = groupDoc.getItemValueString(NCCONST.ITMFORM);
  if (form.contentEquals(NCCONST.DIRFORM_GROUP)) {
    // Only process access control type groups
    String groupType = groupDoc.getItemValueString(NCCONST.GITM_GROUPTYPE);
    return NCCONST.DIR_ACCESSCONTROLGROUPTYPES.contains(groupType);
  }
  return false;
}
// Helpers
/**
 * Refreshes the view and returns the document following {@code doc},
 * recycling {@code doc}.
 *
 * @param view the view to advance through; refreshed before the fetch
 * @param doc the current document; recycled before returning
 * @return the next document in the view, or null at the end
 * @throws RepositoryException on Notes API failures
 */
private NotesDocument getNextDocument(NotesView view, NotesDocument doc)
    throws RepositoryException {
  view.refresh();
  NotesDocument nextDoc = view.getNextDocument(doc);
  // Use Util.recycle, consistent with getNextViewEntry and
  // getNextAclEntry, so that a failure while recycling doc
  // (presumably swallowed by Util.recycle — confirm) cannot
  // propagate and leak nextDoc.
  Util.recycle(doc);
  return nextDoc;
}
/**
 * Advances the navigator to the entry after {@code viewEntry},
 * recycling the entry just visited.
 */
private NotesViewEntry getNextViewEntry(NotesViewNavigator viewNav,
    NotesViewEntry viewEntry) throws RepositoryException {
  NotesViewEntry next = viewNav.getNext(viewEntry);
  Util.recycle(viewEntry);
  return next;
}
/**
 * Advances to the ACL entry after {@code aclEntry}, recycling the
 * entry just visited.
 */
private NotesACLEntry getNextAclEntry(NotesACL acl, NotesACLEntry aclEntry)
    throws RepositoryException {
  NotesACLEntry next = acl.getNextEntry(aclEntry);
  Util.recycle(aclEntry);
  return next;
}
// Database setup
@VisibleForTesting
void initializeUserCache() throws RepositoryException {
  final String METHOD = "initializeUserCache";
  Connection conn = null;
  JdbcDatabase jdbcDatabase =
      connectorSession.getConnector().getJdbcDatabase();
  try {
    conn = jdbcDatabase.getConnectionPool().getConnection();
    // Notes user names don't have a simple defined max
    // size. There are limits for each component (name, org
    // unit, org). For now, we're going with "as big as a
    // varchar can be".
    createTableAndIndexes(jdbcDatabase, conn, userTableName,
        "create table " + userTableName
        + " (userid long auto_increment primary key,"
        + " gsaname varchar(128), notesname varchar(254))",
        "Created/verified index: idx_gsaname_" + userTableName + " and "
        + " idx_notesname_" + userTableName,
        "create index if not exists idx_gsaname_" + userTableName
        + " on " + userTableName + "(gsaname)",
        "create index if not exists idx_notesname_" + userTableName
        + " on " + userTableName + "(notesname)");
    // Group names have a max size of 63, but we also create
    // groups based on DN components, so make the groupname
    // field larger than would otherwise be needed.
    createTableAndIndexes(jdbcDatabase, conn, groupTableName,
        "create table " + groupTableName
        + " (groupid long auto_increment primary key,"
        + " groupname varchar(254), pseudogroup boolean)",
        "Created/verified index: idx_groupname_groups on " + groupTableName,
        "create index if not exists idx_groupname_groups on "
        + groupTableName + "(groupname)");
    // Role names have a max size of 15.
    createTableAndIndexes(jdbcDatabase, conn, roleTableName,
        "create table " + roleTableName
        + " (roleid long auto_increment primary key,"
        + " rolename varchar(32), replicaid varchar(32))",
        "Created/verified indexes: "
        + "idx_rolename_roles and idx_replicaid_roles on " + roleTableName,
        "create index if not exists idx_rolename_roles on "
        + roleTableName + "(rolename)",
        "create index if not exists idx_replicaid_roles on "
        + roleTableName + "(replicaid)");
    // Join table: users <-> groups.
    createTableAndIndexes(jdbcDatabase, conn, userGroupsTableName,
        "create table " + userGroupsTableName + " (userid long,"
        + " groupid long)",
        "Created/verified indexes: "
        + "idx_userid_usergroups and idx_groupid_usergroups on "
        + userGroupsTableName,
        "create index if not exists idx_userid_usergroups on "
        + userGroupsTableName + "(userid)",
        "create index if not exists idx_groupid_usergroups on "
        + userGroupsTableName + "(groupid)");
    // Join table: users <-> roles.
    createTableAndIndexes(jdbcDatabase, conn, userRolesTableName,
        "create table " + userRolesTableName + " (userid long, "
        + "roleid long)",
        "Created/verified indexes: "
        + "idx_userid_userroles and idx_roleid_userroles on "
        + userRolesTableName,
        "create index if not exists idx_userid_userroles on "
        + userRolesTableName + "(userid)",
        "create index if not exists idx_roleid_userroles on "
        + userRolesTableName + "(roleid)");
    // Join table: groups <-> roles.
    createTableAndIndexes(jdbcDatabase, conn, groupRolesTableName,
        "create table " + groupRolesTableName + " (groupid long,"
        + " roleid long)",
        "Created/verified indexes: "
        + "idx_groupid_grouproles and idx_roleid_grouproles on "
        + groupRolesTableName,
        "create index if not exists idx_groupid_grouproles on "
        + groupRolesTableName + "(groupid)",
        "create index if not exists idx_roleid_grouproles on "
        + groupRolesTableName + "(roleid)");
    // Join table: parent group <-> child group.
    createTableAndIndexes(jdbcDatabase, conn, groupChildrenTableName,
        "create table " + groupChildrenTableName + " (parentgroupid long,"
        + " childgroupid long)",
        "Created/verified indexes: "
        + "idx_parentgroupid_groupchildren and idx_childgroupid_groupchildren"
        + " on " + groupChildrenTableName,
        "create index if not exists idx_parentgroupid_groupchildren on "
        + groupChildrenTableName + "(parentgroupid)",
        "create index if not exists idx_childgroupid_groupchildren on "
        + groupChildrenTableName + "(childgroupid)");
  } catch (Exception e) {
    LOGGER.logp(Level.SEVERE, CLASS_NAME, METHOD,
        "Failed to initialize user cache", e);
    throw new RepositoryException("Failed to initialize user cache", e);
  } finally {
    if (conn != null) {
      jdbcDatabase.getConnectionPool().releaseConnection(conn);
    }
  }
}

/**
 * Creates or verifies one user-cache table and its indexes, logging
 * each step. Extracted from initializeUserCache, which repeated this
 * verify/log/index/log sequence for each of seven tables.
 *
 * @param jdbcDatabase the connector's JDBC database
 * @param conn an open connection used to create the indexes
 * @param tableName the table to create or verify
 * @param createDdl the "create table" DDL for the table
 * @param indexLogMessage log text describing the created indexes
 * @param indexDdl the "create index" DDL statements for the table
 * @throws Exception whatever verifyTableExists/executeStatements throw;
 *     the caller wraps any failure in a RepositoryException
 */
private void createTableAndIndexes(JdbcDatabase jdbcDatabase,
    Connection conn, String tableName, String createDdl,
    String indexLogMessage, String... indexDdl) throws Exception {
  final String METHOD = "createTableAndIndexes";
  jdbcDatabase.verifyTableExists(tableName, new String[] { createDdl });
  LOGGER.logp(Level.INFO, CLASS_NAME, METHOD,
      "Created/verified table: " + tableName);
  Util.executeStatements(conn, true, indexDdl);
  LOGGER.logp(Level.INFO, CLASS_NAME, METHOD, indexLogMessage);
}
/**
 * Deletes all rows from the user-cache tables. SQL failures are
 * logged and swallowed (best effort).
 *
 * @param conn an open, writable database connection; not closed here
 */
void clearTables(Connection conn) {
  final String METHOD = "clearTables";
  Statement stmt = null;
  try {
    String[] tables = { userTableName, groupTableName, roleTableName,
        userGroupsTableName, userRolesTableName, groupRolesTableName,
        groupChildrenTableName
    };
    stmt = conn.createStatement();
    for (String table : tables) {
      stmt.executeUpdate("delete from " + table);
    }
  } catch (SQLException e) {
    LOGGER.logp(Level.WARNING, CLASS_NAME, METHOD,
        "Error clearing tables", e);
  } finally {
    // Close the statement on all paths; the previous version leaked it.
    // Util.close is the same cleanup idiom dropTables uses.
    Util.close(stmt);
  }
}
/**
 * Drops all user-cache tables, join tables first. Each drop is best
 * effort: failures are logged and the remaining tables are still
 * attempted. The connection's read-only flag is restored afterwards.
 */
void dropTables() {
  final String METHOD = "dropTables";
  DatabaseConnectionPool connectionPool = null;
  Connection conn = null;
  Statement stmt = null;
  boolean isReadOnly = false;
  try {
    connectorPool:
    connectionPool = connectorSession.getConnector().getJdbcDatabase()
        .getConnectionPool();
    conn = connectionPool.getConnection();
    isReadOnly = conn.isReadOnly();
    conn.setReadOnly(false);
    // Drop the join tables before the entity tables they reference.
    String[] tables = { userGroupsTableName, userRolesTableName,
        groupRolesTableName, groupChildrenTableName,
        userTableName, groupTableName, roleTableName
    };
    stmt = conn.createStatement();
    for (String table : tables) {
      try {
        stmt.executeUpdate("drop table " + table);
      } catch (Exception e) {
        LOGGER.logp(Level.WARNING, CLASS_NAME, METHOD,
            "Failed to drop table: " + table, e);
      }
    }
  } catch (Exception e) {
    LOGGER.logp(Level.WARNING, CLASS_NAME, METHOD,
        "Failed to drop tables", e);
  } finally {
    try {
      Util.close(stmt);
      // Guard against conn being null when pool lookup or
      // getConnection failed; the previous code threw a
      // NullPointerException from this finally block in that case.
      if (conn != null) {
        conn.setReadOnly(isReadOnly);
        connectionPool.releaseConnection(conn);
      }
    } catch (SQLException e) {
      LOGGER.logp(Level.WARNING, CLASS_NAME, METHOD,
          "Failure releasing connection", e);
    }
    LOGGER.exiting(CLASS_NAME, METHOD);
  }
}
/**
 * Returns true once the cache has been marked initialized via
 * setCacheInitialized. Synchronized so the flag written by one
 * thread is visible to readers on other threads.
 */
synchronized boolean isCacheInitialized() {
  return cacheInitialized;
}
/**
 * Marks the cache as initialized. Synchronized (with
 * isCacheInitialized) for cross-thread visibility of the flag.
 */
private synchronized void setCacheInitialized() {
  cacheInitialized = true;
}
/**
 * A cached Notes user: the user's Notes and GSA names plus the
 * groups and per-database roles resolved for the user. Mutators are
 * package-private; accessors return unmodifiable views.
 */
public static class User {
  private final long userId;
  private final String notesName;
  private final String gsaName;
  // Insertion-ordered so group/role enumeration is deterministic.
  private final LinkedHashSet<String> groups = Sets.newLinkedHashSet();
  // Roles decorated as "replicaid/role".
  private final LinkedHashSet<String> roles = Sets.newLinkedHashSet();
  // Undecorated role names, keyed by database replica id.
  private final Map<String, LinkedHashSet<String>> rolesByDatabase =
      Maps.newHashMap();

  User(long userId, String notesName, String gsaName) {
    this.userId = userId;
    this.notesName = notesName;
    this.gsaName = gsaName;
  }

  long getUserId() {
    return userId;
  }

  public String getNotesName() {
    return notesName;
  }

  public String getGsaName() {
    return gsaName;
  }

  void addGroup(String group) {
    groups.add(group);
  }

  /** Returns an unmodifiable view of the user's groups. */
  public Collection<String> getGroups() {
    return Collections.unmodifiableSet(groups);
  }

  /** Records a role, both decorated ("replicaid/role") and by database. */
  void addRole(String replicaId, String role) {
    roles.add(replicaId + "/" + role);
    LinkedHashSet<String> dbRoles = rolesByDatabase.get(replicaId);
    if (dbRoles == null) {
      dbRoles = Sets.newLinkedHashSet();
      rolesByDatabase.put(replicaId, dbRoles);
    }
    dbRoles.add(role);
  }

  /**
   * Returns user's roles. Roles are in the form
   * "replicaid/role".
   */
  public Collection<String> getRoles() {
    return Collections.unmodifiableSet(roles);
  }

  /**
   * Returns user's roles for a specific database. Role names
   * are not decorated with the replica id.
   */
  public Collection<String> getRolesByDatabase(String replicaId) {
    LinkedHashSet<String> dbRoles = rolesByDatabase.get(replicaId);
    // Don't insert an entry on lookup: the previous implementation
    // grew the map for every replica id ever queried (a getter with
    // a side effect and unbounded growth). An absent replica simply
    // has no roles.
    if (dbRoles == null) {
      return Collections.emptySet();
    }
    return Collections.unmodifiableSet(dbRoles);
  }

  /**
   * Returns user's groups and roles. Roles are in the form
   * "replicaid/role".
   */
  public Collection<String> getGroupsAndRoles() {
    LinkedHashSet<String> both = new LinkedHashSet<String>(groups);
    both.addAll(roles);
    return both;
  }

  @Override
  public String toString() {
    return notesName + " [" + gsaName + "]";
  }
}
}