Index: deploy/shared/pom.xml =================================================================== --- deploy/shared/pom.xml (revision 308077) +++ deploy/shared/pom.xml (working copy) @@ -1,5 +1,5 @@ - + 4.0.0 kernel @@ -227,6 +227,16 @@ compile + org.terracotta + terracotta-toolkit-1.6-runtime + 5.6.0 + + + net.sf.ehcache + ehcache-terracotta + 2.6.6 + + org.mnode.ical4j ical4j compile @@ -238,6 +248,13 @@ + + + terracotta-releases + http://www.terracotta.org/download/reflector/releases + + + scm:svn:https://source.sakaiproject.org/svn/kernel/trunk/deploy/shared scm:svn:https://source.sakaiproject.org/svn/kernel/trunk/deploy/shared Index: kernel-component/src/main/webapp/WEB-INF/event-components.xml =================================================================== --- kernel-component/src/main/webapp/WEB-INF/event-components.xml (revision 308077) +++ kernel-component/src/main/webapp/WEB-INF/event-components.xml (working copy) @@ -27,6 +27,7 @@ + true ${auto.ddl} 5 Index: kernel-component/src/main/webapp/WEB-INF/memory-components.xml =================================================================== --- kernel-component/src/main/webapp/WEB-INF/memory-components.xml (revision 308077) +++ kernel-component/src/main/webapp/WEB-INF/memory-components.xml (working copy) @@ -19,14 +19,14 @@ + + + classpath:org/sakaiproject/memory/api/ehcache.xml + + + - - - classpath:org/sakaiproject/memory/api/ehcache.xml - - - Index: kernel-impl/pom.xml =================================================================== --- kernel-impl/pom.xml (revision 308077) +++ kernel-impl/pom.xml (working copy) @@ -1,5 +1,5 @@ - + 4.0.0 kernel @@ -318,6 +318,19 @@ sakai-kernel-util ${project.version} + + + + org.sakaiproject.emailtemplateservice + emailtemplateservice-api + - + + + + terracotta-releases + http://www.terracotta.org/download/reflector/releases + + + Index: kernel-impl/src/main/java/org/sakaiproject/authz/impl/DbAuthzGroupService.java =================================================================== --- kernel-impl/src/main/java/org/sakaiproject/authz/impl/DbAuthzGroupService.java (revision 308077) +++ kernel-impl/src/main/java/org/sakaiproject/authz/impl/DbAuthzGroupService.java (working copy) @@ -54,57 +54,67 @@ */ public abstract class DbAuthzGroupService extends BaseAuthzGroupService implements Observer { + /** To avoide the dreaded ORA-01795 and the like, we need to limit to <100 the items in each in(?, ?, ...) clause, connecting them with ORs. */ + protected final static int MAX_IN_CLAUSE = 99; /** Our log (commons). */ private static Log M_log = LogFactory.getLog(DbAuthzGroupService.class); - /** All the event functions we know exist on the db. */ protected Collection m_functionCache = new HashSet(); - /** All the event role names we know exist on the db. */ protected Collection m_roleNameCache = new HashSet(); - /** Table name for realms. */ protected String m_realmTableName = "SAKAI_REALM"; - /** Table name for realm properties. */ protected String m_realmPropTableName = "SAKAI_REALM_PROPERTY"; - /** ID field for realm. */ protected String m_realmIdFieldName = "REALM_ID"; - /** AuthzGroup dbid field. */ protected String m_realmDbidField = "REALM_KEY"; - /** All "fields" for realm reading. */ protected String[] m_realmReadFieldNames = {"REALM_ID", "PROVIDER_ID", "(select MAX(ROLE_NAME) from SAKAI_REALM_ROLE where ROLE_KEY = MAINTAIN_ROLE)", "CREATEDBY", "MODIFIEDBY", "CREATEDON", "MODIFIEDON", "REALM_KEY"}; - /** All "fields" for realm update. 
*/ protected String[] m_realmUpdateFieldNames = {"REALM_ID", "PROVIDER_ID", "MAINTAIN_ROLE = (select MAX(ROLE_KEY) from SAKAI_REALM_ROLE where ROLE_NAME = ?)", "CREATEDBY", "MODIFIEDBY", "CREATEDON", "MODIFIEDON"}; - /** All "fields" for realm insert. */ protected String[] m_realmInsertFieldNames = {"REALM_ID", "PROVIDER_ID", "MAINTAIN_ROLE", "CREATEDBY", "MODIFIEDBY", "CREATEDON", "MODIFIEDON"}; - /** All "field values" for realm insert. */ - protected String[] m_realmInsertValueNames = {"?", "?", "(select MAX(ROLE_KEY) from SAKAI_REALM_ROLE where ROLE_NAME = ?)", "?", "?", "?", "?"}; - /************************************************************************************************************************************************* * Dependencies ************************************************************************************************************************************************/ - + /** All "field values" for realm insert. */ + protected String[] m_realmInsertValueNames = {"?", "?", "(select MAX(ROLE_KEY) from SAKAI_REALM_ROLE where ROLE_NAME = ?)", "?", "?", "?", "?"}; /** map of database handlers. */ protected Map databaseBeans; - /** The database handler we are using. */ protected DbAuthzGroupSql dbAuthzGroupSql; + /** If true, we do our locks in the remote database, otherwise we do them here. */ + protected boolean m_useExternalLocks = true; + /** Configuration: to run the ddl on init or not. */ + protected boolean m_autoDdl = false; + /** + * Configuration: Whether or not to automatically promote non-provided users with same status + * and role to provided + */ + protected boolean m_promoteUsersToProvided = true; + private MemoryService m_memoryService; + // KNL-600 CACHING for the realm role groups + private Cache m_realmRoleGRCache; + + private Cache authzUserGroupIdsCache; + private Cache maintainRolesCache; + public void setDatabaseBeans(Map databaseBeans) { this.databaseBeans = databaseBeans; } + /************************************************************************************************************************************************* + * Configuration + ************************************************************************************************************************************************/ + /** * returns the bean which contains database dependent code. */ @@ -120,34 +130,19 @@ { this.dbAuthzGroupSql = (databaseBeans.containsKey(vendor) ? databaseBeans.get(vendor) : databaseBeans.get("default")); } - - private MemoryService m_memoryService; + public void setMemoryService(MemoryService memoryService) { this.m_memoryService = memoryService; } - // KNL-600 CACHING for the realm role groups - private Cache m_realmRoleGRCache; - - private Cache authzUserGroupIdsCache; - - private Cache maintainRolesCache; - /** * @return the ServerConfigurationService collaborator. */ protected abstract SqlService sqlService(); - /************************************************************************************************************************************************* - * Configuration - ************************************************************************************************************************************************/ - - /** If true, we do our locks in the remote database, otherwise we do them here. */ - protected boolean m_useExternalLocks = true; - /** * Configuration: set the external locks value. - * + * * @param value * The external locks value. 
*/ @@ -155,13 +150,10 @@ { m_useExternalLocks = Boolean.valueOf(value).booleanValue(); } - - /** Configuration: to run the ddl on init or not. */ - protected boolean m_autoDdl = false; - + /** * Configuration: to run the ddl on init or not. - * + * * @param value * the auto ddl value. */ @@ -169,17 +161,15 @@ { m_autoDdl = Boolean.valueOf(value).booleanValue(); } + + /************************************************************************************************************************************************* + * Init and Destroy + ************************************************************************************************************************************************/ /** * Configuration: Whether or not to automatically promote non-provided users with same status * and role to provided - */ - protected boolean m_promoteUsersToProvided = true; - - /** - * Configuration: Whether or not to automatically promote non-provided users with same status - * and role to provided - * + * * @param promoteUsersToProvided * 'true' to promote non-provided users, 'false' to maintain their non-provided status */ @@ -188,10 +178,6 @@ m_promoteUsersToProvided = promoteUsersToProvided; } - /************************************************************************************************************************************************* - * Init and Destroy - ************************************************************************************************************************************************/ - /** * Final initialization, once all dependencies are set. */ @@ -199,9 +185,9 @@ { try { - // The observer will be notified whenever there are new events. Priority observers get notified first, before normal observers. + // The observer will be notified whenever there are new events. Priority observers get notified first, before normal observers. eventTrackingService().addPriorityObserver(this); - + // if we are auto-creating our schema, check and create if (m_autoDdl) { @@ -217,7 +203,7 @@ cacheFunctionNames(); m_realmRoleGRCache = m_memoryService.newCache("org.sakaiproject.authz.impl.DbAuthzGroupService.realmRoleGroupCache"); M_log.info("init(): table: " + m_realmTableName + " external locks: " + m_useExternalLocks); - + authzUserGroupIdsCache = m_memoryService.newCache("org.sakaiproject.authz.impl.DbAuthzGroupService.authzUserGroupIdsCache"); maintainRolesCache = m_memoryService.newCache("org.sakaiproject.authz.impl.DbAuthzGroupService.maintainRolesCache"); @@ -230,29 +216,29 @@ M_log.warn("init(): ", t); } } - + + /************************************************************************************************************************************************* + * BaseAuthzGroupService extensions + ************************************************************************************************************************************************/ + /** * Returns to uninitialized state. 
*/ public void destroy() { - authzUserGroupIdsCache.close(); - + authzUserGroupIdsCache.destroy(); + // done with event watching eventTrackingService().deleteObserver(this); - maintainRolesCache.close(); + maintainRolesCache.destroy(); M_log.info(this +".destroy()"); } - /************************************************************************************************************************************************* - * BaseAuthzGroupService extensions - ************************************************************************************************************************************************/ - /** * Construct a Storage object. - * + * * @return The new storage object. */ protected Storage newStorage() @@ -265,7 +251,7 @@ /** * Check / assure this role name is defined. - * + * * @param name * the role name. */ @@ -363,7 +349,7 @@ /** * Check / assure this function name is defined. - * + * * @param name * the role name. */ @@ -417,6 +403,10 @@ } } + /************************************************************************************************************************************************* + * Storage implementation + ************************************************************************************************************************************************/ + /** * Read all the function records, caching them */ @@ -444,33 +434,187 @@ } } - /************************************************************************************************************************************************* - * Storage implementation - ************************************************************************************************************************************************/ + /** + * Form a SQL IN() clause, but break it up with ORs to keep the size of each IN below 100 + * + * @param size + * The size + * @param field + * The field name + * @return a SQL IN() with ORs clause this large. + */ + protected String orInClause(int size, String field) + { + // Note: to avoide the dreaded ORA-01795 and the like, we need to limit to <100 the items in each in(?, ?, ...) clause, connecting them with + // ORs -ggolden + int ors = size / MAX_IN_CLAUSE; + int leftover = size - (ors * MAX_IN_CLAUSE); + StringBuilder buf = new StringBuilder(); + // enclose them all in parens if we have > 1 + if (ors > 0) + { + buf.append(" ("); + } + + buf.append(" " + field + " IN "); + + // do all the full MAX_IN_CLAUSE '?' in/ors + if (ors > 0) + { + for (int i = 0; i < ors; i++) + { + buf.append("(?"); + for (int j = 1; j < MAX_IN_CLAUSE; j++) + { + buf.append(",?"); + } + buf.append(")"); + + if (i < ors - 1) + { + buf.append(" OR " + field + " IN "); + } + } + } + + // add one more for the extra + if (leftover > 0) + { + if (ors > 0) + { + buf.append(" OR " + field + " IN "); + } + buf.append("(?"); + for (int i = 1; i < leftover; i++) + { + buf.append(",?"); + } + buf.append(")"); + } + + // enclose them all in parens if we have > 1 + if (ors > 0) + { + buf.append(" )"); + } + + return buf.toString(); + } + /** + * Get value for query & return that; needed for mssql which doesn't support select stmts in VALUES clauses + * Note that MSSQL support was removed in KNL-880, so this is a no-op. 
+ * + * @param sqlQuery + * @param bindParameter + * @return value if mssql, bindparameter if not (basically a no-op for others) + */ + protected Object getValueForSubquery(String sqlQuery, Object bindParameter) + { + return bindParameter; + } + + private String getRealmRoleKey(String roleName) { + Iterator itr = m_roleNameCache.iterator(); + while (itr.hasNext()) { + RealmRole realmRole = (RealmRole) itr.next(); + if (realmRole != null && realmRole.getName().equals(roleName)) { + return realmRole.getKey(); + } + } + return null; + } + + public void update(Observable arg0, Object arg) { + if (arg == null || !(arg instanceof Event)) + return; + Event event = (Event) arg; + + // check the event function against the functions we have notifications watching for + String function = event.getEvent(); + if (SECURE_UPDATE_AUTHZ_GROUP.equals(function) + || SECURE_UPDATE_OWN_AUTHZ_GROUP.equals(function) + || SECURE_REMOVE_AUTHZ_GROUP.equals(function) + || SECURE_JOIN_AUTHZ_GROUP.equals(function) + || SECURE_UNJOIN_AUTHZ_GROUP.equals(function) + || SECURE_ADD_AUTHZ_GROUP.equals(function)) { + // Get the resource ID + String realmId = extractEntityId(event.getResource()); + + if (realmId != null) { + for (String user : getAuthzUsersInGroups(new HashSet(Arrays.asList(realmId)))) { + authzUserGroupIdsCache.remove(user); + } + if (serverConfigurationService().getBoolean("authz.cacheGrants", true)) { + if (M_log.isDebugEnabled()) { + M_log.debug("DbAuthzGroupService update(): clear realm role cache for " + realmId); + } + + m_realmRoleGRCache.remove(realmId); + } + } else { + // This should never happen as the events we generate should always have + // a /realm/ prefix on the resource. + M_log.warn("DBAuthzGroupService update(): failed to extract realm ID from "+ event.getResource()); + } + } + + + } + + /** + * based on value from RealmRoleGroupCache + * transform a Map object into a Map object + * KNL-1037 + */ + private Map getMemberMap(Map mMap, Map roleMap) + { + Map rv = new HashMap(); + for (Map.Entry entry : mMap.entrySet()) + { + String userId = entry.getKey(); + MemberWithRoleId m = entry.getValue(); + String roleId = m.getRoleId(); + if (roleId != null && roleMap != null && roleMap.containsKey(roleId)) + { + Role role = (Role) roleMap.get(roleId); + rv.put(userId, new BaseMember(role, m.isActive(), m.isProvided(), userId, userDirectoryService())); + } + } + return rv; + } + + /** + * transform a Map object into a Map object + * to be used in RealmRoleGroupCache + * KNL-1037 + */ + private Map getMemberWithRoleIdMap(Map userGrants) + { + Map rv = new HashMap(); + for (Map.Entry entry : userGrants.entrySet()) + { + String userId = entry.getKey(); + Member member = entry.getValue(); + rv.put(userId, new MemberWithRoleId(member)); + } + return rv; + } + + /** * Covers for the BaseXmlFileStorage, providing AuthzGroup and RealmEdit parameters */ protected class DbStorage extends BaseDbFlatStorage implements Storage, SqlReader { - + private static final String REALM_USER_GRANTS_CACHE = "REALM_USER_GRANTS_CACHE"; private static final String REALM_ROLES_CACHE = "REALM_ROLES_CACHE"; private boolean promoteUsersToProvided = true; private EntityManager entityManager; private SiteService siteService; - + /** - * Configure whether or not users with same status and role will be "promoted" to - * being provided. 
- * - * @param autoPromoteNonProvidedUsers Whether or not to promote non-provided users - */ - public void setPromoteUsersToProvided(boolean promoteUsersToProvided) { - this.promoteUsersToProvided = promoteUsersToProvided; - } - - /** * Construct. */ public DbStorage(EntityManager entityManager, SiteService siteService) @@ -488,6 +632,16 @@ // setSortField(m_realmSortField, null); } + /** + * Configure whether or not users with same status and role will be "promoted" to + * being provided. + * + * @param autoPromoteNonProvidedUsers Whether or not to promote non-provided users + */ + public void setPromoteUsersToProvided(boolean promoteUsersToProvided) { + this.promoteUsersToProvided = promoteUsersToProvided; + } + public boolean check(String id) { return super.checkResource(id); @@ -510,7 +664,7 @@ /** * Complete the read process once the basic realm info has been read - * + * * @param realm * The real to complete */ @@ -521,7 +675,7 @@ /** * Complete the read process once the basic realm info has been read - * + * * @param conn * optional SQL connection to use. * @param realm @@ -550,7 +704,7 @@ } Map realmRoleGRCache = (Map)m_realmRoleGRCache.get(realm.getId()); - + if (M_log.isDebugEnabled()) { M_log.debug("DbAuthzGroupService: found " + realm.getId() + " in cache? " + (realmRoleGRCache != null)); } @@ -763,8 +917,8 @@ } // miss } - - // not in the cache + + // not in the cache String inClause = orInClause( authzGroupIds.size(), "SAKAI_REALM.REALM_ID" ); String statement = dbAuthzGroupSql.getSelectRealmUserGroupSql( inClause ); Object[] fields = new Object[authzGroupIds.size()+1]; @@ -891,7 +1045,7 @@ for (int i2 = 0; i2 < refs.length; i2++) // iterate through the groups to see if there is a swapped state in the variable { roleswap = securityService().getUserEffectiveRole("/site/" + refs[i2]); - + // break from this loop if the user is the current user and a swapped state is found if (roleswap != null && auth && userId.equals(sessionManager().getCurrentSessionUserId())) break; @@ -963,7 +1117,7 @@ /** * The transaction code to save the azg. - * + * * @param edit * The azg to save. */ @@ -1078,7 +1232,7 @@ /** * The transaction code to save the azg. - * + * * @param edit * The azg to save. */ @@ -1134,7 +1288,7 @@ /** * The transaction code to save the azg. - * + * * @param edit * The azg to save. */ @@ -1511,7 +1665,7 @@ /** * Get the fields for the database from the edit for this id, and the id again at the end if needed - * + * * @param id * The resource id * @param edit @@ -1561,7 +1715,7 @@ /** * Read from the result one set of fields to create a Resource. - * + * * @param result * The Sql query result. * @return The Resource object. 
@@ -1622,13 +1776,13 @@ // checks to see if the user is the current user and has the roleswap variable set in the session String roleswap = securityService().getUserEffectiveRole(realmId); - + if (roleswap != null && auth && userId.equals(sessionManager().getCurrentSessionUserId())) { fields[0] = roleswap; // set the field to the student role for the alternate sql statement = dbAuthzGroupSql.getCountRoleFunctionSql(); // set the function for our alternate sql } - + List resultsNew = m_sql.dbRead(statement, fields, new SqlReader() { public Object readSqlResultRecord(ResultSet result) @@ -1672,7 +1826,7 @@ M_log.debug("isAllowed():", new Exception()); return false; } - + if (M_log.isDebugEnabled()) M_log.debug("isAllowed: auth=" + auth + " userId=" + userId + " lock=" + lock + " realms=" + realms); @@ -1686,10 +1840,10 @@ // for roleswap String userSiteRef = null; String siteRef = null; - + // oracle query has different order of parameters String dbAuthzGroupSqlClassName=dbAuthzGroupSql.getClass().getName(); - + if(dbAuthzGroupSqlClassName.equals("org.sakaiproject.authz.impl.DbAuthzGroupSqlOracle")) { fields[pos++] = userId; } @@ -1699,8 +1853,8 @@ { // These checks for roleswap assume there is at most one of each type of site in the realms collection, // i.e. one ordinary site and one user site - - if (realmId.startsWith(SiteService.REFERENCE_ROOT + Entity.SEPARATOR)) // Starts with /site/ + + if (realmId.startsWith(SiteService.REFERENCE_ROOT + Entity.SEPARATOR)) // Starts with /site/ { if (userId != null && userId.equals(siteService.getSiteUserId(realmId))) { userSiteRef = realmId; @@ -1718,7 +1872,7 @@ { fields[pos++] = realmId; } - + /* Delegated access essentially behaves like roleswap except instead of just specifying which role, you can also specify * the realm as well. The access map is populated by an Event Listener that listens for dac.checkaccess and is stored in the session * attribute: delegatedaccess.accessmap. This is a map of: SiteRef -> String[]{realmId, roleId}. Delegated access @@ -1726,7 +1880,7 @@ */ String[] delegatedAccessGroupAndRole = getDelegatedAccessRealmRole(siteRef); boolean delegatedAccess = delegatedAccessGroupAndRole != null && delegatedAccessGroupAndRole.length == 2; - + // Would be better to get this initially to make the code more efficient, but the realms collection // does not have a common order for the site's id which is needed to determine if the session variable exists // ZQIAN: since the role swap is only done at the site level, for group reference, use its parent site reference instead. @@ -1741,7 +1895,7 @@ } else { roleswap = securityService().getUserEffectiveRole(siteRef); } - + List results = null; // Only check roleswap if the method is being called for the current user @@ -1748,14 +1902,14 @@ if ( (roleswap != null || delegatedAccess) && userId != null && userId.equals(sessionManager().getCurrentSessionUserId()) ) { - + // First check in the user's own my workspace site realm if it's in the list // We don't want to change the user's role in their own site, so call the regular function. // This catches permission checks for entity references such as user dropboxes. 
- + if (userSiteRef != null && isAllowed(userId, lock, userSiteRef)) return true; - + // Then check the site where there's a roleswap effective if (M_log.isDebugEnabled()) M_log.debug("userId="+userId+", siteRef="+siteRef+", roleswap="+roleswap+", delegatedAccess="+delegatedAccess); Object[] fields2 = new Object[3]; @@ -1767,7 +1921,7 @@ fields2[0] = delegatedAccessGroupAndRole[1]; } fields2[1] = lock; - if (roleswap == null + if (roleswap == null && delegatedAccess && delegatedAccessGroupAndRole != null ) { @@ -1779,7 +1933,7 @@ if (M_log.isDebugEnabled()) M_log.debug("roleswap/dac fields: "+Arrays.toString(fields2)); statement = dbAuthzGroupSql.getCountRoleFunctionSql(); - + results = m_sql.dbRead(statement, fields2, new SqlReader() { public Object readSqlResultRecord(ResultSet result) @@ -1795,7 +1949,7 @@ } } }); - + boolean rv = false; int count = -1; if (!results.isEmpty()) @@ -1805,7 +1959,7 @@ } if (rv) // if true, go ahead and return return true; - + // Then check the rest of the realms. For example these could be subfolders under /content/group/... if(roleswap != null){ for (String realmId : realms) @@ -1812,9 +1966,9 @@ { if (realmId == siteRef || realmId == userSiteRef) // we've already checked these so no need to do it again continue; - + fields2[2] = realmId; - + results = m_sql.dbRead(statement, fields2, new SqlReader() { public Object readSqlResultRecord(ResultSet result) @@ -1830,7 +1984,7 @@ } } }); - + count = -1; if (!results.isEmpty()) { @@ -1862,7 +2016,7 @@ } } }); - + boolean rv = false; int count = -1; if (!results.isEmpty()) @@ -1874,12 +2028,12 @@ return rv; } - /** + /** * Delegated access essentially behaves like roleswap except instead of just specifying which role, you can also specify * the realm as well. The access map is populated by an Event Listener that listens for dac.checkaccess and is stored in the session * attribute: delegatedaccess.accessmap. This is a map of: SiteRef -> String[]{realmId, roleId}. * Delegated access will defer to roleswap if it is set. - * + * * @param siteRef the site realm id * @return String[]{realmId, roleId} or null if delegated access is disabled */ @@ -1897,11 +2051,11 @@ } //if the siteRef doesn't exist in the map, then that means that we haven't checked delegatedaccess for this user and site. //if the user doesn't have access, the map will have a null value for that siteRef. - if (siteRef != null + if (siteRef != null && (delegatedAccessMap == null || !delegatedAccessMap.containsKey(siteRef))){ /* the delegatedaccess.accessmapflag is set during login and is only set for user's who have some kind of delegated access * if the user has access somewhere but either the map is null or there isn't any record for this site, then that means - * this site hasn't been checked yet. By posting an event, a DelegatedAccess observer will check this site's access for this user + * this site hasn't been checked yet. 
By posting an event, a DelegatedAccess observer will check this site's access for this user * and store it in the user's session */ eventTrackingService().post(eventTrackingService().newEvent("dac.checkaccess", siteRef, false, NotificationService.NOTI_REQUIRED)); @@ -1913,8 +2067,8 @@ } } - if (siteRef != null - && delegatedAccessMap != null + if (siteRef != null + && delegatedAccessMap != null && delegatedAccessMap.containsKey(siteRef) && delegatedAccessMap.get(siteRef) instanceof String[]) { if (M_log.isDebugEnabled()) M_log.debug("siteRef="+siteRef+", delegatedAccessMap="+delegatedAccessMap); @@ -1971,13 +2125,13 @@ */ public Set getUsersIsAllowedByGroup(String lock, Collection realms) { - final Set usersByGroup = new HashSet(); - + final Set usersByGroup = new HashSet(); + if ((lock == null) || (realms != null && realms.isEmpty())) return usersByGroup; - + String sql; Object[] fields; - + if (realms != null) { sql = dbAuthzGroupSql.getSelectRealmRoleGroupUserIdSql(orInClause(realms.size(), "REALM_ID")); fields = new Object[realms.size() + 1]; @@ -1991,7 +2145,7 @@ } else { sql = dbAuthzGroupSql.getSelectRealmRoleGroupUserIdSql("true"); fields = new Object[1]; - fields[0] = lock; + fields[0] = lock; } // read the strings @@ -2004,7 +2158,7 @@ String[] useringroup = new String[2]; useringroup[0] = result.getString(1); useringroup[1] = result.getString(2); - + usersByGroup.add( useringroup ); } catch (SQLException ignore) @@ -2014,22 +2168,22 @@ return null; } }); - + return usersByGroup; } /** * {@inheritDoc} - */ + */ public Map getUserCountIsAllowed(String function, Collection azGroups) { final Map userCountByGroup = new HashMap(); - + if ((function == null) || (azGroups != null && azGroups.isEmpty())) return userCountByGroup; - + String sql; Object[] fields; - + if (azGroups != null) { sql = dbAuthzGroupSql.getSelectRealmRoleGroupUserCountSql(orInClause(azGroups.size(), "REALM_ID")); fields = new Object[azGroups.size() + 1]; @@ -2040,7 +2194,7 @@ { String roleRealm = (String) i.next(); fields[pos++] = roleRealm; - } + } } else { sql = dbAuthzGroupSql.getSelectRealmRoleGroupUserCountSql("true"); fields = new Object[1]; @@ -2065,11 +2219,11 @@ return null; } }); - + return userCountByGroup; } - + /** * {@inheritDoc} */ @@ -2144,7 +2298,7 @@ else { existing.put(rar.realmId, rar.role); - + // Record inactive status if (!rar.active) { providedInactive.put(rar.realmId, rar.role); @@ -2256,7 +2410,7 @@ if (providedInactive.get(realmId) != null) { active = false; } - + toInsert.add(new RealmAndRole(realmId, role, active, true)); } } @@ -2299,14 +2453,14 @@ M_log.debug("refreshAuthzGroup()"); if ((realm == null) || (m_provider == null)) return; - boolean synchWithContainingRealm = serverConfigurationService().getBoolean("authz.synchWithContainingRealm", true); - + boolean synchWithContainingRealm = serverConfigurationService().getBoolean("authz.synchWithContainingRealm", true); + // check to see whether this is of group realm or not // if of Group Realm, get the containing Site Realm String containingRealmId = null; AuthzGroup containingRealm = null; Reference ref = entityManager.newReference(realm.getId()); - if (SiteService.APPLICATION_ID.equals(ref.getType()) + if (SiteService.APPLICATION_ID.equals(ref.getType()) && SiteService.GROUP_SUBTYPE.equals(ref.getSubType())) { containingRealmId = ref.getContainer(); @@ -2323,7 +2477,7 @@ M_log.warn("refreshAuthzGroup: cannot find containing realm for id: " + containingRealmRef); } } - + String sql = ""; // Note: the realm is still lazy - we have 
the realm id but don't need to worry about changing grants @@ -2354,7 +2508,7 @@ else { existing.put(uar.userId, uar.role); - + // Record inactive status if (!uar.active) { providedInactive.put(uar.userId, uar.role); @@ -2412,7 +2566,7 @@ boolean active = true; String existingRole = (String) existing.get(userId); String nonProviderRole = (String) nonProvider.get(userId); - + if (!synchWithContainingRealm) { if ((nonProviderRole == null) && ((existingRole == null) || (!existingRole.equals(role)))) @@ -2421,7 +2575,7 @@ if (providedInactive.get(userId) != null) { active = false; } - + // this is either at site level or at the group level but no need to synchronize toInsert.add(new UserAndRole(userId, role, active, true)); } @@ -2437,7 +2591,7 @@ boolean cMemberActive = cMember.isActive(); // synchronize with parent realm role definition and active status toInsert.add(new UserAndRole(userId, cMemberRoleId, cMemberActive, cMember.isProvided())); - + if ((existingRole != null && !existingRole.equals(cMemberRoleId)) // overriding existing authz group role ||!role.equals(cMemberRoleId)) // overriding provided role { @@ -2452,7 +2606,7 @@ } else { - // this is either at site level + // this is either at site level toInsert.add(new UserAndRole(userId, role, active, true)); } } @@ -2475,18 +2629,18 @@ { String userEid = userDirectoryService().getUserEid(userId); String targetRole = (String) target.get(userEid); - + if (role.equals(targetRole)) { // remove from non-provided and add as provided toDelete.add(userId); - + // Check whether this user was inactive in the site previously, if so preserve status boolean active = true; if (providedInactive.get(userId) != null) { active = false; } - + toInsert.add(new UserAndRole(userId, role, active, true)); } } @@ -2494,10 +2648,10 @@ { M_log.warn("refreshAuthzGroup: cannot find eid for user: " + userId); } - + } } - + // if any, do it if ((toDelete.size() > 0) || (toInsert.size() > 0)) { @@ -2561,6 +2715,164 @@ return grants; } + /** + * {@inheritDoc} + */ + public String getUserRole(String userId, String azGroupId) + { + if ((userId == null) || (azGroupId == null)) return null; + + // checks to see if the user is the current user and has the roleswap variable set in the session + String rv = null; + + if (userId.equals(sessionManager().getCurrentSessionUserId())) { + rv = securityService().getUserEffectiveRole(azGroupId); + } + + // otherwise drop through to the usual check + if (rv == null) { + String sql = dbAuthzGroupSql.getSelectRealmRoleNameSql(); + Object[] fields = new Object[2]; + fields[0] = azGroupId; + fields[1] = userId; + + // read the string + List results = m_sql.dbRead(sql, fields, null); + + // prepare the return + if ((results != null) && (!results.isEmpty())) + { + rv = (String) results.get(0); + if (results.size() > 1) + { + M_log.warn("getUserRole: user: " + userId + " multiple roles"); + } + } + } + + return rv; + } + + /** + * {@inheritDoc} + */ + public Map getUserRoles(String userId, Collection azGroupIds) + { + final HashMap rv = new HashMap(); + if (userId == null || "".equals(userId)) + return rv; + + String inClause; + int azgCount = azGroupIds == null ? 
0 : azGroupIds.size(); + if (azgCount == 0) { + inClause = " 1=1 "; + } + else { + inClause = orInClause(azgCount, "REALM_ID"); + } + + String sql = dbAuthzGroupSql.getSelectRealmRolesSql(inClause); + Object[] fields = new Object[1 + azgCount]; + fields[0] = userId; + if (azgCount > 0) { + int pos = 1; + for (String s : azGroupIds) { + fields[pos++] = s; + } + } + + m_sql.dbRead(sql, fields, new SqlReader() + { + public Object readSqlResultRecord(ResultSet result) + { + try + { + String realmId = result.getString(1); + String roleName = result.getString(2); + + // ignore if we get an unexpected null -- it's useless to us + if ((realmId != null) && (roleName != null)) + { + rv.put(realmId, roleName); + } + } + catch (Exception t) + { + M_log.warn("Serious database error occurred reading result set", t); + } + + return null; + } + }); + + return rv; + } + + /** + * {@inheritDoc} + */ + public Map getUsersRole(Collection userIds, String azGroupId) + { + if ((userIds == null) || (userIds.isEmpty()) || (azGroupId == null)) + { + return new HashMap(); + } + + String inClause = orInClause(userIds.size(), "SRRG.USER_ID"); + String sql = dbAuthzGroupSql.getSelectRealmUserRoleSql(inClause); + Object[] fields = new Object[1 + userIds.size()]; + fields[0] = azGroupId; + int pos = 1; + for (Iterator i = userIds.iterator(); i.hasNext();) + { + fields[pos++] = i.next(); + } + + // the return + final Map rv = new HashMap(); + + // read + m_sql.dbRead(sql, fields, new SqlReader() + { + public Object readSqlResultRecord(ResultSet result) + { + try + { + // read the results + String userId = result.getString(1); + String role = result.getString(2); + + if ((userId != null) && (role != null)) + { + rv.put(userId, role); + } + } + catch (Exception t) + { + } + + return null; + } + }); + + return rv; + } + + public Set getMaintainRoles(){ + + Set maintainRoles = null; + + if (maintainRolesCache != null && maintainRolesCache.containsKey("maintainRoles")) { + maintainRoles = (Set) maintainRolesCache.get("maintainRoles"); + } else { + String sql = dbAuthzGroupSql.getMaintainRolesSql(); + maintainRoles = new HashSet(m_sql.dbRead(sql)); + maintainRolesCache.put("maintainRoles", maintainRoles); + } + + return maintainRoles; + } + private class UserAndGroups { String user; @@ -2579,20 +2891,20 @@ if (query == null || query.size() < 1) return; total++; Long queryHash = computeRealmQueryHash(query); - + if (queryHash != null) { if (result == null) result = Collections.emptyList(); realmsQuery.put(queryHash, result); } } - + List getRealmQuery(Set query) { if (query == null || query.size() < 1) return null; List result = null; - + total++; Long queryHash = computeRealmQueryHash(query); - + if (queryHash != null) { if (realmsQuery.containsKey(queryHash)) { result = realmsQuery.get(queryHash); @@ -2603,22 +2915,22 @@ } Long computeRealmQueryHash(Set query) { - + if (query == null || query.size() == 0) return null; - + long hash = 0; for (String q : query) { hash += q.hashCode(); } - + return Long.valueOf(hash); } - + @Override public int hashCode() { return user.hashCode(); } - + @Override public boolean equals(Object obj) { if (obj == null) return false; @@ -2640,7 +2952,7 @@ " size=" + realmsQuery.size() + ", total=" + total + ", hits=" + hit + ", hit ratio=" + (hit * 100) / (float) total; } } - + public class RealmAndProvider { public Integer realmId; @@ -2787,370 +3099,39 @@ } } - /** - * {@inheritDoc} - */ - public String getUserRole(String userId, String azGroupId) - { - if ((userId == null) || (azGroupId == null)) 
return null; - - // checks to see if the user is the current user and has the roleswap variable set in the session - String rv = null; - - if (userId.equals(sessionManager().getCurrentSessionUserId())) { - rv = securityService().getUserEffectiveRole(azGroupId); - } - - // otherwise drop through to the usual check - if (rv == null) { - String sql = dbAuthzGroupSql.getSelectRealmRoleNameSql(); - Object[] fields = new Object[2]; - fields[0] = azGroupId; - fields[1] = userId; - - // read the string - List results = m_sql.dbRead(sql, fields, null); - - // prepare the return - if ((results != null) && (!results.isEmpty())) - { - rv = (String) results.get(0); - if (results.size() > 1) - { - M_log.warn("getUserRole: user: " + userId + " multiple roles"); - } - } - } - - return rv; - } - - /** - * {@inheritDoc} - */ - public Map getUserRoles(String userId, Collection azGroupIds) - { - final HashMap rv = new HashMap(); - if (userId == null || "".equals(userId)) - return rv; - - String inClause; - int azgCount = azGroupIds == null ? 0 : azGroupIds.size(); - if (azgCount == 0) { - inClause = " 1=1 "; - } - else { - inClause = orInClause(azgCount, "REALM_ID"); - } - - String sql = dbAuthzGroupSql.getSelectRealmRolesSql(inClause); - Object[] fields = new Object[1 + azgCount]; - fields[0] = userId; - if (azgCount > 0) { - int pos = 1; - for (String s : azGroupIds) { - fields[pos++] = s; - } - } - - m_sql.dbRead(sql, fields, new SqlReader() - { - public Object readSqlResultRecord(ResultSet result) - { - try - { - String realmId = result.getString(1); - String roleName = result.getString(2); - - // ignore if we get an unexpected null -- it's useless to us - if ((realmId != null) && (roleName != null)) - { - rv.put(realmId, roleName); - } - } - catch (Exception t) - { - M_log.warn("Serious database error occurred reading result set", t); - } - - return null; - } - }); - - return rv; - } - - /** - * {@inheritDoc} - */ - public Map getUsersRole(Collection userIds, String azGroupId) - { - if ((userIds == null) || (userIds.isEmpty()) || (azGroupId == null)) - { - return new HashMap(); - } - - String inClause = orInClause(userIds.size(), "SRRG.USER_ID"); - String sql = dbAuthzGroupSql.getSelectRealmUserRoleSql(inClause); - Object[] fields = new Object[1 + userIds.size()]; - fields[0] = azGroupId; - int pos = 1; - for (Iterator i = userIds.iterator(); i.hasNext();) - { - fields[pos++] = i.next(); - } - - // the return - final Map rv = new HashMap(); - - // read - m_sql.dbRead(sql, fields, new SqlReader() - { - public Object readSqlResultRecord(ResultSet result) - { - try - { - // read the results - String userId = result.getString(1); - String role = result.getString(2); - - if ((userId != null) && (role != null)) - { - rv.put(userId, role); - } - } - catch (Exception t) - { - } - - return null; - } - }); - - return rv; - } - - public Set getMaintainRoles(){ - - Set maintainRoles = null; - - if (maintainRolesCache != null && maintainRolesCache.containsKey("maintainRoles")) { - maintainRoles = (Set) maintainRolesCache.get("maintainRoles"); - } else { - String sql = dbAuthzGroupSql.getMaintainRolesSql(); - maintainRoles = new HashSet(m_sql.dbRead(sql)); - maintainRolesCache.put("maintainRoles", maintainRoles); - } - - return maintainRoles; - } - } // DbStorage - /** To avoide the dreaded ORA-01795 and the like, we need to limit to <100 the items in each in(?, ?, ...) clause, connecting them with ORs. 
*/ - protected final static int MAX_IN_CLAUSE = 99; - - /** - * Form a SQL IN() clause, but break it up with ORs to keep the size of each IN below 100 - * - * @param size - * The size - * @param field - * The field name - * @return a SQL IN() with ORs clause this large. - */ - protected String orInClause(int size, String field) - { - // Note: to avoide the dreaded ORA-01795 and the like, we need to limit to <100 the items in each in(?, ?, ...) clause, connecting them with - // ORs -ggolden - int ors = size / MAX_IN_CLAUSE; - int leftover = size - (ors * MAX_IN_CLAUSE); - StringBuilder buf = new StringBuilder(); - - // enclose them all in parens if we have > 1 - if (ors > 0) - { - buf.append(" ("); - } - - buf.append(" " + field + " IN "); - - // do all the full MAX_IN_CLAUSE '?' in/ors - if (ors > 0) - { - for (int i = 0; i < ors; i++) - { - buf.append("(?"); - for (int j = 1; j < MAX_IN_CLAUSE; j++) - { - buf.append(",?"); - } - buf.append(")"); - - if (i < ors - 1) - { - buf.append(" OR " + field + " IN "); - } - } - } - - // add one more for the extra - if (leftover > 0) - { - if (ors > 0) - { - buf.append(" OR " + field + " IN "); - } - buf.append("(?"); - for (int i = 1; i < leftover; i++) - { - buf.append(",?"); - } - buf.append(")"); - } - - // enclose them all in parens if we have > 1 - if (ors > 0) - { - buf.append(" )"); - } - - return buf.toString(); - } - - /** - * Get value for query & return that; needed for mssql which doesn't support select stmts in VALUES clauses - * Note that MSSQL support was removed in KNL-880, so this is a no-op. - * - * @param sqlQuery - * @param bindParameter - * @return value if mssql, bindparameter if not (basically a no-op for others) - */ - protected Object getValueForSubquery(String sqlQuery, Object bindParameter) - { - return bindParameter; - } - - private String getRealmRoleKey(String roleName) { - Iterator itr = m_roleNameCache.iterator(); - while (itr.hasNext()) { - RealmRole realmRole = (RealmRole) itr.next(); - if (realmRole != null && realmRole.getName().equals(roleName)) { - return realmRole.getKey(); - } - } - return null; - } - class RealmRole implements Comparable{ private String name; private String key; - + RealmRole(String name) { this.name = name; } - + RealmRole(String name, String key) { this.name = name; this.key = key; } - + public String getName() { return name; } - + public void setName(String name) { this.name = name; } - + public String getKey() { return key; } - + public void setKey(String key) { this.key = key; } - + public int compareTo(RealmRole realmRole) { return this.name.compareToIgnoreCase(realmRole.name); } } - - - public void update(Observable arg0, Object arg) { - if (arg == null || !(arg instanceof Event)) - return; - Event event = (Event) arg; - - // check the event function against the functions we have notifications watching for - String function = event.getEvent(); - if (SECURE_UPDATE_AUTHZ_GROUP.equals(function) - || SECURE_UPDATE_OWN_AUTHZ_GROUP.equals(function) - || SECURE_REMOVE_AUTHZ_GROUP.equals(function) - || SECURE_JOIN_AUTHZ_GROUP.equals(function) - || SECURE_UNJOIN_AUTHZ_GROUP.equals(function) - || SECURE_ADD_AUTHZ_GROUP.equals(function)) { - // Get the resource ID - String realmId = extractEntityId(event.getResource()); - - if (realmId != null) { - for (String user : getAuthzUsersInGroups(new HashSet(Arrays.asList(realmId)))) { - authzUserGroupIdsCache.remove(user); - } - if (serverConfigurationService().getBoolean("authz.cacheGrants", true)) { - if (M_log.isDebugEnabled()) { - 
M_log.debug("DbAuthzGroupService update(): clear realm role cache for " + realmId); - } - - m_realmRoleGRCache.remove(realmId); - } - } else { - // This should never happen as the events we generate should always have - // a /realm/ prefix on the resource. - M_log.warn("DBAuthzGroupService update(): failed to extract realm ID from "+ event.getResource()); - } - } - - - } - - /** - * based on value from RealmRoleGroupCache - * transform a Map object into a Map object - * KNL-1037 - */ - private Map getMemberMap(Map mMap, Map roleMap) - { - Map rv = new HashMap(); - for (Map.Entry entry : mMap.entrySet()) - { - String userId = entry.getKey(); - MemberWithRoleId m = entry.getValue(); - String roleId = m.getRoleId(); - if (roleId != null && roleMap != null && roleMap.containsKey(roleId)) - { - Role role = (Role) roleMap.get(roleId); - rv.put(userId, new BaseMember(role, m.isActive(), m.isProvided(), userId, userDirectoryService())); - } - } - return rv; - } - - - /** - * transform a Map object into a Map object - * to be used in RealmRoleGroupCache - * KNL-1037 - */ - private Map getMemberWithRoleIdMap(Map userGrants) - { - Map rv = new HashMap(); - for (Map.Entry entry : userGrants.entrySet()) - { - String userId = entry.getKey(); - Member member = entry.getValue(); - rv.put(userId, new MemberWithRoleId(member)); - } - return rv; - } } Index: kernel-impl/src/main/java/org/sakaiproject/event/impl/BaseEventTrackingService.java =================================================================== --- kernel-impl/src/main/java/org/sakaiproject/event/impl/BaseEventTrackingService.java (revision 308077) +++ kernel-impl/src/main/java/org/sakaiproject/event/impl/BaseEventTrackingService.java (working copy) @@ -21,30 +21,24 @@ package org.sakaiproject.event.impl; -import java.io.Serializable; -import java.util.Date; -import java.util.Observable; -import java.util.Observer; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.sakaiproject.authz.api.SecurityAdvisor; import org.sakaiproject.authz.api.SecurityService; -import org.sakaiproject.event.api.Event; -import org.sakaiproject.event.api.EventDelayHandler; -import org.sakaiproject.event.api.EventTrackingService; -import org.sakaiproject.event.api.NotificationService; +import org.sakaiproject.entity.api.EntityManager; import org.sakaiproject.entity.api.Reference; -import org.sakaiproject.entity.api.EntityManager; -import org.sakaiproject.event.api.UsageSession; -import org.sakaiproject.event.api.UsageSessionService; +import org.sakaiproject.event.api.*; import org.sakaiproject.time.api.Time; import org.sakaiproject.time.api.TimeService; -import org.sakaiproject.user.api.User; import org.sakaiproject.tool.api.Placement; import org.sakaiproject.tool.api.SessionManager; import org.sakaiproject.tool.api.ToolManager; +import org.sakaiproject.user.api.User; +import java.util.Date; +import java.util.Observable; +import java.util.Observer; + /** *

* BaseEventTrackingService is the base implmentation for the EventTracking service. @@ -71,23 +65,8 @@ *********************************************************************************************************************************************************************************************************************************************************/ /** - * Extend Observable to "public"ize setChanges, so we can set it. Why a helper object? Cause the service (which is observable) already 'extends' TurbineBaseService, and cannot also 'extend' Observable. - */ - protected class MyObservable extends Observable - { - public void setChanged() - { - super.setChanged(); - } - } - - /********************************************************************************************************************************************************************************************************************************************************** - * Event post / flow - override - *********************************************************************************************************************************************************************************************************************************************************/ - - /** * Cause this new event to get to wherever it has to go for persistence, etc. - * + * * @param event * The new event to post. */ @@ -94,12 +73,12 @@ protected abstract void postEvent(Event event); /********************************************************************************************************************************************************************************************************************************************************** - * Observer notification + * Event post / flow - override *********************************************************************************************************************************************************************************************************************************************************/ /** * Send notification about a new event to observers. - * + * * @param event * The event to send notification about. * @param local @@ -128,7 +107,7 @@ } /********************************************************************************************************************************************************************************************************************************************************** - * Dependencies + * Observer notification *********************************************************************************************************************************************************************************************************************************************************/ /** @@ -136,6 +115,10 @@ */ protected abstract UsageSessionService usageSessionService(); + /********************************************************************************************************************************************************************************************************************************************************** + * Dependencies + *********************************************************************************************************************************************************************************************************************************************************/ + /** * @return the SessionManager collaborator. 
*/ @@ -161,10 +144,6 @@ */ protected abstract TimeService timeService(); - /********************************************************************************************************************************************************************************************************************************************************** - * Init and Destroy - *********************************************************************************************************************************************************************************************************************************************************/ - /** * Final initialization, once all dependencies are set. */ @@ -173,6 +152,10 @@ M_log.info(this + ".init()"); } + /********************************************************************************************************************************************************************************************************************************************************** + * Init and Destroy + *********************************************************************************************************************************************************************************************************************************************************/ + /** * Final cleanup. */ @@ -193,7 +176,7 @@ /** * Construct a Event object. - * + * * @param event * The Event id. * @param resource @@ -209,7 +192,7 @@ /** * Construct a Event object. - * + * * @param event * The Event id. * @param resource @@ -227,7 +210,7 @@ /** * Construct a Event object. - * + * * @param event * The Event id. * @param resource @@ -247,7 +230,7 @@ /** * Post an event - * + * * @param event * The event object (created with newEvent()). Note: the current session user will be used as the user responsible for the event. */ @@ -278,7 +261,7 @@ /** * Post an event on behalf of a user's session - * + * * @param event * The event object (created with newEvent()). * @param session @@ -297,7 +280,7 @@ /** * Post an event on behalf of a user. - * + * * @param event * The event object (created with newEvent()). * @param user @@ -339,14 +322,14 @@ if (delayHandler != null) { // Make sure there is a userid associated with the event - + String id = event.getUserId(); - - if (id == null) + + if (id == null) { id = sessionManager().getCurrentSessionUserId(); } - + if (id == null) { id = "?"; @@ -381,11 +364,11 @@ /** * Ensure that the provided event is an instance of BaseEvent. If not, create a new BaseEvent * and transfer state. - * + * * @param e * @return */ - private BaseEvent ensureBaseEvent(Event e) + protected BaseEvent ensureBaseEvent(Event e) { BaseEvent event = null; if (e instanceof BaseEvent) @@ -405,7 +388,7 @@ * Refired events can occur under a different user and session than was originally available. * To make sure permission exceptions aren't falsely encountered, a security advisor should be * pushed on the stack to recreate the correct environment for security checks. - * + * * @param userId */ private SecurityAdvisor newResourceAdvisor(final String eventUserId) @@ -426,7 +409,7 @@ /** * Add an observer of events. The observer will be notified whenever there are new events. - * + * * @param observer * The class observing. */ @@ -441,7 +424,7 @@ /** * Add an observer of events. The observer will be notified whenever there are new events. Priority observers get notified first, before normal observers. - * + * * @param observer * The class observing. */ @@ -456,7 +439,7 @@ /** * Add an observer of events. 
The observer will be notified whenever there are new events. Local observers get notified only of event generated on this application server, not on those generated elsewhere. - * + * * @param observer * The class observing. */ @@ -471,7 +454,7 @@ /** * Delete an observer of events. - * + * * @param observer * The class observing to delete. */ @@ -482,6 +465,17 @@ m_localObservableHelper.deleteObserver(observer); } + /** + * Extend Observable to "public"ize setChanges, so we can set it. Why a helper object? Cause the service (which is observable) already 'extends' TurbineBaseService, and cannot also 'extend' Observable. + */ + protected class MyObservable extends Observable + { + public void setChanged() + { + super.setChanged(); + } + } + /********************************************************************************************************************************************************************************************************************************************************** * Event implementation *********************************************************************************************************************************************************************************************************************************************************/ @@ -494,7 +488,7 @@ * Event objects are posted to the EventTracking service, and may be listened for. *

*/ - protected class BaseEvent implements Event, Serializable + protected class BaseEvent implements Event { /** * Be a good Serializable citizen @@ -526,82 +520,11 @@ protected int m_priority = NotificationService.NOTI_OPTIONAL; /** Event creation time. */ - protected Time m_time = null; + protected Date m_time = null; /** - * Access the event id string - * - * @return The event id string. - */ - public String getEvent() - { - return m_id; - } - - /** - * Access the resource reference. - * - * @return The resource reference string. - */ - public String getResource() - { - return m_resource; - } - - /** - * Access the resource reference. - * - * @return The resource reference string. - */ - public String getContext() - { - return m_context; - } - - - /** - * Access the UsageSession id. If null, check for a User id. - * - * @return The UsageSession id string. - */ - public String getSessionId() - { - return m_session; - } - - /** - * Access the User id. If null, check for a session id. - * - * @return The User id string. - */ - public String getUserId() - { - return m_user; - } - - /** - * Is this event one that caused a modify to the resource, or just an access. - * - * @return true if the event caused a modify to the resource, false if it was just an access. - */ - public boolean getModify() - { - return m_modify; - } - - /** - * Access the event's notification priority. - * - * @return The event's notification priority. - */ - public int getPriority() - { - return m_priority; - } - - /** * Construct - * + * * @param event * The Event id. * @param resource @@ -618,7 +541,7 @@ m_modify = modify; m_priority = priority; - // Find the context using the reference (let the service that it belongs to parse it) + // Find the context using the reference (let the service that it belongs to parse it) if (resource != null && !"".equals(resource)) { Reference ref = entityManager().newReference(resource); if (ref != null) { @@ -625,8 +548,8 @@ m_context = ref.getContext(); } } - - // If we still need to find the context, try the tool placement + + // If we still need to find the context, try the tool placement if (m_context == null) { Placement placement = toolManager().getCurrentPlacement(); if (placement != null) { @@ -633,7 +556,7 @@ m_context = placement.getContext(); } } - + // KNL-997 String uId = sessionManager().getCurrentSessionUserId(); if (uId == null) @@ -641,13 +564,13 @@ uId = "?"; } setUserId(uId); - - + + } /** * Construct - * + * * @param event * The Event id. * @param resource @@ -666,7 +589,7 @@ m_modify = modify; m_priority = priority; m_context = context; - + // KNL-997 String uId = sessionManager().getCurrentSessionUserId(); if (uId == null) @@ -675,10 +598,10 @@ } setUserId(uId); } - + /** * Construct - * + * * @param seq * The event sequence number. * @param event @@ -695,18 +618,28 @@ this(event, resource, context, modify, priority); m_seq = seq; } + - public BaseEvent(long seq, String event, String resource, String context, boolean modify, int priority, Date eventDate) { this(event, resource, context, modify, priority); m_seq = seq; - m_time = timeService().newTime(eventDate.getTime()); + m_time = eventDate; } /** + * Access the event id string + * + * @return The event id string. + */ + public String getEvent() + { + return m_id; + } + + /** * Set the event id. - * + * * @param id * The event id string. */ @@ -723,8 +656,18 @@ } /** + * Access the resource reference. + * + * @return The resource reference string. 
+ */ + public String getResource() + { + return m_resource; + } + + /** * Set the resource id. - * + * * @param id * The resource id string. */ @@ -741,8 +684,28 @@ } /** + * Access the resource reference. + * + * @return The resource reference string. + */ + public String getContext() + { + return m_context; + } + + /** + * Access the UsageSession id. If null, check for a User id. + * + * @return The UsageSession id string. + */ + public String getSessionId() + { + return m_session; + } + + /** * Set the session id. - * + * * @param id * The session id string. */ @@ -759,8 +722,18 @@ } /** + * Access the User id. If null, check for a session id. + * + * @return The User id string. + */ + public String getUserId() + { + return m_user; + } + + /** * Set the user id. - * + * * @param id * The user id string. */ @@ -777,6 +750,26 @@ } /** + * Is this event one that caused a modify to the resource, or just an access. + * + * @return true if the event caused a modify to the resource, false if it was just an access. + */ + public boolean getModify() + { + return m_modify; + } + + /** + * Access the event's notification priority. + * + * @return The event's notification priority. + */ + public int getPriority() + { + return m_priority; + } + + /** * @return A representation of this event's values as a string. */ public String toString() Index: kernel-impl/src/main/java/org/sakaiproject/event/impl/ClusterEventTracking.java =================================================================== --- kernel-impl/src/main/java/org/sakaiproject/event/impl/ClusterEventTracking.java (revision 308077) +++ kernel-impl/src/main/java/org/sakaiproject/event/impl/ClusterEventTracking.java (working copy) @@ -21,16 +21,6 @@ package org.sakaiproject.event.impl; -import java.sql.Connection; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.util.Collection; -import java.util.Date; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Vector; - import org.apache.commons.lang.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -40,7 +30,15 @@ import org.sakaiproject.db.api.SqlService; import org.sakaiproject.event.api.Event; import org.sakaiproject.event.api.NotificationService; +import org.sakaiproject.event.api.SimpleEvent; +import org.sakaiproject.memory.api.Cache; +import org.sakaiproject.memory.api.MemoryService; +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.*; + /** *

* ClusterEventTracking is the implementation of the EventTracking service for use in a clustered multi-app server configuration.
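For context, a minimal consumer sketch (not part of this patch): it assumes the usual Sakai pattern of registering a java.util.Observer with the EventTrackingService — addObserver() for cluster-wide delivery, addLocalObserver() for this-server-only delivery, as the javadoc above describes — and assumes the Event arrives as the update() argument. The class name and wiring below are hypothetical.

    import java.util.Observable;
    import java.util.Observer;

    import org.sakaiproject.event.api.Event;
    import org.sakaiproject.event.api.EventTrackingService;

    // Hypothetical example class; not present in the Sakai kernel.
    public class EventLogObserver implements Observer {

        private EventTrackingService eventTrackingService; // assumed to be injected via Spring

        public void setEventTrackingService(EventTrackingService eventTrackingService) {
            this.eventTrackingService = eventTrackingService;
        }

        public void init() {
            // Cluster-wide registration: notified of events from every app server,
            // including those read back from SAKAI_EVENT (or, with this patch, the event cache).
            eventTrackingService.addObserver(this);
        }

        public void destroy() {
            eventTrackingService.deleteObserver(this);
        }

        public void update(Observable o, Object arg) {
            if (arg instanceof Event) {
                Event event = (Event) arg;
                // These getters are relocated (not changed) by this patch.
                System.out.println(event.getEvent() + " " + event.getResource()
                        + " context=" + event.getContext() + " user=" + event.getUserId());
            }
        }
    }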
@@ -50,16 +48,13 @@ public abstract class ClusterEventTracking extends BaseEventTrackingService implements Runnable { + /** String used to identify this service in the logs */ + protected static final String m_logId = "EventTracking: "; // see http://jira.sakaiproject.org/browse/SAK-3793 for more info about these numbers private static final long WARNING_SAFE_EVENTS_TABLE_SIZE = 18000000l; private static final long MAX_SAFE_EVENTS_TABLE_SIZE = 20000000l; - /** Our logger. */ private static Log M_log = LogFactory.getLog(ClusterEventTracking.class); - - /** String used to identify this service in the logs */ - protected static final String m_logId = "EventTracking: "; - /** The db event checker thread. */ protected Thread m_thread = null; @@ -73,11 +68,32 @@ /** Queue of events to write if we are batching. */ protected Collection m_eventQueue = null; + /** Unless false, check the db for events from the other cluster servers. */ + protected boolean m_checkDb = true; + /** If true, batch events for bulk write. */ + protected boolean m_batchWrite = true; + /** Configuration: to run the ddl on init or not. */ + protected boolean m_autoDdl = false; /************************************************************************************************************************************************* * Dependencies ************************************************************************************************************************************************/ + /** How long to wait between checks for new events from the db. */ + protected long m_period = 1000L * 5L; + /** contains a map of the database dependent handler. */ + protected Map databaseBeans; + /** contains database dependent code. */ + protected ClusterEventTrackingServiceSql clusterEventTrackingServiceSql; + /************************************************************************************************************************************************* + * Configuration + ************************************************************************************************************************************************/ + /** The events caches (ONLY used if enabled) - KNL-1184 */ + private Cache eventCache; + private Cache eventLastCache; + /** is caching enabled? - KNL-1184 */ + private boolean cachingEnabled; + /** * @return the MemoryService collaborator. */ @@ -88,16 +104,14 @@ */ protected abstract ServerConfigurationService serverConfigurationService(); - /************************************************************************************************************************************************* - * Configuration - ************************************************************************************************************************************************/ + /** + * @return the MemoryService collaborator. + */ + protected abstract MemoryService memoryService(); - /** Unless false, check the db for events from the other cluster servers. */ - protected boolean m_checkDb = true; - /** * Configuration: set the check-db. - * + * * @param value * The check-db value. */ @@ -112,12 +126,9 @@ } } - /** If true, batch events for bulk write. */ - protected boolean m_batchWrite = true; - /** * Configuration: set the batch writing flag. - * + * * @param value * The batch writing value. */ @@ -132,12 +143,9 @@ } } - /** Configuration: to run the ddl on init or not. */ - protected boolean m_autoDdl = false; - /** * Configuration: to run the ddl on init or not. - * + * * @param value * the auto ddl value. 
*/ @@ -146,12 +154,9 @@ m_autoDdl = Boolean.valueOf(value).booleanValue(); } - /** How long to wait between checks for new events from the db. */ - protected long m_period = 1000L * 5L; - /** * Set the # seconds to wait between db checks for new events. - * + * * @param time * The # seconds to wait between db checks for new events. */ @@ -160,12 +165,6 @@ m_period = Integer.parseInt(time) * 1000L; } - /** contains a map of the database dependent handler. */ - protected Map databaseBeans; - - /** contains database dependent code. */ - protected ClusterEventTrackingServiceSql clusterEventTrackingServiceSql; - public void setDatabaseBeans(Map databaseBeans) { this.databaseBeans = databaseBeans; @@ -236,6 +235,8 @@ this.post(this.newEvent("server.start", serverConfigurationService().getString("version.sakai", "unknown") + "/" + serverConfigurationService().getString("version.service", "unknown"), false)); + // initialize the caching server, if enabled + initCacheServer(); } catch (Exception t) { @@ -296,7 +297,7 @@ protected void postEvent(Event event) { // mark the event time - ((BaseEvent) event).m_time = timeService().newTime(); + ((BaseEvent) event).m_time = new Date(); // notify locally generated events immediately - // they will not be process again when read back from the database @@ -342,13 +343,22 @@ Object fields[] = new Object[6]; bindValues(event, fields); - // process the insert - boolean ok = sqlService().dbWrite(conn, statement, fields); - if (!ok) - { - M_log.warn(this + ".writeEvent(): dbWrite failed: session: " + fields[3] + " event: " + event.toString()); - } - } + // process the insert + if (cachingEnabled) { + // if caching is enabled, get the last inserted id + Long eventId = sqlService().dbInsert(conn, statement, fields, "EVENT_ID"); + if (eventId != null) { + // write event to cache + writeEventToCluster(event, eventId); + } + } else { + boolean ok = sqlService().dbWrite(conn, statement, fields); + if (!ok) { + M_log.warn(this + ".writeEvent(): dbWrite failed: session: " + + fields[3] + " event: " + event.toString()); + } + } + } /** * Write a batch of events to the db @@ -373,9 +383,9 @@ // Note: investigate batch writing via the jdbc driver: make sure we can still use prepared statements (check out host arrays, too) // -ggolden - // common preparation for each insert - String statement = insertStatement(); - Object fields[] = new Object[6]; + // common preparation for each insert + String statement = insertStatement(); + Object fields[] = new Object[6]; // write all events for (Iterator i = events.iterator(); i.hasNext();) @@ -383,16 +393,31 @@ Event event = (Event) i.next(); bindValues(event, fields); - // process the insert - boolean ok = sqlService().dbWrite(conn, statement, fields); - if (!ok) - { - M_log.warn(this + ".writeBatchEvents(): dbWrite failed: session: " + fields[3] + " event: " + event.toString()); - } - } + // process the insert + if (cachingEnabled) { + conn = sqlService().borrowConnection(); + if (conn.getAutoCommit()) { + conn.setAutoCommit(false); + } + Long eventId = sqlService().dbInsert(conn, statement, fields, "EVENT_ID"); + if (eventId != null) { + // write event to cache + writeEventToCluster(event, eventId); + } + } else { + boolean ok = sqlService().dbWrite(conn, statement, fields); + if (!ok) { + M_log.warn(this + + ".writeBatchEvents(): dbWrite failed: session: " + + fields[3] + " event: " + event.toString()); + } + } + } // commit - conn.commit(); + if (!conn.isClosed()) { + conn.commit(); + } } catch (Exception e) { @@ -407,7 +432,7 @@ 
M_log.warn(this + ".writeBatchEvents, while rolling back: " + ee); } } - M_log.warn(this + ".writeBatchEvents: " + e); + M_log.warn(this + ".writeBatchEvents: " + e, e); } finally { @@ -415,7 +440,7 @@ { try { - if (conn.getAutoCommit() != wasCommit) + if (!conn.isClosed() && conn.getAutoCommit() != wasCommit) { conn.setAutoCommit(wasCommit); } @@ -422,7 +447,7 @@ } catch (Exception e) { - M_log.warn(this + ".writeBatchEvents, while setting auto commit: " + e); + M_log.warn(this + ".writeBatchEvents, while setting auto commit: " + e, e); } sqlService().returnConnection(conn); } @@ -553,81 +578,101 @@ Object[] fields = new Object[1]; fields[0] = Long.valueOf(m_lastEventSeq); - List events = sqlService().dbRead(statement, fields, new SqlReader() - { - public Object readSqlResultRecord(ResultSet result) - { - try - { - // read the Event - long id = result.getLong(1); - Date date = new Date(result.getTimestamp(2, sqlService().getCal()).getTime()); - String function = result.getString(3); - String ref = result.getString(4); - String session = result.getString(5); - String code = result.getString(6); - String context = result.getString(7); - String eventSessionServerId = result.getString(8); // may be null + List events = new ArrayList(); + if (cachingEnabled) { // KNL-1184 + // set to last event id processed + 1 since we've already processed the last event id + long beginEventId = m_lastEventSeq + 1; + // set m_lastEventSeq to latest key value in event cache + initLastEventIdInEventCache(); + // only process events if there are new ones + if (m_lastEventSeq >= beginEventId) { + for (long i = beginEventId; i <= m_lastEventSeq; i++) { + SimpleEvent event = (SimpleEvent) eventCache.get( String.valueOf(i) ); + if (event != null) { + boolean nonSessionEvent = (event.getServerId() == null || StringUtils.startsWith(event.getSessionId(), "~")); + String userId = null; + boolean skipIt = false; - // for each one (really, for the last one), update the last event seen seq number - if (id > m_lastEventSeq) - { - m_lastEventSeq = id; - } + if (nonSessionEvent) { + String[] parts = StringUtils.split(event.getSessionId(), "~"); + if (parts.length > 1) { + userId = parts[1]; + } - boolean nonSessionEvent = (eventSessionServerId == null || session.startsWith("~")); - String userId = null; - boolean skipIt = false; + // we skip this event if it came from our server + if (parts.length > 0) { + skipIt = serverId.equals(parts[0]); + } - if (nonSessionEvent) - { - String[] parts = StringUtils.split(session, "~"); - if (parts.length > 1) { - userId = parts[1]; + event.setUserId(userId); + } else { + skipIt = serverInstance.equals(event.getServerId()); + event.setSessionId(event.getSessionId()); + } + + // add event to list, only if it is not a local server event + if (!skipIt) { + events.add(event); + } + } + } + } + } else { + events = sqlService().dbRead(statement, fields, new SqlReader() { + public Object readSqlResultRecord(ResultSet result) { + try { + Long id = result.getLong(1); + Date date = new Date(result.getTimestamp(2, sqlService().getCal()).getTime()); + String function = result.getString(3); + String ref = result.getString(4); + String session = result.getString(5); + String code = result.getString(6); + String context = result.getString(7); + String eventSessionServerId = result.getString(8); // may be null + + if (id > m_lastEventSeq) { + m_lastEventSeq = id; } - // we skip this event if it came from our server - if (parts.length > 0) { - skipIt = serverId.equals(parts[0]); + boolean nonSessionEvent = 
(eventSessionServerId == null || session.startsWith("~")); + String userId = null; + boolean skipIt = false; + + if (nonSessionEvent) { + String[] parts = StringUtils.split(session, "~"); + if (parts.length > 1) { + userId = parts[1]; + } + + // we skip this event if it came from our server + if (parts.length > 0) { + skipIt = serverId.equals(parts[0]); + } + } else { + skipIt = serverInstance.equals(eventSessionServerId); } - } - // for session events, if the event is from this server instance, - // we have already processed it and can skip it here. - else - { - skipIt = serverInstance.equals(eventSessionServerId); - } + if (skipIt) { + return null; + } - if (skipIt) - { + // Note: events from outside the server don't need notification info, since notification is processed only on internal + // events -ggolden + BaseEvent event = new BaseEvent(id, function, ref, context, "m".equals(code), NotificationService.NOTI_NONE, date); + if (nonSessionEvent) { + event.setUserId(userId); + } else { + event.setSessionId(session); + } + return event; + } catch (Exception ignore) { return null; } - - // Note: events from outside the server don't need notification info, since notification is processed only on internal - // events -ggolden - BaseEvent event = new BaseEvent(id, function, ref, context, "m".equals(code), NotificationService.NOTI_NONE, date); - if (nonSessionEvent) - { - event.setUserId(userId); - } - else - { - event.setSessionId(session); - } - - return event; } - catch (SQLException ignore) - { - return null; - } - } - }); - + }); + } // for each new event found, notify observers - for (int i = 0; i < events.size(); i++) - { + for (int i = 0; i < events.size(); i++) { Event event = (Event) events.get(i); notifyObservers(event, false); } @@ -673,4 +718,63 @@ if (M_log.isDebugEnabled()) M_log.debug(this + " Starting (after) Event #: " + m_lastEventSeq); } + + /** + * KNL-1184 + * Initializes the events cache, if enabled + */ + private void initCacheServer() { + cachingEnabled = serverConfigurationService().getBoolean("cluster.cache.enabled", false); + if (cachingEnabled) { + eventCache = memoryService().newCache("org.sakaiproject.event.impl.ClusterEventTracking.eventsCache"); + /** + * This cache only needs to hold a single value, the last updated event id + */ + eventLastCache = memoryService().newCache("org.sakaiproject.event.impl.ClusterEventTracking.eventLastCache"); + } + } + + /** + * Finds the last event ID inserted into the event cache + * (tracked in another cache) + */ + private void initLastEventIdInEventCache() { + if (cachingEnabled) { + if (eventLastCache != null) { + Long last = (Long) eventLastCache.get("lastEventId"); + if (last != null) { + m_lastEventSeq = last; + } + } + } + } + + /** + * Writes an event to cache, if enabled + * + * @param event the event object + * @param eventId the id of the event object + */ + private void writeEventToCluster(Event event, Long eventId) { + if (cachingEnabled) { + if (eventCache != null) { + // store event as an element + BaseEvent baseEvent = ensureBaseEvent(event); + SimpleEvent simpleEvent = new SimpleEvent((Event) baseEvent, serverConfigurationService().getServerIdInstance()); + // add item to cache store + eventCache.put(String.valueOf(eventId), simpleEvent); + // update the last event id each time + eventLastCache.put("lastEventId", eventId); + } else { + if (M_log.isDebugEnabled()) { + M_log.info("Cannot store event to cache, event store not initialized."); + } + } + } else { + if (M_log.isDebugEnabled()) { + 
M_log.info("Cluster caching not enabled."); + } + } + } + } Index: kernel-impl/src/main/java/org/sakaiproject/event/impl/EventTrackingTest.java =================================================================== --- kernel-impl/src/main/java/org/sakaiproject/event/impl/EventTrackingTest.java (revision 308077) +++ kernel-impl/src/main/java/org/sakaiproject/event/impl/EventTrackingTest.java (working copy) @@ -26,6 +26,7 @@ import org.sakaiproject.db.api.SqlService; import org.sakaiproject.entity.api.EntityManager; import org.sakaiproject.event.api.UsageSessionService; +import org.sakaiproject.memory.api.MemoryService; import org.sakaiproject.time.api.TimeService; import org.sakaiproject.tool.api.SessionManager; import org.sakaiproject.tool.api.ToolManager; @@ -69,6 +70,11 @@ return null; } + @Override + protected MemoryService memoryService() { + return null; + } + /** * @return the TimeService collaborator. */ Index: kernel-impl/src/main/java/org/sakaiproject/event/impl/SessionServiceAdaptorTest.java =================================================================== --- kernel-impl/src/main/java/org/sakaiproject/event/impl/SessionServiceAdaptorTest.java (revision 308077) +++ kernel-impl/src/main/java/org/sakaiproject/event/impl/SessionServiceAdaptorTest.java (working copy) @@ -37,87 +37,129 @@ * SessionServiceAdaptorTest extends the db alias service providing the dependency injectors for testing. * *

*/ -public class SessionServiceAdaptorTest extends UsageSessionServiceAdaptor -{ - /** - * @return the TimeService collaborator. - */ - protected TimeService timeService() - { - return null; - } +@SuppressWarnings("unchecked") +public class SessionServiceAdaptorTest extends UsageSessionServiceAdaptor { - /** Dependency: SqlService. */ - /** - * @return the SqlService collaborator. - */ - protected SqlService sqlService() - { - return null; - } + TimeService timeService; + SqlService sqlService; + ServerConfigurationService serverConfigurationService; + ThreadLocalManager threadLocalManager; + SessionManager sessionManager; + IdManager idManager; + EventTrackingService eventTrackingService; + AuthzGroupService authzGroupService; + UserDirectoryService userDirectoryService; + MemoryService memoryService; - /** - * @return the ServerConfigurationService collaborator. - */ - protected ServerConfigurationService serverConfigurationService() - { - return null; - } + public void setTimeService(TimeService timeService) { + this.timeService = timeService; + } - /** - * @return the ThreadLocalManager collaborator. - */ - protected ThreadLocalManager threadLocalManager() - { - return null; - } + public void setSqlService(SqlService sqlService) { + this.sqlService = sqlService; + } - /** - * @return the SessionManager collaborator. - */ - protected SessionManager sessionManager() - { - return null; - } + public void setServerConfigurationService(ServerConfigurationService serverConfigurationService) { + this.serverConfigurationService = serverConfigurationService; + } - /** - * @return the IdManager collaborator. - */ - protected IdManager idManager() - { - return null; - } + public void setThreadLocalManager(ThreadLocalManager threadLocalManager) { + this.threadLocalManager = threadLocalManager; + } - /** - * @return the EventTrackingService collaborator. - */ - protected EventTrackingService eventTrackingService() - { - return null; - } + public void setSessionManager(SessionManager sessionManager) { + this.sessionManager = sessionManager; + } - /** - * @return the AuthzGroupService collaborator. - */ - protected AuthzGroupService authzGroupService() - { - return null; - } + public void setIdManager(IdManager idManager) { + this.idManager = idManager; + } - /** - * @return the UserDirectoryService collaborator. - */ - protected UserDirectoryService userDirectoryService() - { - return null; - } - - /** - * @return the MemoryService collaborator. - */ - protected MemoryService memoryService() - { - return null; - } + public void setEventTrackingService(EventTrackingService eventTrackingService) { + this.eventTrackingService = eventTrackingService; + } + public void setAuthzGroupService(AuthzGroupService authzGroupService) { + this.authzGroupService = authzGroupService; + } + + public void setUserDirectoryService(UserDirectoryService userDirectoryService) { + this.userDirectoryService = userDirectoryService; + } + + public void setMemoryService(MemoryService memoryService) { + this.memoryService = memoryService; + } + + /** + * @return the TimeService collaborator. + */ + protected TimeService timeService() { + return timeService; + } + + /** Dependency: SqlService. */ + /** + * @return the SqlService collaborator. + */ + protected SqlService sqlService() { + return sqlService; + } + + /** + * @return the ServerConfigurationService collaborator. 
+ */ + protected ServerConfigurationService serverConfigurationService() { + return serverConfigurationService; + } + + /** + * @return the ThreadLocalManager collaborator. + */ + protected ThreadLocalManager threadLocalManager() { + return threadLocalManager; + } + + /** + * @return the SessionManager collaborator. + */ + protected SessionManager sessionManager() { + return sessionManager; + } + + /** + * @return the IdManager collaborator. + */ + protected IdManager idManager() { + return idManager; + } + + /** + * @return the EventTrackingService collaborator. + */ + protected EventTrackingService eventTrackingService() { + return eventTrackingService; + } + + /** + * @return the AuthzGroupService collaborator. + */ + protected AuthzGroupService authzGroupService() { + return authzGroupService; + } + + /** + * @return the UserDirectoryService collaborator. + */ + protected UserDirectoryService userDirectoryService() { + return userDirectoryService; + } + + /** + * @return the MemoryService collaborator. + */ + protected MemoryService memoryService() { + return memoryService; + } + } Index: kernel-impl/src/main/java/org/sakaiproject/memory/impl/SakaiCacheManagerFactoryBean.java =================================================================== --- kernel-impl/src/main/java/org/sakaiproject/memory/impl/SakaiCacheManagerFactoryBean.java (revision 0) +++ kernel-impl/src/main/java/org/sakaiproject/memory/impl/SakaiCacheManagerFactoryBean.java (working copy) @@ -0,0 +1,272 @@ +/****************************************************************************** + * $URL: https://source.sakaiproject.org/svn/master/trunk/header.java $ + * $Id: header.java 307632 2014-03-31 15:29:37Z azeckoski@unicon.net $ + ****************************************************************************** + * + * Copyright (c) 2003-2014 The Apereo Foundation + * + * Licensed under the Educational Community License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://opensource.org/licenses/ecl2 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + *****************************************************************************/ + +package org.sakaiproject.memory.impl; + +import net.sf.ehcache.CacheException; +import net.sf.ehcache.CacheManager; +import net.sf.ehcache.config.*; +import org.apache.commons.lang.ArrayUtils; +import org.apache.commons.lang.StringUtils; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.sakaiproject.component.api.ServerConfigurationService; +import org.springframework.beans.factory.DisposableBean; +import org.springframework.beans.factory.FactoryBean; +import org.springframework.beans.factory.InitializingBean; +import org.springframework.core.io.Resource; +import org.springframework.util.ClassUtils; +import org.springframework.util.ReflectionUtils; + +import java.io.IOException; +import java.io.InputStream; +import java.lang.reflect.Method; + +/** + * NOTE: This file was modeled after org/springframework/cache/ehcache/EhCacheManagerFactoryBean.java from URL + * http://grepcode.com/file_/repo1.maven.org/maven2/org.springframework/spring-context-support/3.2.3.RELEASE/org/springframework/cache/ehcache/EhCacheManagerFactoryBean.java/?v=source + * + * @author rlong Bob Long rlong@unicon.net + * @author azeckoski Aaron Zeckoski azeckoski@unicon.net + * + * ORIGINAL JAVADOC BELOW: + * {@link FactoryBean} that exposes an EhCache {@link net.sf.ehcache.CacheManager} + * instance (independent or shared), configured from a specified config location. + * + *

If no config location is specified, a CacheManager will be configured from + * "ehcache.xml" in the root of the class path (that is, default EhCache initialization + * - as defined in the EhCache docs - will apply). + * + *

Setting up a separate EhCacheManagerFactoryBean is also advisable when using + * EhCacheFactoryBean, as it provides a (by default) independent CacheManager instance + * and cares for proper shutdown of the CacheManager. EhCacheManagerFactoryBean is + * also necessary for loading EhCache configuration from a non-default config location. + * + *

Note: As of Spring 3.0, Spring's EhCache support requires EhCache 1.3 or higher. + * As of Spring 3.2, we recommend using EhCache 2.1 or higher. + * + * @author Dmitriy Kopylenko + * @author Juergen Hoeller + */ +public class SakaiCacheManagerFactoryBean implements FactoryBean, InitializingBean, DisposableBean { + + // Check whether EhCache 2.1+ CacheManager.create(Configuration) method is available... + private static final Method createWithConfiguration = + ClassUtils.getMethodIfAvailable(CacheManager.class, "create", Configuration.class); + /** cache defaults **/ + private final static String DEFAULT_CACHE_SERVER_URL = "localhost:9510"; + private final static int DEFAULT_CACHE_TIMEOUT = 120; + private final static int DEFAULT_CACHE_MAX_OBJECTS = 10000000; + protected final Log logger = LogFactory.getLog(getClass()); + protected ServerConfigurationService serverConfigurationService; + private Resource configLocation; + private boolean shared = false; + private String cacheManagerName = "Sakai"; + private CacheManager cacheManager; + private Boolean cacheEnabled; + + public SakaiCacheManagerFactoryBean() { + } + + public SakaiCacheManagerFactoryBean(ServerConfigurationService serverConfigurationService) { + this.serverConfigurationService = serverConfigurationService; + this.cacheManagerName = "SakaiTest"; + try { + this.afterPropertiesSet(); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + /** + * Set the location of the EhCache config file. A typical value is "/WEB-INF/ehcache.xml". + *

Default is "ehcache.xml" in the root of the class path, or if not found, + * "ehcache-failsafe.xml" in the EhCache jar (default EhCache initialization). + * @see net.sf.ehcache.CacheManager#create(java.io.InputStream) + * @see net.sf.ehcache.CacheManager#CacheManager(java.io.InputStream) + */ + public void setConfigLocation(Resource configLocation) { + this.configLocation = configLocation; + } + + /** + * Set whether the EhCache CacheManager should be shared (as a singleton at the VM level) + * or independent (typically local within the application). Default is "false", creating + * an independent instance. + * @see net.sf.ehcache.CacheManager#create() + * @see net.sf.ehcache.CacheManager#CacheManager() + */ + public void setShared(boolean shared) { + this.shared = shared; + } + + /** + * Set the name of the EhCache CacheManager (if a specific name is desired). + * @see net.sf.ehcache.CacheManager#setName(String) + */ + public void setCacheManagerName(String cacheManagerName) { + this.cacheManagerName = cacheManagerName; + } + + /** + * creates a CacheConfiguration based on the cache name. Uses several properties out of sakai.properties: + *

    + *
+ *   • cluster.cache.maxEntriesLocalHeap, defaults to 1000000
+ *   • cluster.cache.timeToIdle, defaults to 120
+ *   • cluster.cache.timeToLive, defaults to 120
+ *   • cluster.cache.maxEntriesLocalDisk, defaults to 1000000
+ * @param clusterCacheName + * @return Terracotta cluster cache configuration + */ + private CacheConfiguration createClusterCacheConfiguration(String clusterCacheName) { + CacheConfiguration clusterCache = new CacheConfiguration( + clusterCacheName, + serverConfigurationService.getInt(clusterCacheName + ".cache.maxEntriesLocalHeap", DEFAULT_CACHE_MAX_OBJECTS)) + .eternal(false) + .timeToIdleSeconds(serverConfigurationService.getInt(clusterCacheName + ".cache.timeToIdle", DEFAULT_CACHE_TIMEOUT)) + .timeToLiveSeconds(serverConfigurationService.getInt(clusterCacheName + ".cache.timeToLive", DEFAULT_CACHE_TIMEOUT)) + .terracotta(new TerracottaConfiguration() + .nonstop(new NonstopConfiguration() + .timeoutBehavior(new TimeoutBehaviorConfiguration() + .type(TimeoutBehaviorConfiguration.LOCAL_READS_TYPE_NAME)) + .enabled(true))); + clusterCache.maxElementsOnDisk(serverConfigurationService.getInt(clusterCacheName + ".cache.maxEntriesLocalDisk", DEFAULT_CACHE_MAX_OBJECTS)); + return clusterCache; + } + + /** + * This is the init method + * If using Terracotta, enable caching via sakai.properties and ensure the Terracotta server is reachable + * Use '-Dcom.tc.tc.config.total.timeout=10000' to specify how long we should try to connect to the TC server + */ + public void afterPropertiesSet() throws IOException { + logger.info("Initializing EhCache CacheManager"); + InputStream is = (this.configLocation != null ? this.configLocation.getInputStream() : null); + if (this.cacheEnabled == null) { + this.cacheEnabled = serverConfigurationService.getBoolean("cluster.cache.enabled", false); + } + + try { + Configuration configuration = (is != null) ? ConfigurationFactory.parseConfiguration(is) : ConfigurationFactory.parseConfiguration(); + configuration.setName(this.cacheManagerName); + + // Setup the Terracotta cluster config + TerracottaClientConfiguration terracottaConfig = new TerracottaClientConfiguration(); + + // use Terracotta server if running and available + if (this.cacheEnabled) { + logger.info("Attempting to load cluster caching using Terracotta at: "+ + serverConfigurationService.getString("cluster.cache.server.url", DEFAULT_CACHE_SERVER_URL)+"."); + // set the URL to the server + String[] serverUrls = serverConfigurationService.getStrings("cluster.cache.server.urls"); + // create comma-separated string of URLs + String serverUrlsString = StringUtils.join(serverUrls, ","); + terracottaConfig.setUrl(serverUrlsString); + terracottaConfig.setRejoin(true); + configuration.addTerracottaConfig(terracottaConfig); + + // retrieve the names of all caches that will be managed by Terracotta and create cache configurations for them + String[] caches = serverConfigurationService.getStrings("cluster.cache.names"); + if (ArrayUtils.isNotEmpty(caches)) { + for (int i = 0; i < caches.length; i++) { + String cacheName = caches[i]; + CacheConfiguration cacheConfiguration = this.createClusterCacheConfiguration(cacheName); + if (cacheConfiguration != null) { + configuration.addCache(cacheConfiguration); + } + } + } + + // create new cache manager with the above configuration + if (this.shared) { + this.cacheManager = (CacheManager) ReflectionUtils.invokeMethod(createWithConfiguration, null, configuration); + } else { + this.cacheManager = new CacheManager(configuration); + } + } else { + // This block contains the original code from org/springframework/cache/ehcache/EhCacheManagerFactoryBean.java + // A bit convoluted for EhCache 1.x/2.0 compatibility. 
+ // To be much simpler once we require EhCache 2.1+ + logger.info("Attempting to load default cluster caching."); + configuration.addTerracottaConfig(terracottaConfig); + if (this.cacheManagerName != null) { + if (this.shared && createWithConfiguration == null) { + // No CacheManager.create(Configuration) method available before EhCache 2.1; + // can only set CacheManager name after creation. + this.cacheManager = (is != null ? CacheManager.create(is) : CacheManager.create()); + this.cacheManager.setName(this.cacheManagerName); + } else { + configuration.setName(this.cacheManagerName); + if (this.shared) { + this.cacheManager = (CacheManager) ReflectionUtils.invokeMethod(createWithConfiguration, null, configuration); + } else { + this.cacheManager = new CacheManager(configuration); + } + } + } else if (this.shared) { + // For strict backwards compatibility: use simplest possible constructors... + this.cacheManager = (is != null ? CacheManager.create(is) : CacheManager.create()); + } else { + this.cacheManager = (is != null ? new CacheManager(is) : new CacheManager()); + } + } + } catch (CacheException ce) { + // this is thrown if we can't connect to the Terracotta server on initialization + if (this.cacheEnabled && this.cacheManager == null) { + logger.error("You have cluster caching enabled in sakai.properties, but do not have a Terracotta server running at "+ + serverConfigurationService.getString("cluster.cache.server.url", "localhost:9510")+ + ". Please ensure the server is running and available.", ce); + // use the default cache instead + this.cacheEnabled = false; + afterPropertiesSet(); + } else { + logger.error("An error occurred while creating the cache manager: ", ce); + } + } finally { + if (is != null) { + is.close(); + } + } + } + + public CacheManager getObject() { + return this.cacheManager; + } + + public Class getObjectType() { + return (this.cacheManager != null ? this.cacheManager.getClass() : CacheManager.class); + } + + public boolean isSingleton() { + return true; + } + + public void destroy() { + logger.info("Shutting down EhCache CacheManager"); + this.cacheManager.shutdown(); + } + + public void setServerConfigurationService(ServerConfigurationService serverConfigurationService) { + this.serverConfigurationService = serverConfigurationService; + } + +} Property changes on: kernel-impl/src/main/java/org/sakaiproject/memory/impl/SakaiCacheManagerFactoryBean.java ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property
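For context, a minimal sketch of how the pieces above fit together at runtime (class name, cache name, and keys are illustrative, not from this patch): with cluster.cache.enabled=true and a cache name listed in cluster.cache.names, SakaiCacheManagerFactoryBean builds the Terracotta-backed configuration from createClusterCacheConfiguration() (tunable per cache through the <cacheName>.cache.* properties), while callers keep using the ordinary MemoryService/Cache API exactly as ClusterEventTracking does.

    import org.sakaiproject.memory.api.Cache;
    import org.sakaiproject.memory.api.MemoryService;

    // Hypothetical example class; only the MemoryService/Cache calls mirror the patch above.
    public class ClusterCacheExample {

        private MemoryService memoryService; // assumed to be injected, as in the patched services

        public void setMemoryService(MemoryService memoryService) {
            this.memoryService = memoryService;
        }

        public void demo() {
            // If this name appears in cluster.cache.names (and cluster.cache.enabled=true),
            // the cache is Terracotta-clustered; otherwise it is a plain local Ehcache-backed cache.
            Cache cache = memoryService.newCache("org.example.hypothetical.exampleCache");

            // Store a value under a String key, the same pattern ClusterEventTracking uses for event ids.
            cache.put("lastEventId", Long.valueOf(42L));

            // get() returns Object (null if absent or expired), so callers cast the result.
            Long lastEventId = (Long) cache.get("lastEventId");
            System.out.println("lastEventId = " + lastEventId);
        }
    }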