diff -Naur Edit2/opennms-dao/src/main/java/org/opennms/netmgt/config/DatabaseSchemaConfigFactory.java Edit3/opennms-dao/src/main/java/org/opennms/netmgt/config/DatabaseSchemaConfigFactory.java --- Edit2/opennms-dao/src/main/java/org/opennms/netmgt/config/DatabaseSchemaConfigFactory.java 2008-01-18 13:57:33.000000000 -0500 +++ Edit3/opennms-dao/src/main/java/org/opennms/netmgt/config/DatabaseSchemaConfigFactory.java 2008-01-18 13:58:05.000000000 -0500 @@ -221,6 +221,42 @@ } /** + * + */ + private void finishConstruction() { + Table primary = getPrimaryTable(); + Set joinableSet = new HashSet(); + Map primaryJoins = new HashMap(); + joinableSet.add(primary.getName()); + int joinableCount = 0; + // loop until we stop adding entries to the set + while (joinableCount < joinableSet.size()) { + joinableCount = joinableSet.size(); + Set newSet = new HashSet(joinableSet); + Enumeration e = getDatabaseSchema().enumerateTable(); + // for each table not already in the set + while (e.hasMoreElements()) { + Table t = e.nextElement(); + if (!joinableSet.contains(t.getName()) && (t.getVisible() == null || t.getVisible().equalsIgnoreCase("true"))) { + Enumeration ejoin = t.enumerateJoin(); + // for each join does it join a table in the set? + while (ejoin.hasMoreElements()) { + Join j = ejoin.nextElement(); + if (joinableSet.contains(j.getTable())) { + newSet.add(t.getName()); + primaryJoins.put(t.getName(), j); + } + } + } + } + joinableSet = newSet; + } + // FIXME: m_joinable is never read + //m_joinable = Collections.synchronizedSet(joinableSet); + m_primaryJoins = Collections.synchronizedMap(primaryJoins); + } + + /** * Find a table using its name as the search key. * * @param name @@ -281,31 +317,6 @@ } /** - * Construct a joining expression necessary to join the given table to the - * primary table. - * - * @param t - * the table to create the expression for - * @return a string representing the joining expression or "" if no - * expression is found - */ - public String constructJoinExprForTable(Table t) { - StringBuffer buf = new StringBuffer(); - - // change this to use getPrimaryJoinsForTable - Join[] joins = getPrimaryJoinsForTable(t); - for (int i = 0; i < joins.length; i++) { - Join j = joins[i]; - if (i != 0) - buf.append(" AND "); - buf.append(i == 0 ? t.getName() : joins[i - 1].getTable()).append('.').append(j.getColumn()); - buf.append(" = "); - buf.append(j.getTable()).append('.').append(j.getTableColumn()); - } - return buf.toString(); - } - - /** * Returns an array of the names of tables involved in a join of the given * table and the primary. The tables are listing in order starting with * primary and moving 'toward' the given table. @@ -355,39 +366,28 @@ } /** + * Construct a joining expression necessary to join the given table to the + * primary table. * + * @param t + * the table to create the expression for + * @return a string representing the joining expression or "" if no + * expression is found */ - private void finishConstruction() { - Table primary = getPrimaryTable(); - Set joinableSet = new HashSet(); - Map primaryJoins = new HashMap(); - joinableSet.add(primary.getName()); - int joinableCount = 0; - // loop until we stop adding entries to the set - while (joinableCount < joinableSet.size()) { - joinableCount = joinableSet.size(); - Set newSet = new HashSet(joinableSet); - Enumeration
e = getDatabaseSchema().enumerateTable(); - // for each table not already in the set - while (e.hasMoreElements()) { - Table t = e.nextElement(); - if (!joinableSet.contains(t.getName()) && (t.getVisible() == null || t.getVisible().equalsIgnoreCase("true"))) { - Enumeration ejoin = t.enumerateJoin(); - // for each join does it join a table in the set? - while (ejoin.hasMoreElements()) { - Join j = ejoin.nextElement(); - if (joinableSet.contains(j.getTable())) { - newSet.add(t.getName()); - primaryJoins.put(t.getName(), j); - } - } - } - } - joinableSet = newSet; + public String constructJoinExprForTable(Table t) { + StringBuffer buf = new StringBuffer(); + + // change this to use getPrimaryJoinsForTable + Join[] joins = getPrimaryJoinsForTable(t); + for (int i = 0; i < joins.length; i++) { + Join j = joins[i]; + if (i != 0) + buf.append(" AND "); + buf.append(i == 0 ? t.getName() : joins[i - 1].getTable()).append('.').append(j.getColumn()); + buf.append(" = "); + buf.append(j.getTable()).append('.').append(j.getTableColumn()); } - // FIXME: m_joinable is never read - //m_joinable = Collections.synchronizedSet(joinableSet); - m_primaryJoins = Collections.synchronizedMap(primaryJoins); + return buf.toString(); } } diff -Naur Edit2/opennms-dao/src/main/java/org/opennms/netmgt/dao/support/JdbcFilterDao.java Edit3/opennms-dao/src/main/java/org/opennms/netmgt/dao/support/JdbcFilterDao.java --- Edit2/opennms-dao/src/main/java/org/opennms/netmgt/dao/support/JdbcFilterDao.java 2008-01-18 13:55:36.000000000 -0500 +++ Edit3/opennms-dao/src/main/java/org/opennms/netmgt/dao/support/JdbcFilterDao.java 2008-01-18 13:58:05.000000000 -0500 @@ -34,9 +34,57 @@ import org.springframework.util.Assert; public class JdbcFilterDao implements FilterDao, InitializingBean { - private NodeDao m_nodeDao; private DataSource m_dataSource; private DatabaseSchemaConfigFactory m_databaseSchemaConfigFactory; + private NodeDao m_nodeDao; + + public void setDataSource(DataSource dataSource) { + m_dataSource = dataSource; + } + + public DataSource getDataSource() { + return m_dataSource; + } + + public void setDatabaseSchemaConfigFactory(DatabaseSchemaConfigFactory factory) { + m_databaseSchemaConfigFactory = factory; + } + + public DatabaseSchemaConfigFactory getDatabaseSchemaConfigFactory() { + return m_databaseSchemaConfigFactory; + } + + public void afterPropertiesSet() { + Assert.state(m_dataSource != null, "property dataSource cannot be null"); + Assert.state(m_databaseSchemaConfigFactory != null, "property databaseSchemaConfigFactory cannot be null"); + } + + public void setNodeDao(NodeDao nodeDao) { + m_nodeDao = nodeDao; + } + + public NodeDao getNodeDao() { + return m_nodeDao; + } + + public void walkMatchingNodes(String rule, EntityVisitor visitor) { + Assert.state(m_nodeDao != null, "property nodeDao cannot be null"); + + SortedMap map; + try { + map = getNodeMap(rule); + } catch (FilterParseException e) { + throw new DataRetrievalFailureException("Could not parse rule '" + rule + "': " + e, e); + } + if (log().isDebugEnabled()) { + log().debug("got " + map.size() + " results"); + } + + for (Integer nodeId : map.keySet()) { + OnmsNode node = getNodeDao().load(nodeId); + visitor.visitNode(node); + } + } /** * This method returns a map of all nodeids and nodelabels that match @@ -108,39 +156,6 @@ return resultMap; } - private final Category log() { - return ThreadCategory.getInstance(getClass()); - } - - public void afterPropertiesSet() { - Assert.state(m_dataSource != null, "property dataSource cannot be null"); - 
Assert.state(m_databaseSchemaConfigFactory != null, "property databaseSchemaConfigFactory cannot be null");
-    }
-
-    public NodeDao getNodeDao() {
-        return m_nodeDao;
-    }
-
-    public void setNodeDao(NodeDao nodeDao) {
-        m_nodeDao = nodeDao;
-    }
-
-    public DataSource getDataSource() {
-        return m_dataSource;
-    }
-
-    public void setDataSource(DataSource dataSource) {
-        m_dataSource = dataSource;
-    }
-
-    public void setDatabaseSchemaConfigFactory(DatabaseSchemaConfigFactory factory) {
-        m_databaseSchemaConfigFactory = factory;
-    }
-
-    public DatabaseSchemaConfigFactory getDatabaseSchemaConfigFactory() {
-        return m_databaseSchemaConfigFactory;
-    }
-
     public Map<String, Set<String>> getIPServiceMap(String rule) {
         Map<String, Set<String>> ipServices = new TreeMap<String, Set<String>>();
@@ -287,77 +302,6 @@
         }
     }
 
-    public void validateRule(String rule) throws FilterParseException {
-        getSQLStatement(rule);
-    }
-
-    /**
-     * This method is used to parse and valiate a rule into its graph tree. If
-     * the parser cannot validate the rule then an exception is generated.
-     *
-     * @param rule
-     *            The rule to parse.
-     *
-     * @throws FilterParseException
-     *             Thrown if the rule cannot be parsed.
-     */
-    private Start parseRule(String rule) throws FilterParseException {
-        if (rule != null && rule.length() > 0) {
-            try {
-                // Create a Parser instance.
-                Parser p = new Parser(new Lexer(new PushbackReader(new StringReader(rule))));
-
-                // Parse the input.
-                return p.parse();
-            } catch (Exception e) {
-                log().error("Failed to parse the filter rule '" + rule + "': " + e, e);
-                throw new FilterParseException("Parse error in rule '" + rule + "': " + e, e);
-            }
-        } else {
-            throw new FilterParseException("Parse error: rule is null or empty");
-        }
-    }
-
-    /**
-     * This method parses a rule and returns the SQL select statement equivalent
-     * of the rule.
- * - * @return the sql select statement - */ - protected String getSQLStatement(String rule) { - Start parseTree = parseRule(rule); - SQLTranslation translation = new SQLTranslation(parseTree, getDatabaseSchemaConfigFactory()); - return translation.getStatement(); - } - - protected String getSQLStatement(String rule, long nodeId, String ipaddr, String service) { - Start parseTree = parseRule(rule); - SQLTranslation translation = new SQLTranslation(parseTree, getDatabaseSchemaConfigFactory()); - translation.setConstraintTranslation(nodeId, ipaddr, service); - return translation.getStatement(); - } - - protected String getIPServiceMappingStatement(String rule) { - Start parseTree = parseRule(rule); - SQLTranslation translation = new SQLTranslation(parseTree, getDatabaseSchemaConfigFactory()); - translation.setIPServiceMappingTranslation(); - return translation.getStatement(); - } - - protected String getNodeMappingStatement(String rule) { - Start parseTree = parseRule(rule); - SQLTranslation translation = new SQLTranslation(parseTree, getDatabaseSchemaConfigFactory()); - translation.setNodeMappingTranslation(); - return translation.getStatement(); - } - - protected String getInterfaceWithServiceStatement(String rule) { - Start parseTree = parseRule(rule); - SQLTranslation translation = new SQLTranslation(parseTree, getDatabaseSchemaConfigFactory()); - translation.setInterfaceWithServiceTranslation(); - return translation.getStatement(); - } - public boolean isRuleMatching(String rule) { Start parseTree = parseRule(rule); SQLTranslation translation = new SQLTranslation(parseTree, getDatabaseSchemaConfigFactory()); @@ -411,22 +355,79 @@ } } - public void walkMatchingNodes(String rule, EntityVisitor visitor) { - Assert.state(m_nodeDao != null, "property nodeDao cannot be null"); + public void validateRule(String rule) throws FilterParseException { + getSQLStatement(rule); + } - SortedMap map; - try { - map = getNodeMap(rule); - } catch (FilterParseException e) { - throw new DataRetrievalFailureException("Could not parse rule '" + rule + "': " + e, e); - } - if (log().isDebugEnabled()) { - log().debug("got " + map.size() + " results"); - } + private final Category log() { + return ThreadCategory.getInstance(getClass()); + } - for (Integer nodeId : map.keySet()) { - OnmsNode node = getNodeDao().load(nodeId); - visitor.visitNode(node); + protected String getNodeMappingStatement(String rule) { + Start parseTree = parseRule(rule); + SQLTranslation translation = new SQLTranslation(parseTree, getDatabaseSchemaConfigFactory()); + translation.setNodeMappingTranslation(); + return translation.getStatement(); + } + + protected String getIPServiceMappingStatement(String rule) { + Start parseTree = parseRule(rule); + SQLTranslation translation = new SQLTranslation(parseTree, getDatabaseSchemaConfigFactory()); + translation.setIPServiceMappingTranslation(); + return translation.getStatement(); + } + + protected String getInterfaceWithServiceStatement(String rule) { + Start parseTree = parseRule(rule); + SQLTranslation translation = new SQLTranslation(parseTree, getDatabaseSchemaConfigFactory()); + translation.setInterfaceWithServiceTranslation(); + return translation.getStatement(); + } + + /** + * This method parses a rule and returns the SQL select statement equivalent + * of the rule. 
+ * + * @return the sql select statement + */ + protected String getSQLStatement(String rule) { + Start parseTree = parseRule(rule); + SQLTranslation translation = new SQLTranslation(parseTree, getDatabaseSchemaConfigFactory()); + return translation.getStatement(); + } + + protected String getSQLStatement(String rule, long nodeId, String ipaddr, String service) { + Start parseTree = parseRule(rule); + SQLTranslation translation = new SQLTranslation(parseTree, getDatabaseSchemaConfigFactory()); + translation.setConstraintTranslation(nodeId, ipaddr, service); + return translation.getStatement(); + } + + /** + * This method is used to parse and valiate a rule into its graph tree. If + * the parser cannot validate the rule then an exception is generated. + * + * @param rule + * The rule to parse. + * + * @throws FilterParseException + * Thrown if the rule cannot be parsed. + */ + private Start parseRule(String rule) throws FilterParseException { + if (rule != null && rule.length() > 0) { + try { + // Create a Parser instance. + Parser p = new Parser(new Lexer(new PushbackReader(new StringReader(rule)))); + + // Parse the input. + return p.parse(); + } catch (Exception e) { + log().error("Failed to parse the filter rule '" + rule + "': " + e, e); + throw new FilterParseException("Parse error in rule '" + rule + "': " + e, e); + } + } else { + throw new FilterParseException("Parse error: rule is null or empty"); } } + } diff -Naur Edit2/opennms-dao/src/main/java/org/opennms/netmgt/filter/SQLTranslation.java Edit3/opennms-dao/src/main/java/org/opennms/netmgt/filter/SQLTranslation.java --- Edit2/opennms-dao/src/main/java/org/opennms/netmgt/filter/SQLTranslation.java 2008-01-18 13:57:33.000000000 -0500 +++ Edit3/opennms-dao/src/main/java/org/opennms/netmgt/filter/SQLTranslation.java 2008-01-18 14:02:28.000000000 -0500 @@ -108,9 +108,14 @@ public static final String VIRTUAL_CATINC_PREFIX = "catinc"; /** - * The list of tables required to create the approriate SQL statement + * The starting node of the parse tree */ - private List
m_tables;
+    private Start m_node;
+
+    /**
+     * A modifier on the selectList (like 'DISTINCT')
+     */
+    private String m_selectModifier;
 
     /**
      * The list of columns to be returned by the SQL.
@@ -118,9 +123,9 @@
     private List m_selectList;
 
     /**
-     * A modifier on the selectList (like 'DISTINCT')
+     * The list of tables required to create the approriate SQL statement
      */
-    private String m_selectModifier;
+    private List<Table> m_tables;
 
     /**
      * The from part of the clause
@@ -138,14 +143,154 @@
     private StringBuffer m_ipaddr;
 
     /**
-     * The starting node of the parse tree
+     * The limit count for the filter, or null if there is no limit.
      */
-    private Start m_node;
+    private Integer m_limitCount = null;
+
+    public SQLTranslation(Start node, DatabaseSchemaConfigFactory databaseSchemaConfigFactory) {
+        m_schemaFactory = databaseSchemaConfigFactory;
+
+        m_node = node;
+
+        m_selectList = new ArrayList();
+
+        m_from = new StringBuffer(" FROM ");
+
+        m_where = new StringBuffer(" WHERE (");
+
+        m_tables = new ArrayList<Table>
(m_schemaFactory.getTableCount()); + setDefaultTranslation(); + } /** - * The limit count for the filter, or null if there is no limit. + * Initializes the pieces of the SQL statement to perform a default query to + * select distinct ip addresses based the query that is built from the rest + * of the rule. */ - private Integer m_limitCount = null; + public void setDefaultTranslation() { + + m_selectModifier = "DISTINCT"; + + m_selectList.clear(); + m_selectList.add(addColumnToStatement("ipAddr")); + } + + public void setNodeMappingTranslation() { + m_selectModifier = "DISTINCT"; + + m_selectList.clear(); + m_selectList.add(addColumnToStatement("nodeid")); + m_selectList.add(addColumnToStatement("nodelabel")); + + } + + public void setIPServiceMappingTranslation() { + m_selectModifier = ""; + + m_selectList.clear(); + m_selectList.add(addColumnToStatement("ipAddr")); + m_selectList.add(addColumnToStatement("serviceName")); + + } + + public void setInterfaceWithServiceTranslation() { + m_selectModifier = "DISTINCT"; + + m_selectList.clear(); + m_selectList.add(addColumnToStatement("ipAddr")); + m_selectList.add(addColumnToStatement("serviceName")); + m_selectList.add(addColumnToStatement("nodeID")); + } + + /** + * This method should be called if you want to put constraints on the node, + * interface or service that is returned in the rule. This is useful to see + * if a particular node, interface, or service matches in the rule, and is + * primarily used to filter notices. A subselect is built containing joins + * constrained by node, interface, and service if they are not null or + * blank. This select is then anded with the filter rule to get the complete + * SQL statement. + * + * @param nodeId + * a node id to constrain against + * @param ipaddr + * an ipaddress to constrain against + * @param service + * a service name to constrain against + */ + public void setConstraintTranslation(long nodeId, String ipaddr, String service) { + m_selectModifier = "DISTINCT"; + + m_selectList.clear(); + + String ipAddrColumn = addColumnToStatement("ipAddr"); + m_selectList.add(ipAddrColumn); + + StringBuffer constraint = new StringBuffer(); + boolean needAnd = false; + if (nodeId != 0) { + if (needAnd) + constraint.append(" AND "); + String nodeIDColumn = addColumnToStatement("nodeID"); + constraint.append(nodeIDColumn).append(" = ").append(nodeId); + needAnd = true; + } + + if (ipaddr != null && !ipaddr.equals("")) { + if (needAnd) + constraint.append(" AND "); + constraint.append(ipAddrColumn).append(" = '").append(ipaddr).append('\''); + needAnd = true; + } + + if (service != null && !service.equals("")) { + String serviceColumn = addColumnToStatement("serviceName"); + if (needAnd) + constraint.append(" AND "); + constraint.append(serviceColumn).append(" = '").append(service).append('\''); + needAnd = true; + } + + m_where.append(constraint).append(") AND ("); + + } + + /** + * Set a limit on the number of rows returned. + */ + public void setLimitCount(Integer count) { + m_limitCount = count; + } + + /** + * This method returns the complete sql statement for the filter that was + * parsed. The SQL statement is the result of the select, from, and where + * components assembled from the code. 
+     *
+     */
+    public String getStatement() {
+        // don't walk tree if there is no tree to walk
+        if (m_node == null)
+            return null;
+
+        // this will walk the tree and build the rest of the sql statement
+        //
+        m_node.apply(this);
+
+        return buildSelectClause() + m_from.toString() + m_where.toString()
+                + buildLimitClause();
+    }
+
+    private String buildSelectClause() {
+        StringBuffer clause = new StringBuffer("SELECT ");
+        clause.append(m_selectModifier).append(" ");
+
+        for (int i = 0; i < m_selectList.size(); i++) {
+            clause.append((String) m_selectList.get(i)).append(i < m_selectList.size() - 1 ? ", " : "");
+        }
+
+        return clause.toString();
+    }
 
     /**
      * This method is used to build the join condtions to be added to the where
@@ -170,6 +315,14 @@
         return joinBuf.toString();
     }
 
+    private String buildLimitClause() {
+        if (m_limitCount != null) {
+            return " LIMIT " + m_limitCount;
+        } else {
+            return "";
+        }
+    }
+
     /**
      * Validate the identifier by ensuring it is references in the schema. Also
      * checks for 'virtual columns' be checking the prefix. If it is this turns
@@ -322,128 +475,6 @@
         }
     }
 
-    public SQLTranslation(Start node, DatabaseSchemaConfigFactory databaseSchemaConfigFactory) {
-        m_schemaFactory = databaseSchemaConfigFactory;
-
-        m_node = node;
-
-        m_selectList = new ArrayList();
-
-        m_from = new StringBuffer(" FROM ");
-
-        m_where = new StringBuffer(" WHERE (");
-
-        m_tables = new ArrayList
(m_schemaFactory.getTableCount()); - setDefaultTranslation(); - } - - private String buildSelectClause() { - StringBuffer clause = new StringBuffer("SELECT "); - clause.append(m_selectModifier).append(" "); - - for (int i = 0; i < m_selectList.size(); i++) { - clause.append((String) m_selectList.get(i)).append(i < m_selectList.size() - 1 ? ", " : ""); - } - - return clause.toString(); - } - - /** - * Initializes the pieces of the SQL statement to perform a default query to - * select distinct ip addresses based the query that is built from the rest - * of the rule. - */ - public void setDefaultTranslation() { - m_selectModifier = "DISTINCT"; - - m_selectList.clear(); - m_selectList.add(addColumnToStatement("ipAddr")); - } - - public void setIPServiceMappingTranslation() { - m_selectModifier = ""; - - m_selectList.clear(); - m_selectList.add(addColumnToStatement("ipAddr")); - m_selectList.add(addColumnToStatement("serviceName")); - } - - public void setNodeMappingTranslation() { - m_selectModifier = "DISTINCT"; - - m_selectList.clear(); - m_selectList.add(addColumnToStatement("nodeid")); - m_selectList.add(addColumnToStatement("nodelabel")); - } - - /** - * This method should be called if you want to put constraints on the node, - * interface or service that is returned in the rule. This is useful to see - * if a particular node, interface, or service matches in the rule, and is - * primarily used to filter notices. A subselect is built containing joins - * constrained by node, interface, and service if they are not null or - * blank. This select is then anded with the filter rule to get the complete - * SQL statement. - * - * @param nodeId - * a node id to constrain against - * @param ipaddr - * an ipaddress to constrain against - * @param service - * a service name to constrain against - */ - public void setConstraintTranslation(long nodeId, String ipaddr, String service) { - m_selectModifier = "DISTINCT"; - - m_selectList.clear(); - - String ipAddrColumn = addColumnToStatement("ipAddr"); - m_selectList.add(ipAddrColumn); - - StringBuffer constraint = new StringBuffer(); - boolean needAnd = false; - if (nodeId != 0) { - if (needAnd) - constraint.append(" AND "); - String nodeIDColumn = addColumnToStatement("nodeID"); - constraint.append(nodeIDColumn).append(" = ").append(nodeId); - needAnd = true; - } - - if (ipaddr != null && !ipaddr.equals("")) { - if (needAnd) - constraint.append(" AND "); - constraint.append(ipAddrColumn).append(" = '").append(ipaddr).append('\''); - needAnd = true; - } - - if (service != null && !service.equals("")) { - String serviceColumn = addColumnToStatement("serviceName"); - if (needAnd) - constraint.append(" AND "); - constraint.append(serviceColumn).append(" = '").append(service).append('\''); - needAnd = true; - } - - m_where.append(constraint).append(") AND ("); - } - - public void setInterfaceWithServiceTranslation() { - m_selectModifier = "DISTINCT"; - - m_selectList.clear(); - m_selectList.add(addColumnToStatement("ipAddr")); - m_selectList.add(addColumnToStatement("serviceName")); - m_selectList.add(addColumnToStatement("nodeID")); - } - - /** - * Set a limit on the number of rows returned. 
-     */
-    public void setLimitCount(Integer count) {
-        m_limitCount = count;
-    }
-
     public void outStart(Start node) {
         // finish the where clause by putting in the join clauses to
         // the ipinterface table, separating them from the rest of the
@@ -705,30 +736,4 @@
         outAIntegerOctet(node);
     }
 
-    /**
-     * This method returns the complete sql statement for the filter that was
-     * parsed. The SQL statement is the result of the select, from, and where
-     * components assembled from the code.
-     *
-     */
-    public String getStatement() {
-        // don't walk tree if there is no tree to walk
-        if (m_node == null)
-            return null;
-
-        // this will walk the tree and build the rest of the sql statement
-        //
-        m_node.apply(this);
-
-        return buildSelectClause() + m_from.toString() + m_where.toString()
-                + buildLimitClause();
-    }
-
-    private String buildLimitClause() {
-        if (m_limitCount != null) {
-            return " LIMIT " + m_limitCount;
-        } else {
-            return "";
-        }
-    }
 }
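
For context only, and not part of the patch itself: a rough sketch of how the reordered JdbcFilterDao accessors and rule methods fit together after this change. The helper method below, its name, and the way the DataSource, DatabaseSchemaConfigFactory, and NodeDao instances are obtained are illustrative assumptions rather than code from this changeset; only the JdbcFilterDao calls themselves appear in the diff above (java.util.Map/Set and the OpenNMS imports shown in the patch are assumed).

    // Hypothetical helper -- assumes the caller supplies a wired DataSource,
    // an initialized DatabaseSchemaConfigFactory, and a NodeDao (e.g. via Spring).
    public Map<String, Set<String>> lookUpMatchingServices(DataSource dataSource,
            DatabaseSchemaConfigFactory schemaFactory, NodeDao nodeDao, String rule)
            throws FilterParseException {
        JdbcFilterDao filterDao = new JdbcFilterDao();
        filterDao.setDataSource(dataSource);
        filterDao.setDatabaseSchemaConfigFactory(schemaFactory);
        filterDao.setNodeDao(nodeDao);       // only required for walkMatchingNodes()
        filterDao.afterPropertiesSet();      // asserts dataSource and schema factory are set

        filterDao.validateRule(rule);        // throws FilterParseException on a bad rule
        return filterDao.getIPServiceMap(rule);   // IP address -> set of matching service names
    }

As a usage note, walkMatchingNodes(rule, visitor) follows the same wiring but additionally depends on the NodeDao, since it loads each matching OnmsNode and hands it to the supplied EntityVisitor.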