pax_global_header00006660000000000000000000000064133050102340014501gustar00rootroot0000000000000052 comment=6765204729c95da970badb8a4be5f979e6fcbe24 mckoisqldb-1.0.6/000077500000000000000000000000001330501023400136355ustar00rootroot00000000000000mckoisqldb-1.0.6/COPYRIGHT.txt000066400000000000000000000001021330501023400157370ustar00rootroot00000000000000Mckoi SQL Database Copyright 2000-2018 Diehl and Associates, Inc. mckoisqldb-1.0.6/LICENSE000066400000000000000000000261351330501023400146510ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. 
"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. 
Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. 
Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. mckoisqldb-1.0.6/distrib_files/000077500000000000000000000000001330501023400164575ustar00rootroot00000000000000mckoisqldb-1.0.6/distrib_files/COPYRIGHT.txt000066400000000000000000000001021330501023400205610ustar00rootroot00000000000000Mckoi SQL Database Copyright 2000-2018 Diehl and Associates, Inc. mckoisqldb-1.0.6/distrib_files/LICENSE.txt000066400000000000000000000261351330501023400203110ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. 
"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." 
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. 
Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. 
Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. mckoisqldb-1.0.6/pom.xml000066400000000000000000000057111330501023400151560ustar00rootroot00000000000000 4.0.0 com.mckoi mckoisqldb jar Mckoi SQL Database (MckoiSQLDB) 1.0.6 http://www.mckoi.com/database/ A full SQL database system with JDBC driver that can be embedded in a Java application or operate as a stand-alone server with clients connecting via TCP/IP. org.sonatype.oss oss-parent 7 GNU General Public License, Version 2 http://www.apache.org/licenses/LICENSE-2.0 repo https://github.com/Mckoi/origsqldb scm:https://github.com/Mckoi/origsqldb.git Tobias Downer Tobias Downer toby@mckoi.com org.apache.maven.plugins maven-compiler-plugin 2.5.1 1.3 1.3 ${project.build.sourceEncoding} org.apache.maven.plugins maven-jar-plugin 2.4 true com.mckoi.runtime.McKoiDBMain org.apache.maven.plugins maven-resources-plugin 2.3 ${project.build.sourceEncoding} src/main/resources/ ${basedir}/distrib_files META-INF LICENSE COPYRIGHT.txt UTF-8 
mckoisqldb-1.0.6/src/000077500000000000000000000000001330501023400144245ustar00rootroot00000000000000mckoisqldb-1.0.6/src/main/000077500000000000000000000000001330501023400153505ustar00rootroot00000000000000mckoisqldb-1.0.6/src/main/java/000077500000000000000000000000001330501023400162715ustar00rootroot00000000000000mckoisqldb-1.0.6/src/main/java/com/000077500000000000000000000000001330501023400170475ustar00rootroot00000000000000mckoisqldb-1.0.6/src/main/java/com/mckoi/000077500000000000000000000000001330501023400201515ustar00rootroot00000000000000mckoisqldb-1.0.6/src/main/java/com/mckoi/JDBCDriver.java000066400000000000000000000024361330501023400226770ustar00rootroot00000000000000/** * com.mckoi.JDBCDriver 22 Jul 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi; /** * Instance class that registers the mckoi JDBC driver with the JDBC * Driver Manager. *

* This class now also extends com.mckoi.database.jdbc.MDriver. * * @author Tobias Downer */ public class JDBCDriver extends com.mckoi.database.jdbc.MDriver { /** * Just referencing this class will register the JDBC driver. Any objections * to this behaviour? */ static { com.mckoi.database.jdbc.MDriver.register(); } /** * Constructor. */ public JDBCDriver() { super(); // Or we could move driver registering here... } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/000077500000000000000000000000001330501023400217155ustar00rootroot00000000000000mckoisqldb-1.0.6/src/main/java/com/mckoi/database/AbstractAggregateFunction.java000066400000000000000000000111711330501023400276410ustar00rootroot00000000000000/** * com.mckoi.database.AbstractAggregateFunction 06 Aug 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; /** * Provides convenience methods for handling aggregate functions (functions * that are evaluated over a grouping set). Note that this class handles the * most common form of aggregate functions. These are aggregates with no more * or no less than one parameter, and that return NULL if the group set has a * length of 0. If an aggregate function doesn't fit this design, then the * developer must roll their own AbstractFunction to handle it. *

* This object handles full expressions being passed as parameters to the * aggregate function. The expression is evaluated for each set in the * group. Therefore the aggregate function, avg(length(description)) will * find the average length of the description column. sum(price * quantity) * will find the sum of the price * quantity of each set in the group. * * @author Tobias Downer */ public abstract class AbstractAggregateFunction extends AbstractFunction { /** * Constructs an aggregate function. */ public AbstractAggregateFunction(String name, Expression[] params) { super(name, params); setAggregate(true); // Aggregates must have only one argument if (parameterCount() != 1) { throw new RuntimeException("'" + name + "' function must have one argument."); } } // ---------- Abstract ---------- /** * Evaluates the aggregate function for the given values and returns the * result. If this aggregate was 'sum' then this method would sum the two * values. If this aggregate was 'avg' then this method would also sum the * two values and the 'postEvalAggregate' would divide by the number * processed. *

* NOTE: This first time this method is called on a set, 'val1' is 'null' and * 'val2' contains the first value in the set. */ public abstract TObject evalAggregate(GroupResolver group, QueryContext context, TObject val1, TObject val2); /** * Called just before the value is returned to the parent. This does any * final processing on the result before it is returned. If this aggregate * was 'avg' then we'd divide by the size of the group. */ public TObject postEvalAggregate(GroupResolver group, QueryContext context, TObject result) { // By default, do nothing.... return result; } // ---------- Implemented from AbstractFunction ---------- public final TObject evaluate(GroupResolver group, VariableResolver resolver, QueryContext context) { if (group == null) { throw new RuntimeException("'" + getName() + "' can only be used as an aggregate function."); } TObject result = null; // All aggregates functions return 'null' if group size is 0 int size = group.size(); if (size == 0) { // Return a NULL of the return type return new TObject(returnTType(resolver, context), null); } TObject val; Variable v = getParameter(0).getVariable(); // If the aggregate parameter is a simple variable, then use optimal // routine, if (v != null) { for (int i = 0; i < size; ++i) { val = group.resolve(v, i); result = evalAggregate(group, context, result, val); } } else { // Otherwise we must resolve the expression for each entry in group, // This allows for expressions such as 'sum(quantity * price)' to // work for a group. Expression exp = getParameter(0); for (int i = 0; i < size; ++i) { val = exp.evaluate(null, group.getVariableResolver(i), context); result = evalAggregate(group, context, result, val); } } // Post method. 
result = postEvalAggregate(group, context, result); return result; } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/AbstractDataTable.java000066400000000000000000000034441330501023400260720ustar00rootroot00000000000000/** * com.mckoi.database.AbstractDataTable 06 Apr 1998 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; /** * This is the abstract class implemented by a DataTable like table. Both * DataTable and DataTableFilter objects extend this object. *

* @author Tobias Downer */ public abstract class AbstractDataTable extends Table implements RootTable { /** * Returns the fully resolved table name. */ public TableName getTableName() { return getDataTableDef().getTableName(); } // ---------- Implemented from Table ---------- /** * This function is used to check that two tables are identical. * We first check the table names are identical. Then check the column * filter is the same. */ public boolean typeEquals(RootTable table) { if (table instanceof AbstractDataTable) { AbstractDataTable dest = (AbstractDataTable) table; return (getTableName().equals(dest.getTableName())); } else { return (this == table); } } /** * Returns a string that represents this table. */ public String toString() { return getTableName().toString() + "[" + getRowCount() + "]"; } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/AbstractFunction.java000066400000000000000000000120241330501023400260300ustar00rootroot00000000000000/** * com.mckoi.database.AbstractFunction 12 Jul 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import java.math.BigDecimal; import java.util.ArrayList; import java.util.List; /** * An abstract implementation of Function. * * @author Tobias Downer */ public abstract class AbstractFunction implements Function { /** * The name of the function. 
*/ private String name; /** * The list of expressions this function has as parameters. */ private Expression[] params; /** * Set to true if this is an aggregate function (requires a group). It is * false by default. */ private boolean is_aggregate; /** * Constructs the Function with the given expression array as parameters. */ public AbstractFunction(String name, Expression[] params) { this.name = name; this.params = params; is_aggregate = false; } /** * Call this from the constructor if the function is an aggregate. */ protected void setAggregate(boolean status) { is_aggregate = status; } /** * Returns the number of parameters for this function. */ public int parameterCount() { return params.length; } /** * Returns the parameter at the given index in the parameters list. */ public Expression getParameter(int n) { return params[n]; } /** * Returns true if the param is the special case glob parameter (*). */ public boolean isGlob() { if (params == FunctionFactory.GLOB_LIST) { return true; } if (params.length == 1) { Expression exp = params[0]; return (exp.size() == 1 && new String(exp.text()).equals("*")); } return false; } // ---------- Implemented from Function ---------- /** * Returns the name of the function. The name is a unique identifier that * can be used to recreate this function. This identifier can be used to * easily serialize the function when grouped with its parameters. */ public String getName() { return name; } /** * Returns the list of all Variable's that are used by this function. This * looks up each expression in the list of parameters. This will cascade * if the expressions have a Function, etc. */ public List allVariables() { ArrayList result_list = new ArrayList(); for (int i = 0; i < params.length; ++i) { List l = params[i].allVariables(); result_list.addAll(l); } return result_list; } /** * Returns the list of all elements that are used by this function. This * looks up each expression in the list of parameters. 
This will cascade * if the expressions have a Function, etc. */ public List allElements() { ArrayList result_list = new ArrayList(); for (int i = 0; i < params.length; ++i) { List l = params[i].allElements(); result_list.addAll(l); } return result_list; } /** * Returns whether the function is an aggregate function or not. */ public final boolean isAggregate(QueryContext context) { if (is_aggregate) { return true; } else { // Check if arguments are aggregates for (int i = 0; i < params.length; ++i) { Expression exp = params[i]; if (exp.hasAggregateFunction(context)) { return true; } } } return false; } /** * Prepares the parameters of the function. */ public void prepareParameters(ExpressionPreparer preparer) throws DatabaseException { for (int i = 0; i < params.length; ++i) { params[i].prepare(preparer); } } /** * The init function. By default, we don't do anything however this should * be overwritten if we need to check the parameter arguments. */ public void init(VariableResolver resolver) { } /** * By Default, we assume a function returns a Numeric object. */ public TType returnTType(VariableResolver resolver, QueryContext context) { return returnTType(); } public TType returnTType() { return TType.NUMERIC_TYPE; } // ---------- Convenience methods ---------- public String toString() { StringBuffer buf = new StringBuffer(); buf.append(name); buf.append('('); for (int i = 0; i < params.length; ++i) { buf.append(params[i].text().toString()); if (i < params.length - 1) { buf.append(','); } } buf.append(')'); return new String(buf); } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/AbstractInternalTableInfo.java000066400000000000000000000057651330501023400276210ustar00rootroot00000000000000/** * com.mckoi.database.AbstractInternalTableInfo 23 Aug 2002 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; /** * An implementation of InternalTableInfo that provides a number of methods to * aid in the productions of the InternalTableInfo interface. *

* This leaves the 'createInternalTable' method implementation to the derived * class. * * @author Tobias Downer */ abstract class AbstractInternalTableInfo implements InternalTableInfo { /** * The list of table names (as TableName) that this object maintains. */ private TableName[] table_list; /** * The list of DataTableDef objects that descibe each table in the above * list. */ private DataTableDef[] table_def_list; /** * The table type of table objects returned by this method. */ private String table_type; /** * Constructs the container than manages the creation of the given table * objects. */ AbstractInternalTableInfo(String type, DataTableDef[] table_def_list) { this.table_def_list = table_def_list; this.table_type = type; table_list = new TableName[table_def_list.length]; for (int i = 0; i < table_list.length; ++i) { table_list[i] = table_def_list[i].getTableName(); } } /** * Returns the number of internal table sources that this object is * maintaining. */ public int getTableCount() { return table_list.length; } /** * Finds the index in this container of the given table name, otherwise * returns -1. */ public int findTableName(TableName name) { for (int i = 0; i < table_list.length; ++i) { if (table_list[i].equals(name)) { return i; } } return -1; } /** * Returns the name of the table at the given index in this container. */ public TableName getTableName(int i) { return table_list[i]; } /** * Returns the DataTableDef object that describes the table at the given * index in this container. */ public DataTableDef getDataTableDef(int i) { return table_def_list[i]; } /** * Returns true if this container contains a table with the given name. */ public boolean containsTableName(TableName name) { return findTableName(name) != -1; } /** * Returns a String that describes the type of the table at the given index. 
*/ public String getTableType(int i) { return table_type; } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/AbstractInternalTableInfo2.java000066400000000000000000000103631330501023400276710ustar00rootroot00000000000000/** * com.mckoi.database.AbstractInternalTableInfo2 14 Mar 2003 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; /** * An implementation of InternalTableInfo that provides a number of methods to * aid in the productions of the InternalTableInfo interface for a transaction * specific model of a set of tables that is based on a single system table. * This would be used to model table views for triggers, views, procedures and * sequences all of which are table sets tied to a single table respectively, * and the number of items in the table represent the number of tables to model. *

* This abstraction assumes that the name of the schema/table are in columns 0 * and 1 of the backed system table. * * @author Tobias Downer */ abstract class AbstractInternalTableInfo2 implements InternalTableInfo { /** * The transaction we are connected to. */ protected final Transaction transaction; /** * The table in the transaction that contains the list of items we are * modelling. */ protected final TableName table_name; /** * Constructor. */ public AbstractInternalTableInfo2(Transaction transaction, TableName table_name) { this.transaction = transaction; this.table_name = table_name; } public int getTableCount() { if (transaction.tableExists(table_name)) { return transaction.getTable(table_name).getRowCount(); } else { return 0; } } public int findTableName(TableName name) { if (transaction.realTableExists(table_name)) { // Search the table. We assume that the schema and name of the object // are in columns 0 and 1 respectively. MutableTableDataSource table = transaction.getTable(table_name); RowEnumeration row_e = table.rowEnumeration(); int p = 0; while (row_e.hasMoreRows()) { int row_index = row_e.nextRowIndex(); TObject ob_name = table.getCellContents(1, row_index); if (ob_name.getObject().toString().equals(name.getName())) { TObject ob_schema = table.getCellContents(0, row_index); if (ob_schema.getObject().toString().equals(name.getSchema())) { // Match so return this return p; } } ++p; } } return -1; } public TableName getTableName(int i) { if (transaction.realTableExists(table_name)) { // Search the table. We assume that the schema and name of the object // are in columns 0 and 1 respectively. 
MutableTableDataSource table = transaction.getTable(table_name); RowEnumeration row_e = table.rowEnumeration(); int p = 0; while (row_e.hasMoreRows()) { int row_index = row_e.nextRowIndex(); if (i == p) { TObject ob_schema = table.getCellContents(0, row_index); TObject ob_name = table.getCellContents(1, row_index); return new TableName(ob_schema.getObject().toString(), ob_name.getObject().toString()); } ++p; } } throw new RuntimeException("Out of bounds."); } public boolean containsTableName(TableName name) { // This set can not contain the table that is backing it, so we always // return false for that. This check stops an annoying recursive // situation for table name resolution. if (name.equals(table_name)) { return false; } else { return findTableName(name) != -1; } } public abstract DataTableDef getDataTableDef(int i); public abstract String getTableType(int i); public abstract MutableTableDataSource createInternalTable(int index); } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/AbstractQueryContext.java000066400000000000000000000042251330501023400267210ustar00rootroot00000000000000/** * com.mckoi.database.AbstractQueryContext 25 Mar 2002 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package com.mckoi.database; import java.util.HashMap; /** * An abstract implementation of QueryContext * * @author Tobias Downer */ public abstract class AbstractQueryContext implements QueryContext { /** * Any marked tables that are made during the evaluation of a query plan. * (String) -> (Table) */ private HashMap marked_tables; /** * Marks a table in a query plan. */ public void addMarkedTable(String mark_name, Table table) { if (marked_tables == null) { marked_tables = new HashMap(); } marked_tables.put(mark_name, table); } /** * Returns a table that was marked in a query plan or null if no mark was * found. */ public Table getMarkedTable(String mark_name) { if (marked_tables == null) { return null; } return (Table) marked_tables.get(mark_name); } /** * Put a Table into the cache. */ public void putCachedNode(long id, Table table) { if (marked_tables == null) { marked_tables = new HashMap(); } marked_tables.put(new Long(id), table); } /** * Returns a cached table or null if it isn't cached. */ public Table getCachedNode(long id) { if (marked_tables == null) { return null; } return (Table) marked_tables.get(new Long(id)); } /** * Clears the cache of any cached tables. */ public void clearCache() { if (marked_tables != null) { marked_tables.clear(); } } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/Assignment.java000066400000000000000000000043731330501023400246770ustar00rootroot00000000000000/** * com.mckoi.database.Assignment 18 Jul 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; /** * An assignment from a variable to an expression. For example;

*

 *   value_of = value_of * 1.10
 *   name = concat("CS-", name)
 *   description = concat("LEGACY: ", upper(number));
 * 
* * @author Tobias Downer */ public final class Assignment implements StatementTreeObject, java.io.Serializable, Cloneable { static final long serialVersionUID = 498589698743066869L; /** * The Variable that is the lhs of the assignment. */ private Variable variable; /** * Set expression that is the rhs of the assignment. */ private Expression expression; /** * Constructs the assignment. */ public Assignment(Variable variable, Expression expression) { this.variable = variable; this.expression = expression; } /** * Returns the variable for this assignment. */ public Variable getVariable() { return variable; } /** * Returns the Expression for this assignment. */ public Expression getExpression() { return expression; } // ---------- Implemented from StatementTreeObject ---------- public void prepareExpressions(ExpressionPreparer preparer) throws DatabaseException { if (expression != null) { expression.prepare(preparer); } } public Object clone() throws CloneNotSupportedException { Assignment v = (Assignment) super.clone(); v.variable = (Variable) variable.clone(); v.expression = (Expression) expression.clone(); return v; } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/BlindSearch.java000066400000000000000000000264541330501023400247510ustar00rootroot00000000000000/** * com.mckoi.database.BlindSearch 14 Mar 1998 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package com.mckoi.database; import java.io.*; import java.util.Arrays; import java.util.Comparator; import com.mckoi.util.IntegerVector; import com.mckoi.util.BlockIntegerList; /** * This is a scheme that performs a blind search of a given set. It records * no information about how a set element relates to the rest. It blindly * searches through the set to find elements that match the given criteria. *

* This scheme performs badly on large sets because it requires that the * database is queried often for information. However since it records no * information about the set, memory requirements are non-existant. *

* This scheme should not be used for anything other than small domain sets * because the performance suffers very badly with larger sets. It is ideal * for small domain sets because of its no memory overhead. For any select * operation this algorithm must check every element in the set. * * @author Tobias Downer */ public final class BlindSearch extends SelectableScheme { /** * The Constructor. */ public BlindSearch(TableDataSource table, int column) { super(table, column); } /** * This scheme doesn't take any notice of insertions or removals. */ public void insert(int row) { if (isImmutable()) { throw new Error("Tried to change an immutable scheme."); } } /** * This scheme doesn't take any notice of insertions or removals. */ public void remove(int row) { if (isImmutable()) { throw new Error("Tried to change an immutable scheme."); } } /** * Reads the entire state of the scheme from the input stream. * This is a trivial case for BlindSearch which doesn't require any * data to be stored. */ public void readFrom(InputStream in) throws IOException { } /** * Writes the entire state of the scheme to the output stream. * This is a trivial case for BlindSearch which doesn't require any * data to be stored. */ public void writeTo(OutputStream out) throws IOException { } /** * Returns an exact copy of this scheme including any optimization * information. The copied scheme is identical to the original but does not * share any parts. Modifying any part of the copied scheme will have no * effect on the original and vice versa. */ public SelectableScheme copy(TableDataSource table, boolean immutable) { // Return a fresh object. This implementation has no state so we can // ignore the 'immutable' flag. return new BlindSearch(table, getColumn()); } /** * Disposes and invalidates the BlindSearch. */ public void dispose() { // Nothing to do! } /** * Selection methods for obtaining various sub-sets of information from the * set. */ /** * We implement an insert sort algorithm here. 
Each new row is inserted * into our row vector at the sorted corrent position. * The algorithm assumes the given vector is already sorted. We then just * subdivide the set until we can insert at the required position. */ private int search(TObject ob, IntegerVector vec, int lower, int higher) { if (lower >= higher) { if (ob.compareTo(getCellContents(vec.intAt(lower))) > 0) { return lower + 1; } else { return lower; } } int mid = lower + ((higher - lower) / 2); int comp_result = ob.compareTo(getCellContents(vec.intAt(mid))); if (comp_result == 0) { return mid; } else if (comp_result < 0) { return search(ob, vec, lower, mid - 1); } else { return search(ob, vec, mid + 1, higher); } } /** * Searches for a given TObject (ob) in the row list between the two * bounds. This will return the highest row of the set of values that are * equal to 'ob'. *

* This returns the place to insert ob into the vector, it should not be * used to determine if ob is in the list or not. */ private int highestSearch(TObject ob, IntegerVector vec, int lower, int higher) { if ((higher - lower) <= 5) { // Start from the bottom up until we find the highest val for (int i = higher; i >= lower; --i) { int res = ob.compareTo(getCellContents(vec.intAt(i))); if (res >= 0) { return i + 1; } } // Didn't find return lowest return lower; } int mid = (lower + higher) / 2; int comp_result = ob.compareTo(getCellContents(vec.intAt(mid))); if (comp_result == 0) { // We know the bottom is between 'mid' and 'higher' return highestSearch(ob, vec, mid, higher); } else if (comp_result < 0) { return highestSearch(ob, vec, lower, mid - 1); } else { return highestSearch(ob, vec, mid + 1, higher); } } private void doInsertSort(IntegerVector vec, int row) { int list_size = vec.size(); if (list_size == 0) { vec.addInt(row); } else { int point = highestSearch(getCellContents(row), vec, 0, list_size - 1); if (point == list_size) { vec.addInt(row); } else { vec.insertIntAt(row, point); } } } public IntegerVector selectAll() { IntegerVector row_list = new IntegerVector(getTable().getRowCount()); RowEnumeration e = getTable().rowEnumeration(); while (e.hasMoreRows()) { doInsertSort(row_list, e.nextRowIndex()); } return row_list; } public IntegerVector selectRange(SelectableRange range) { int set_size = getTable().getRowCount(); // If no items in the set return an empty set if (set_size == 0) { return new IntegerVector(0); } return selectRange(new SelectableRange[] { range } ); } public IntegerVector selectRange(SelectableRange[] ranges) { int set_size = getTable().getRowCount(); // If no items in the set return an empty set if (set_size == 0) { return new IntegerVector(0); } RangeChecker checker = new RangeChecker(ranges); return checker.resolve(); } // ---------- Inner classes ---------- /** * Object used to during range check loop. 
*/ final class RangeChecker { /** * The sorted list of all items in the set created as a cache for finding * the first and last values. */ private IntegerVector sorted_set = null; /** * The list of flags for each check in the range. * Either 0 for no check, 1 for < or >, 2 for <= or >=. */ private byte[] lower_flags; private byte[] upper_flags; /** * The TObject objects to check against. */ private TObject[] lower_cells; private TObject[] upper_cells; /** * Constructs the checker. */ public RangeChecker(SelectableRange[] ranges) { int size = ranges.length; lower_flags = new byte[size]; upper_flags = new byte[size]; lower_cells = new TObject[size]; upper_cells = new TObject[size]; for (int i = 0; i < ranges.length; ++i) { setupRange(i, ranges[i]); } } private void resolveSortedSet() { if (sorted_set == null) { // System.out.println("SLOW RESOLVE SORTED SET ON BLIND SEARCH."); sorted_set = selectAll(); } } /** * Resolves a cell. */ private TObject resolveCell(TObject ob) { if (ob == SelectableRange.FIRST_IN_SET) { resolveSortedSet(); return getCellContents(sorted_set.intAt(0)); } else if (ob == SelectableRange.LAST_IN_SET) { resolveSortedSet(); return getCellContents(sorted_set.intAt(sorted_set.size() - 1)); } else { return ob; } } /** * Set up a range. 
*/ public void setupRange(int i, SelectableRange range) { TObject l = range.getStart(); byte lf = range.getStartFlag(); TObject u = range.getEnd(); byte uf = range.getEndFlag(); // Handle lower first if (l == SelectableRange.FIRST_IN_SET && lf == SelectableRange.FIRST_VALUE) { // Special case no lower check lower_flags[i] = 0; } else { if (lf == SelectableRange.FIRST_VALUE) { lower_flags[i] = 2; // >= } else if (lf == SelectableRange.AFTER_LAST_VALUE) { lower_flags[i] = 1; // > } else { throw new Error("Incorrect lower flag."); } lower_cells[i] = resolveCell(l); } // Now handle upper if (u == SelectableRange.LAST_IN_SET && uf == SelectableRange.LAST_VALUE) { // Special case no upper check upper_flags[i] = 0; } else { if (uf == SelectableRange.LAST_VALUE) { upper_flags[i] = 2; // <= } else if (uf == SelectableRange.BEFORE_FIRST_VALUE) { upper_flags[i] = 1; // < } else { throw new Error("Incorrect upper flag."); } upper_cells[i] = resolveCell(u); } } /** * Resolves the ranges. */ public IntegerVector resolve() { // The idea here is to only need to scan the column once to find all // the cells that meet our criteria. 
IntegerVector ivec = new IntegerVector(); RowEnumeration e = getTable().rowEnumeration(); int compare_tally = 0; int size = lower_flags.length; while (e.hasMoreRows()) { int row = e.nextRowIndex(); // For each range range_set: for (int i = 0; i < size; ++i) { boolean result = true; byte lf = lower_flags[i]; if (lf != 0) { ++compare_tally; TObject v = getCellContents(row); int compare = lower_cells[i].compareTo(v); if (lf == 1) { // > result = (compare < 0); } else if (lf == 2) { // >= result = (compare <= 0); } else { throw new Error("Incorrect flag."); } } if (result) { byte uf = upper_flags[i]; if (uf != 0) { ++compare_tally; TObject v = getCellContents(row); int compare = upper_cells[i].compareTo(v); if (uf == 1) { // < result = (compare > 0); } else if (uf == 2) { // >= result = (compare >= 0); } else { throw new Error("Incorrect flag."); } } // Pick this row if (result) { doInsertSort(ivec, row); break range_set; } } } } // System.out.println("Blind Search compare tally: " + compare_tally); return ivec; } } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/BlobStore.java000066400000000000000000001073111330501023400244560ustar00rootroot00000000000000/** * com.mckoi.database.BlobStore 18 Jan 2003 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package com.mckoi.database; import java.util.ArrayList; import java.util.zip.Deflater; import java.util.zip.Inflater; import java.util.zip.DataFormatException; import java.io.IOException; import java.io.InputStream; import java.io.Reader; import com.mckoi.util.PagedInputStream; import com.mckoi.store.Store; import com.mckoi.store.Area; import com.mckoi.store.MutableArea; import com.mckoi.store.AreaWriter; import com.mckoi.database.jdbc.AsciiReader; import com.mckoi.database.jdbc.BinaryToUnicodeReader; import com.mckoi.database.global.Ref; import com.mckoi.database.global.BlobRef; import com.mckoi.database.global.ClobRef; import com.mckoi.database.global.ByteLongObject; /** * A structure inside an Area that maintains the storage of any number of large * binary objects. A blob store allows for the easy allocation of areas for * storing blob data and for reading and writing blob information via BlobRef * objects. *

* A BlobStore can be broken down to the following simplistic functions; *

* 1) Allocation of an area to store a new blob.
* 2) Reading the information in a Blob given a Blob reference identifier.
* 3) Reference counting to a particular Blob.
* 4) Cleaning up a Blob when no static references are left.
* * @author Tobias Downer */ final class BlobStore implements BlobStoreInterface { /** * The magic value for fixed record list structures. */ private final static int MAGIC = 0x012BC53A9; /** * The outer Store object that is to contain the blob store. */ private Store store; /** * The FixedRecordList structure that maintains a list of fixed size records * for blob reference counting. */ private FixedRecordList fixed_list; /** * The first delete chain element. */ private long first_delete_chain_record; /** * Constructs the BlobStore on the given Area object. */ BlobStore(Store store) { this.store = store; fixed_list = new FixedRecordList(store, 24); } /** * Creates the blob store and returns a pointer in the store to the header * information. This value is later used to initialize the store. */ long create() throws IOException { // Init the fixed record list area. // The fixed list entries are formatted as follows; // ( status (int), reference_count (int), // blob_size (long), blob_pointer (long) ) long fixed_list_p = fixed_list.create(); // Delete chain is empty when we start first_delete_chain_record = -1; fixed_list.setReservedLong(-1); // Allocate a small header that contains the MAGIC, and the pointer to the // fixed list structure. AreaWriter blob_store_header = store.createArea(32); long blob_store_p = blob_store_header.getID(); // Write the blob store header information // The magic blob_store_header.putInt(MAGIC); // The version blob_store_header.putInt(1); // The pointer to the fixed list area blob_store_header.putLong(fixed_list_p); // And finish blob_store_header.finish(); // Return the pointer to the blob store header return blob_store_p; } /** * Initializes the blob store given a pointer to the blob store pointer * header (the value previously returned by the 'create' method). 
*/ void init(long blob_store_p) throws IOException { // Get the header area Area blob_store_header = store.getArea(blob_store_p); blob_store_header.position(0); // Read the magic int magic = blob_store_header.getInt(); int version = blob_store_header.getInt(); if (magic != MAGIC) { throw new IOException("MAGIC value for BlobStore is not correct."); } if (version != 1) { throw new IOException("version number for BlobStore is not correct."); } // Read the pointer to the fixed area long fixed_list_p = blob_store_header.getLong(); // Init the FixedRecordList area fixed_list.init(fixed_list_p); // Set the delete chain first_delete_chain_record = fixed_list.getReservedLong(); } /** * Simple structure used when copying blob information. */ private static class CopyBlobInfo { int ref_count; long size; long ob_p; }; /** * Copies all the blob data from the given BlobStore into this blob store. * Any blob information that already exists within this BlobStore is deleted. * We assume this method is called after the blob store is created or * initialized. 
*/ void copyFrom(StoreSystem store_system, BlobStore src_blob_store) throws IOException { FixedRecordList src_fixed_list = src_blob_store.fixed_list; long node_count; synchronized (src_fixed_list) { node_count = src_fixed_list.addressableNodeCount(); } synchronized (fixed_list) { // Make sure our fixed_list is big enough to accomodate the copied list, while (fixed_list.addressableNodeCount() < node_count) { fixed_list.increaseSize(); } // We rearrange the delete chain long last_deleted = -1; // We copy blobs in groups no larger than 1024 Blobs final int BLOCK_WRITE_COUNT = 1024; int max_to_read = (int) Math.min(BLOCK_WRITE_COUNT, node_count); long p = 0; while (max_to_read > 0) { // (CopyBlboInfo) ArrayList src_copy_list = new ArrayList(); synchronized (src_fixed_list) { for (int i = 0; i < max_to_read; ++i) { Area a = src_fixed_list.positionOnNode(p + i); int status = a.getInt(); // If record is not deleted if (status != 0x020000) { CopyBlobInfo info = new CopyBlobInfo(); info.ref_count = a.getInt(); info.size = a.getLong(); info.ob_p = a.getLong(); src_copy_list.add(info); } else { src_copy_list.add(null); } } } try { store.lockForWrite(); // We now should have a list of all records from the src to copy, int sz = src_copy_list.size(); for (int i = 0; i < sz; ++i) { CopyBlobInfo info = (CopyBlobInfo) src_copy_list.get(i); MutableArea a = fixed_list.positionOnNode(p + i); // Either set a deleted entry or set the entry with a copied blob. 
if (info == null) { a.putInt(0x020000); a.putInt(0); a.putLong(-1); a.putLong(last_deleted); a.checkOut(); last_deleted = p + i; } else { // Get the Area containing the blob header data in the source // store Area src_blob_header = src_blob_store.store.getArea(info.ob_p); // Read the information from the header, int res = src_blob_header.getInt(); int type = src_blob_header.getInt(); long total_block_size = src_blob_header.getLong(); long total_block_pages = src_blob_header.getLong(); // Allocate a new header AreaWriter dst_blob_header = store.createArea( 4 + 4 + 8 + 8 + (total_block_pages * 8)); long new_ob_header_p = dst_blob_header.getID(); // Copy information into the header dst_blob_header.putInt(res); dst_blob_header.putInt(type); dst_blob_header.putLong(total_block_size); dst_blob_header.putLong(total_block_pages); // Allocate and copy each page, for (int n = 0; n < total_block_pages; ++n) { // Get the block information long block_p = src_blob_header.getLong(); long new_block_p; // Copy the area if the block id is not -1 if (block_p != -1) { Area src_block = src_blob_store.store.getArea(block_p); int block_type = src_block.getInt(); int block_size = src_block.getInt(); // Copy a new block, int new_block_size = block_size + 4 + 4; AreaWriter dst_block_p = store.createArea(new_block_size); new_block_p = dst_block_p.getID(); src_block.position(0); src_block.copyTo(dst_block_p, new_block_size); // And finish dst_block_p.finish(); } else { new_block_p = -1; } // Write the new header dst_blob_header.putLong(new_block_p); } // And finish 'dst_blob_header' dst_blob_header.finish(); // Set up the data in the fixed list a.putInt(1); // Note all the blobs are written with 0 reference count. 
a.putInt(0); a.putLong(info.size); a.putLong(new_ob_header_p); // Check out the changes a.checkOut(); } } } finally { store.unlockForWrite(); } node_count -= max_to_read; p += max_to_read; max_to_read = (int) Math.min(BLOCK_WRITE_COUNT, node_count); // Set a checkpoint in the destination store system so we write out // all pending changes from the log store_system.setCheckPoint(); } // Set the delete chain first_delete_chain_record = last_deleted; fixed_list.setReservedLong(last_deleted); } // synchronized (fixed_list) } /** * Convenience method that converts the given String into a ClobRef * object and pushes it into the given BlobStore object. */ ClobRef putStringInBlobStore(String str) throws IOException { final int BUF_SIZE = 64 * 1024; int size = str.length(); byte type = 4; // Enable compression (ISSUE: Should this be enabled by default?) type = (byte) (type | 0x010); ClobRef ref = (ClobRef) allocateLargeObject(type, size * 2); byte[] buf = new byte[BUF_SIZE]; long p = 0; int str_i = 0; while (size > 0) { int to_write = Math.min(BUF_SIZE / 2, size); int buf_i = 0; for (int i = 0; i < to_write; ++i) { char c = str.charAt(str_i); buf[buf_i] = (byte) (c >> 8); ++buf_i; buf[buf_i] = (byte) c; ++buf_i; ++str_i; } ref.write(p, buf, buf_i); size -= to_write; p += to_write * 2; } ref.complete(); return ref; } /** * Convenience method that converts the given ByteLongObject into a * BlobRef object and pushes it into the given BlobStore object. 
   * <p>
   * The blob is stored uncompressed (type 2) and the returned reference has
   * already been completed (read-only).
   */
  BlobRef putByteLongObjectInBlobStore(ByteLongObject blob) throws IOException {
    final int BUF_SIZE = 64 * 1024;
    byte[] src_buf = blob.getByteArray();
    final int size = src_buf.length;
    BlobRef ref = (BlobRef) allocateLargeObject((byte) 2, size);
    // Copy the source array into the blob in 64K page-sized chunks (the
    // underlying writeBlobByteArray requires 64K-aligned, <= 64K writes).
    byte[] copy_buf = new byte[BUF_SIZE];
    int offset = 0;
    int to_write = Math.min(BUF_SIZE, size);
    while (to_write > 0) {
      System.arraycopy(src_buf, offset, copy_buf, 0, to_write);
      ref.write(offset, copy_buf, to_write);
      offset += to_write;
      to_write = Math.min(BUF_SIZE, (size - offset));
    }
    ref.complete();
    return ref;
  }

  /**
   * Finds a free place to add a record and returns an index to the record here.
   * This may expand the record space as necessary if there are no free record
   * slots to use.
   *

   * NOTE: Unfortunately this is cut-and-paste from the way
   *   V2MasterTableDataSource manages recycled elements.
   *
   * Fixed-list record layout (24 bytes): status int, reference count int,
   * size long, pointer long.  Deleted records have status bit 0x020000 set
   * and their pointer field links to the next record in the delete chain
   * (-1 terminates the chain).
   */
  private long addToRecordList(long record_p) throws IOException {
    synchronized (fixed_list) {
      // If there is no free deleted records in the delete chain,
      if (first_delete_chain_record == -1) {
        // Increase the size of the list structure.
        fixed_list.increaseSize();
        // The start record of the new size
        int new_block_number = fixed_list.listBlockCount() - 1;
        long start_index =
                     fixed_list.listBlockFirstPosition(new_block_number);
        long size_of_block = fixed_list.listBlockNodeCount(new_block_number);
        // The Area object for the new position
        MutableArea a = fixed_list.positionOnNode(start_index);
        // The first new record is used for 'record_p' (status 0 = open).
        a.putInt(0);
        a.putInt(0);
        a.putLong(-1);    // Initially unknown size
        a.putLong(record_p);
        // Set the rest of the block as deleted records
        // (each links to the record after it)
        for (long n = 1; n < size_of_block - 1; ++n) {
          a.putInt(0x020000);
          a.putInt(0);
          a.putLong(-1);
          a.putLong(start_index + n + 1);
        }
        // The last block is end of delete chain.
        a.putInt(0x020000);
        a.putInt(0);
        a.putLong(-1);
        a.putLong(-1);
        // Check out the changes.
        a.checkOut();
        // And set the new delete chain
        first_delete_chain_record = start_index + 1;
        // Set the reserved area
        fixed_list.setReservedLong(first_delete_chain_record);
        // // Flush the changes to the store
        // store.flush();
        // Return pointer to the record we just added.
        return start_index;
      } else {
        // Pull free block from the delete chain and recycle it.
        long recycled_record = first_delete_chain_record;
        MutableArea block = fixed_list.positionOnNode(recycled_record);
        // Record the position so we can rewind and overwrite the record.
        int rec_pos = block.position();
        // Status of the recycled block
        int status = block.getInt();
        if ((status & 0x020000) == 0) {
          throw new Error("Assertion failed: record is not deleted!");
        }
        // Reference count (currently unused in delete chains).
        block.getInt();
        // The size (should be -1);
        block.getLong();
        // The pointer to the next in the chain.
        long next_chain = block.getLong();
        first_delete_chain_record = next_chain;
        // Update the first_delete_chain_record field in the header
        fixed_list.setReservedLong(first_delete_chain_record);
        // Update the block
        block.position(rec_pos);
        block.putInt(0);
        block.putInt(0);
        block.putLong(-1);    // Initially unknown size
        block.putLong(record_p);
        // Check out the changes
        block.checkOut();
        return recycled_record;
      }
    }
  }

  /**
   * Allocates an area in the store for a large binary object to be stored.
   * After the blob area is allocated the blob may be written.  This returns
   * a BlobRef object for future access to the blob.
   *

   * A newly allocated blob is read and write enabled.  A call to the
   * 'completeBlob' method must be called to finalize the blob at which point
   * the blob becomes a static read-only object.
   * <p>
   * The low nibble of 'type' selects the object kind (2 = blob, 3 = ASCII
   * clob, 4 = UTF-16 clob); bit 0x010 enables per-page compression.
   *
   * @param type the object type byte described above.
   * @param size the total byte size of the object (must be >= 0).
   * @return a writable BlobRef/ClobRef for the newly allocated object.
   * @throws IOException if the size is negative or the type is unknown.
   */
  Ref allocateLargeObject(byte type, long size) throws IOException {
    if (size < 0) {
      throw new IOException("Negative blob size not allowed.");
    }
    try {
      store.lockForWrite();
      // Allocate the area (plus header area) for storing the blob pages
      // (pages are 64K; size 0 still allocates a single page slot)
      long page_count = ((size - 1) / (64 * 1024)) + 1;
      // Header is 24 bytes (reserved int, type int, size long, page count
      // long) followed by one 8-byte page pointer per page.
      AreaWriter blob_area = store.createArea((page_count * 8) + 24);
      long blob_p = blob_area.getID();
      // Set up the area header
      blob_area.putInt(0);           // Reserved for future
      blob_area.putInt(type);
      blob_area.putLong(size);
      blob_area.putLong(page_count);
      // Initialize the empty blob area
      // (-1 marks a page that has not been written yet)
      for (long i = 0; i < page_count; ++i) {
        blob_area.putLong(-1);
      }
      // And finish
      blob_area.finish();
      // Update the fixed_list and return the record number for this blob
      long reference_id = addToRecordList(blob_p);
      // Dispatch on the object kind (low nibble only — high bits carry the
      // compression flag).
      byte st_type = (byte) (type & 0x0F);
      if (st_type == 2) {
        // Create a BlobRef implementation that can access this blob
        return new BlobRefImpl(reference_id, type, size, true);
      } else if (st_type == 3) {
        return new ClobRefImpl(reference_id, type, size, true);
      } else if (st_type == 4) {
        return new ClobRefImpl(reference_id, type, size, true);
      } else {
        throw new IOException("Unknown large object type");
      }
    } finally {
      store.unlockForWrite();
    }
  }

  /**
   * Returns a Ref object that allows read-only access to a large object in this
   * blob store.
*/ public Ref getLargeObject(long reference_id) throws IOException { long blob_p; long size; synchronized (fixed_list) { // Assert that the blob reference id given is a valid range if (reference_id < 0 || reference_id >= fixed_list.addressableNodeCount()) { throw new IOException("reference_id is out of range."); } // Position on this record Area block = fixed_list.positionOnNode(reference_id); // Read the information in the fixed record int status = block.getInt(); // Assert that the status is not deleted if ((status & 0x020000) != 0) { throw new Error("Assertion failed: record is deleted!"); } // Get the reference count int reference_count = block.getInt(); // Get the total size of the blob size = block.getLong(); // Get the blob pointer blob_p = block.getLong(); } Area blob_area = store.getArea(blob_p); blob_area.position(0); blob_area.getInt(); // (reserved) // Read the type byte type = (byte) blob_area.getInt(); // The size of the block long block_size = blob_area.getLong(); // The number of pages in the blob long page_count = blob_area.getLong(); if (type == (byte) 2) { // Create a new BlobRef object. return new BlobRefImpl(reference_id, type, size, false); } else { // Create a new ClobRef object. return new ClobRefImpl(reference_id, type, size, false); } } /** * Call this to complete a blob in the store after a blob has been completely * written. Only BlobRef implementations returned by the 'allocateBlob' * method are accepted. */ void completeBlob(AbstractRef ref) throws IOException { // Assert that the BlobRef is open and allocated ref.assertIsOpen(); // Get the blob reference id (reference to the fixed record list). long blob_reference_id = ref.getID(); synchronized (fixed_list) { // Update the record in the fixed list. 
MutableArea block = fixed_list.positionOnNode(blob_reference_id); // Record the position int rec_pos = block.position(); // Read the information in the fixed record int status = block.getInt(); // Assert that the status is open if (status != 0) { throw new IOException("Assertion failed: record is not open."); } int reference_count = block.getInt(); long size = block.getLong(); long page_count = block.getLong(); try { store.lockForWrite(); // Set the fixed blob record as complete. block.position(rec_pos); // Write the new status block.putInt(1); // Write the reference count block.putInt(0); // Write the completed size block.putLong(ref.getRawSize()); // Write the pointer block.putLong(page_count); // Check out the change block.checkOut(); } finally { store.unlockForWrite(); } } // Now the blob has been finalized so change the state of the BlobRef // object. ref.close(); } /** * Tells the BlobStore that a static reference has been established in a * table to the blob referenced by the given id. This is used to count * references to a blob, and possibly clean up a blob if there are no * references remaining to it. *

* NOTE: It is the responsibility of the callee to establish a 'lockForWrite' * lock on the store before this is used. */ public void establishReference(long blob_reference_id) { try { synchronized (fixed_list) { // Update the record in the fixed list. MutableArea block = fixed_list.positionOnNode(blob_reference_id); // Record the position int rec_pos = block.position(); // Read the information in the fixed record int status = block.getInt(); // Assert that the status is static if (status != 1) { throw new RuntimeException("Assertion failed: record is not static."); } int reference_count = block.getInt(); // Set the fixed blob record as complete. block.position(rec_pos + 4); // Write the reference count + 1 block.putInt(reference_count + 1); // Check out the change block.checkOut(); } // // Flush all changes to the store. // store.flush(); } catch (IOException e) { throw new RuntimeException("IO Error: " + e.getMessage()); } } /** * Tells the BlobStore that a static reference has been released to the * given blob. This would typically be called when the row in the database * is removed. *

   * NOTE: It is the responsibility of the callee to establish a 'lockForWrite'
   *   lock on the store before this is used.
   * <p>
   * When the count reaches 0 every page of the blob and the blob header area
   * are deleted from the store and the fixed-list record is recycled onto the
   * delete chain.
   */
  public void releaseReference(long blob_reference_id) {
    try {
      synchronized (fixed_list) {
        // Update the record in the fixed list.
        MutableArea block = fixed_list.positionOnNode(blob_reference_id);
        // Record the position
        int rec_pos = block.position();
        // Read the information in the fixed record
        int status = block.getInt();
        // Assert that the status is static
        if (status != 1) {
          throw new RuntimeException("Assertion failed: " +
                         "Record is not static (status = " + status + ")");
        }
        int reference_count = block.getInt();
        if (reference_count == 0) {
          throw new RuntimeException(
                            "Releasing when Blob reference counter is at 0.");
        }
        long object_size = block.getLong();
        long object_p = block.getLong();
        // If reference count == 0 then we need to free all the resources
        // associated with this Blob in the blob store.
        if ((reference_count - 1) == 0) {
          // Free the resources associated with this object.
          Area blob_area = store.getArea(object_p);
          blob_area.getInt();    // (reserved)
          byte type = (byte) blob_area.getInt();
          long total_size = blob_area.getLong();
          long page_count = blob_area.getLong();
          // Free all of the pages in this blob.
          for (long i = 0; i < page_count; ++i) {
            long page_p = blob_area.getLong();
            // NOTE(review): unwritten pages are marked -1 so '> 0' skips
            // them; this also skips a (presumably impossible) pointer of 0 —
            // TODO confirm 0 is never a valid area id.
            if (page_p > 0) {
              store.deleteArea(page_p);
            }
          }
          // Free the blob area object itself.
          store.deleteArea(object_p);
          // Write out the blank record.
          block.position(rec_pos);
          block.putInt(0x020000);    // status = deleted
          block.putInt(0);
          block.putLong(-1);
          block.putLong(first_delete_chain_record);
          // CHeck out these changes
          block.checkOut();
          // Link this record onto the head of the delete chain.
          first_delete_chain_record = blob_reference_id;
          // Update the first_delete_chain_record field in the header
          fixed_list.setReservedLong(first_delete_chain_record);
        } else {
          // Simply decrement the reference counter for this record.
          block.position(rec_pos + 4);
          // Write the reference count - 1
          block.putInt(reference_count - 1);
          // Check out this change
          block.checkOut();
        }
      }
      // // Flush all changes to the store.
      // store.flush();
    } catch (IOException e) {
      // NOTE(review): the cause is dropped here — only the message survives;
      // consider chaining 'e' as the RuntimeException cause.
      throw new RuntimeException("IO Error: " + e.getMessage());
    }
  }

  /**
   * Reads a section of the blob referenced by the given id, offset and length
   * into the byte array.
   * <p>
   * The read must be 64K page aligned and no longer than a single 64K page;
   * compressed pages (type bit 0x010) are inflated into 'buf'.
   */
  private void readBlobByteArray(long reference_id, long offset,
                       byte[] buf, int off, int length) throws IOException {
    // ASSERT: Read and write position must be 64K aligned.
    if (offset % (64 * 1024) != 0) {
      throw new RuntimeException("Assert failed: offset is not 64k aligned.");
    }
    // ASSERT: Length is less than or equal to 64K
    if (length > (64 * 1024)) {
      throw new RuntimeException("Assert failed: length is greater than 64K.");
    }
    int status;
    int reference_count;
    long size;
    long blob_p;
    synchronized (fixed_list) {
      // Assert that the blob reference id given is a valid range
      if (reference_id < 0 ||
          reference_id >= fixed_list.addressableNodeCount()) {
        throw new IOException("blob_reference_id is out of range.");
      }
      // Position on this record
      Area block = fixed_list.positionOnNode(reference_id);
      // Read the information in the fixed record
      status = block.getInt();
      // Assert that the status is not deleted
      if ((status & 0x020000) != 0) {
        throw new Error("Assertion failed: record is deleted!");
      }
      // Get the reference count
      reference_count = block.getInt();
      // Get the total size of the blob
      size = block.getLong();
      // Get the blob pointer
      blob_p = block.getLong();
    }
    // Assert that the area being read is within the bounds of the blob
    if (offset < 0 || offset + length > size) {
      throw new IOException("Blob invalid read. offset = " + offset +
                            ", length = " + length);
    }
    // Open an Area into the blob
    Area blob_area = store.getArea(blob_p);
    blob_area.getInt();    // (reserved)
    byte type = (byte) blob_area.getInt();
    // Convert to the page number
    long page_number = (offset / (64 * 1024));
    // 24 = header size; each page pointer is 8 bytes.
    blob_area.position((int) ((page_number * 8) + 24));
    long page_p = blob_area.getLong();
    // Read the page
    Area page_area = store.getArea(page_p);
    page_area.position(0);
    int page_type = page_area.getInt();
    int page_size = page_area.getInt();
    if ((type & 0x010) != 0) {
      // The page is compressed
      byte[] page_buf = new byte[page_size];
      page_area.get(page_buf, 0, page_size);
      Inflater inflater = new Inflater();
      inflater.setInput(page_buf, 0, page_size);
      try {
        int result_length = inflater.inflate(buf, off, length);
        if (result_length != length) {
          throw new RuntimeException(
                        "Assert failed: decompressed length is incorrect.");
        }
      } catch (DataFormatException e) {
        throw new IOException("ZIP Data Format Error: " + e.getMessage());
      }
      inflater.end();
    } else {
      // The page is not compressed
      page_area.get(buf, off, length);
    }
  }

  /**
   * Writes a section of the blob referenced by the given id, offset and
   * length to the byte array.  Note that this does not perform any checks on
   * whether we are allowed to write to this blob.
   */
  private void writeBlobByteArray(long reference_id, long offset,
                                  byte[] buf, int length) throws IOException {
    // ASSERT: Read and write position must be 64K aligned.
    if (offset % (64 * 1024) != 0) {
      throw new RuntimeException("Assert failed: offset is not 64k aligned.");
    }
    // ASSERT: Length is less than or equal to 64K
    if (length > (64 * 1024)) {
      throw new RuntimeException("Assert failed: length is greater than 64K.");
    }
    int status;
    int reference_count;
    long size;
    long blob_p;
    synchronized (fixed_list) {
      // Assert that the blob reference id given is a valid range
      if (reference_id < 0 ||
          reference_id >= fixed_list.addressableNodeCount()) {
        throw new IOException("blob_reference_id is out of range.");
      }
      // Position on this record
      Area block = fixed_list.positionOnNode(reference_id);
      // Read the information in the fixed record
      status = block.getInt();
      // Assert that the status is not deleted
      if ((status & 0x020000) != 0) {
        throw new Error("Assertion failed: record is deleted!");
      }
      // Get the reference count
      reference_count = block.getInt();
      // Get the total size of the blob
      size = block.getLong();
      // Get the blob pointer
      blob_p = block.getLong();
    }
    // Open an Area into the blob
    MutableArea blob_area = store.getMutableArea(blob_p);
    blob_area.getInt();    // (reserved)
    byte type = (byte) blob_area.getInt();
    // Use the size recorded in the blob header (the fixed-list size read
    // above may still be -1 for an incomplete blob).
    size = blob_area.getLong();
    // Assert that the area being read is within the bounds of the blob
    if (offset < 0 || offset + length > size) {
      throw new IOException("Blob invalid write.  offset = " + offset +
                            ", length = " + length + ", size = " + size);
    }
    // Convert to the page number
    long page_number = (offset / (64 * 1024));
    blob_area.position((int) ((page_number * 8) + 24));
    long page_p = blob_area.getLong();
    // Assert that 'page_p' is -1 (pages are write-once)
    if (page_p != -1) {
      // This means we are trying to rewrite a page we've already written
      // before.
      throw new RuntimeException("Assert failed: page_p is not -1");
    }
    // Is the compression bit set?
    byte[] to_write;
    int write_length;
    if ((type & 0x010) != 0) {
      // Yes, compression
      Deflater deflater = new Deflater();
      deflater.setInput(buf, 0, length);
      deflater.finish();
      // 65K output buffer leaves headroom for incompressible input.
      to_write = new byte[65 * 1024];
      write_length = deflater.deflate(to_write);
    } else {
      // No compression
      to_write = buf;
      write_length = length;
    }
    try {
      store.lockForWrite();
      // Allocate and write the page.
      // (+8 for the page header: type int and size int)
      AreaWriter page_area = store.createArea(write_length + 8);
      page_p = page_area.getID();
      page_area.putInt(1);
      page_area.putInt(write_length);
      page_area.put(to_write, 0, write_length);
      // Finish this page
      page_area.finish();
      // Update the page in the header.
      blob_area.position((int) ((page_number * 8) + 24));
      blob_area.putLong(page_p);
      // Check out this change.
      blob_area.checkOut();
    } finally {
      store.unlockForWrite();
    }
  }

  /**
   * An InputStream implementation that reads from the underlying blob data as
   * fixed size pages.
   */
  private class BLOBInputStream extends PagedInputStream {

    // Page size of the underlying blob store (64K).
    final static int B_SIZE = 64 * 1024;

    // The fixed-list reference id of the blob being read.
    private long reference_id;

    public BLOBInputStream(final long reference_id, final long size) {
      super(B_SIZE, size);
      this.reference_id = reference_id;
    }

    public void readPageContent(byte[] buf, long pos, int length)
                                                          throws IOException {
      readBlobByteArray(reference_id, pos, buf, 0, length);
    }

  }

  /**
   * An abstract implementation of a Ref object for referencing large objects
   * in this blob store.
   */
  private class AbstractRef {

    /**
     * The reference identifier.  This is a pointer into the fixed list
     * structure.
     */
    protected final long reference_id;

    /**
     * The total size of the large object in bytes.
     */
    protected final long size;

    /**
     * The type of large object.
     */
    protected final byte type;

    /**
     * Set to true if this large object is open for writing, otherwise the
     * object is an immutable static object.
     */
    private boolean open_for_write;

    /**
     * Constructs the Ref implementation.
*/ AbstractRef(long reference_id, byte type, long size, boolean open_for_write) { this.reference_id = reference_id; this.size = size; this.type = type; this.open_for_write = open_for_write; } /** * Asserts that this blob is open for writing. */ void assertIsOpen() { if (!open_for_write) { throw new Error("Large object ref is newly allocated."); } } public long getRawSize() { return size; } /** * Marks this large object as closed to write operations. */ void close() { open_for_write = false; } public int length() { return (int) size; } public long getID() { return reference_id; } public byte getType() { return type; } public void read(long offset, byte[] buf, int length) throws IOException { // Reads the section of the blob into the given buffer byte array at the // given offset of the blob. readBlobByteArray(reference_id, offset, buf, 0, length); } public void write(long offset, byte[] buf, int length) throws IOException { if (open_for_write) { writeBlobByteArray(reference_id, offset, buf, length); } else { throw new IOException("Blob is read-only."); } } public void complete() throws IOException { completeBlob(this); } } /** * An implementation of ClobRef used to represent a reference to a large * character object inside this blob store. */ private class ClobRefImpl extends AbstractRef implements ClobRef { /** * Constructs the ClobRef implementation. 
*/ ClobRefImpl(long reference_id, byte type, long size, boolean open_for_write) { super(reference_id, type, size, open_for_write); } // ---------- Implemented from ClobRef ---------- public int length() { byte st_type = (byte) (type & 0x0F); if (st_type == 3) { return (int) size; } else if (st_type == 4) { return (int) (size / 2); } else { throw new RuntimeException("Unknown type."); } } public Reader getReader() { byte st_type = (byte) (type & 0x0F); if (st_type == 3) { return new AsciiReader(new BLOBInputStream(reference_id, size)); } else if (st_type == 4) { return new BinaryToUnicodeReader( new BLOBInputStream(reference_id, size)); } else { throw new RuntimeException("Unknown type."); } } public String toString() { final int BUF_SIZE = 8192; Reader r = getReader(); StringBuffer buf = new StringBuffer(length()); char[] c = new char[BUF_SIZE]; try { while(true) { int has_read = r.read(c, 0, BUF_SIZE); if (has_read == 0 || has_read == -1) { return new String(buf); } buf.append(c); } } catch (IOException e) { throw new RuntimeException("IO Error: " + e.getMessage()); } } } /** * An implementation of BlobRef used to represent a blob reference inside this * blob store. */ private class BlobRefImpl extends AbstractRef implements BlobRef { /** * Constructs the BlobRef implementation. */ BlobRefImpl(long reference_id, byte type, long size, boolean open_for_write) { super(reference_id, type, size, open_for_write); } // ---------- Implemented from BlobRef ---------- public InputStream getInputStream() { return new BLOBInputStream(reference_id, size); } } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/BlobStoreInterface.java000066400000000000000000000037371330501023400263060ustar00rootroot00000000000000/** * com.mckoi.database.BlobStoreInterface 21 Jan 2003 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. 
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package com.mckoi.database;

import java.io.IOException;
import com.mckoi.database.global.Ref;

/**
 * A very restricted interface for accessing a blob store.  This is used by a
 * MasterTableDataSource implementation to query and resolve blob information.
 *
 * @author Tobias Downer
 */

public interface BlobStoreInterface {

  /**
   * Given a large object reference identifier, generates a Ref implementation
   * that provides access to the information in the large object.  The Ref
   * implementation returned by this object is a read-only static object.
   * This may return either a BlobRef or a ClobRef object depending on the
   * type of the object.
   */
  Ref getLargeObject(long reference_id) throws IOException;

  /**
   * Tells the BlobStore that a static reference has been established in a
   * table to the blob referenced by the given id.  This is used to count
   * references to a blob, and possibly clean up a blob if there are no
   * references remaining to it.
   */
  void establishReference(long reference_id);

  /**
   * Tells the BlobStore that a static reference has been released to the
   * given blob.  This would typically be called when the row in the database
   * is removed.
   */
  void releaseReference(long reference_id);

}
mckoisqldb-1.0.6/src/main/java/com/mckoi/database/Caster.java000066600000000000000000000521201330501023400240010ustar00rootroot00000000000000/**
 * com.mckoi.database.Caster  25 Oct 2002
 *
 * Mckoi SQL Database ( http://www.mckoi.com/database )
 * Copyright (C) 2000-2018 Diehl and Associates, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package com.mckoi.database;

import com.mckoi.database.global.StringObject;
import com.mckoi.database.global.ByteLongObject;
import com.mckoi.database.global.ObjectTranslator;
import com.mckoi.database.global.SQLTypes;
import com.mckoi.util.BigNumber;
import java.lang.reflect.Constructor;

/**
 * Methods to choose and perform casts from database type to Java types.
 *
 * @author Jim McBeath
 */

public class Caster {

  /** The cost to cast to the closest Java primitive type. */
  public final static int PRIMITIVE_COST = 100;

  /** The cost to cast to the closest Java object type. */
  public final static int OBJECT_COST = 200;

  /** The maximum positive byte value as a BigNumber. */
  private final static BigNumber maxBigNumByte =
                                        BigNumber.fromInt(Byte.MAX_VALUE);

  /** The minimum byte value as a BigNumber. */
  private final static BigNumber minBigNumByte =
                                        BigNumber.fromInt(Byte.MIN_VALUE);

  /** The maximum positive short value as a BigNumber. */
  private final static BigNumber maxBigNumShort =
                                        BigNumber.fromInt(Short.MAX_VALUE);

  /** The minimum short value as a BigNumber.
   */
  private final static BigNumber minBigNumShort =
                                        BigNumber.fromInt(Short.MIN_VALUE);

  /** The maximum positive integer value as a BigNumber. */
  private final static BigNumber maxBigNumInt =
                                        BigNumber.fromInt(Integer.MAX_VALUE);

  /** The minimum integer value as a BigNumber. */
  private final static BigNumber minBigNumInt =
                                        BigNumber.fromInt(Integer.MIN_VALUE);

  /** The maximum positive long value as a BigNumber. */
  private final static BigNumber maxBigNumLong =
                                        BigNumber.fromLong(Long.MAX_VALUE);

  /** The minimum long value as a BigNumber. */
  private final static BigNumber minBigNumLong =
                                        BigNumber.fromLong(Long.MIN_VALUE);

  /** The maximum positive float value as a BigNumber. */
  private final static BigNumber maxBigNumFloat =
                                        BigNumber.fromDouble(Float.MAX_VALUE);

  /** The minimum positive float value as a BigNumber. */
  private static BigNumber minBigNumFloat =
                                        BigNumber.fromDouble(Float.MIN_VALUE);

  /** The maximum positive double value as a BigNumber. */
  private static BigNumber maxBigNumDouble =
                                        BigNumber.fromDouble(Double.MAX_VALUE);

  /**
   * Find any JAVA_OBJECTs in the args and deserialize them into
   * real Java objects.
   *
   * @param args The args to deserialize.  Any JAVA_OBJECT args are
   *        converted in-place to a new TObject with a value which is
   *        the deserialized object.
   */
  public static void deserializeJavaObjects(TObject[] args) {
    for (int i = 0; i < args.length; i++) {
      int sqlType = args[i].getTType().getSQLType();
      if (sqlType != SQLTypes.JAVA_OBJECT) {
        continue;        // not a JAVA_OBJECT
      }
      Object argVal = args[i].getObject();
      if (!(argVal instanceof ByteLongObject)) {
        continue;        // not ByteLongObject, we don't know how to deserialize
      }
      Object javaObj = ObjectTranslator.deserialize((ByteLongObject)argVal);
      args[i] = new TObject(args[i].getTType(), javaObj);
    }
  }

  /**
   * Search for the best constructor that we can use with the given
   * argument types.
   *
   * @param constructs The set of constructors from which to select.
   * @param args The database arguments to be passed to the constructor
   *        (their SQL types are computed internally via getSqlTypes).
   * @return The constructor with the lowest cost, or null if there
   *         are no constructors that match the args.
   */
  public static Constructor findBestConstructor(
                                Constructor[] constructs, TObject[] args) {
    int bestCost = 0;           // not used if bestConstructor is null
    Constructor bestConstructor = null;
    int[] argSqlTypes = getSqlTypes(args);
    for (int i = 0; i < constructs.length; ++i) {
      Class[] targets = constructs[i].getParameterTypes();
      int cost = getCastingCost(args, argSqlTypes, targets);
      if (cost < 0) {
        continue;               // not a usable constructor
      }
      if (bestConstructor == null || cost < bestCost) {
        bestCost = cost;        // found a better one, remember it
        bestConstructor = constructs[i];
      }
    }
    return bestConstructor;     // null if we didn't find any
  }

  /**
   * Get the SQL types for the given database arguments.
   *
   * @param args The database args.
   * @return The SQL types of the args.
   */
  public static int[] getSqlTypes(TObject[] args) {
    int[] sqlTypes = new int[args.length];
    for (int i = 0; i < args.length; i++) {
      sqlTypes[i] = getSqlType(args[i]);
    }
    return sqlTypes;
  }

  /**
   * Get the SQL type for a database argument.
   * If the actual value does not fit into the declared type, the returned
   * type is widened as required for the value to fit.
   *
   * @param arg The database argument.
   * @return The SQL type of the arg.
   */
  public static int getSqlType(TObject arg) {
    int sqlType = arg.getTType().getSQLType();
    Object argVal = arg.getObject();
    if (!(argVal instanceof BigNumber)) {
      return sqlType;   // We have special checks only for numeric values
    }
    BigNumber b = (BigNumber)argVal;
    BigNumber bAbs;
    switch (sqlType) {
    case SQLTypes.NUMERIC:
    case SQLTypes.DECIMAL:
      // If the type is NUMERIC or DECIMAL, then look at the data value
      // to see if it can be narrowed to int, long or double.
      if (b.canBeRepresentedAsInt()) {
        sqlType = SQLTypes.INTEGER;
      } else if (b.canBeRepresentedAsLong()) {
        sqlType = SQLTypes.BIGINT;
      } else {
        bAbs = b.abs();
        if (b.getScale() == 0) {
          // Integral value: pick the narrowest integral type it fits.
          if (bAbs.compareTo(maxBigNumInt) <= 0) {
            sqlType = SQLTypes.INTEGER;
          } else if (bAbs.compareTo(maxBigNumLong) <= 0) {
            sqlType = SQLTypes.BIGINT;
          }
        } else if (bAbs.compareTo(maxBigNumDouble) <= 0) {
          sqlType = SQLTypes.DOUBLE;
        }
      }
      // If we can't translate NUMERIC or DECIMAL to int, long or double,
      // then leave it as is.
      break;
    case SQLTypes.BIT:
      if (b.canBeRepresentedAsInt()) {
        int n = b.intValue();
        if (n == 0 || n == 1) {
          return sqlType;       // Allowable BIT value
        }
      }
      // The value does not fit in a BIT, move up to a TINYINT
      sqlType = SQLTypes.TINYINT;
      // FALL THROUGH (deliberate: keep widening until the value fits)
    case SQLTypes.TINYINT:
      if (b.compareTo(maxBigNumByte) <= 0 &&
          b.compareTo(minBigNumByte) >= 0) {
        return sqlType;         // Fits in a TINYINT
      }
      // The value does not fit in a TINYINT, move up to a SMALLINT
      sqlType = SQLTypes.SMALLINT;
      // FALL THROUGH
    case SQLTypes.SMALLINT:
      if (b.compareTo(maxBigNumShort) <= 0 &&
          b.compareTo(minBigNumShort) >= 0) {
        return sqlType;         // Fits in a SMALLINT
      }
      // The value does not fit in a SMALLINT, move up to a INTEGER
      sqlType = SQLTypes.INTEGER;
      // FALL THROUGH
    case SQLTypes.INTEGER:
      if (b.compareTo(maxBigNumInt) <= 0 &&
          b.compareTo(minBigNumInt) >= 0) {
        return sqlType;         // Fits in a INTEGER
      }
      // The value does not fit in a INTEGER, move up to a BIGINT
      sqlType = SQLTypes.BIGINT;
      // That's as far as we go
      break;
    case SQLTypes.REAL:
      bAbs = b.abs();
      // Zero is special-cased because it is smaller than Float.MIN_VALUE
      // (the smallest positive float) yet representable as a REAL.
      if (bAbs.compareTo(maxBigNumFloat) <= 0 &&
          (bAbs.compareTo(minBigNumFloat) >= 0 ||
           b.doubleValue() == 0.0)) {
        return sqlType;         // Fits in a REAL
      }
      // The value does not fit in a REAL, move up to a DOUBLE
      sqlType = SQLTypes.DOUBLE;
      break;
    default:
      break;
    }
    return sqlType;
  }

  /**
   * Get a string giving the database types of all of the arguments.
   * Useful for error messages.
   *
   * @param args The arguments.
   * @return A string with the types of all of the arguments,
   *         using comma as a separator.
   */
  public static String getArgTypesString(TObject[] args) {
    StringBuffer sb = new StringBuffer();
    for (int n = 0; n < args.length; n++) {
      if (n > 0) {
        sb.append(",");
      }
      if (args[n] == null) {
        sb.append("null");
      } else {
        int sqlType = getSqlType(args[n]);
        String typeName;
        if (sqlType == SQLTypes.JAVA_OBJECT) {
          // For Java objects, report the concrete class name instead of the
          // generic JAVA_OBJECT SQL type name.
          Object argObj = args[n].getObject();
          if (argObj == null) {
            typeName = "null";
          } else {
            typeName = argObj.getClass().getName();
          }
        } else {
          typeName = DataTableColumnDef.sqlTypeToString(sqlType);
        }
        sb.append(typeName);
      }
    }
    return sb.toString();
  }

  /**
   * Get the cost for casting the given arg types
   * to the desired target classes.
   *
   * @param args The database arguments from which we are casting.
   * @param argSqlTypes The SQL types of the args.
   * @param targets The java classes to which we are casting.
   * @return The cost of doing the cast for all arguments,
   *         or -1 if the args can not be cast to the targets.
   */
  static int getCastingCost(TObject[] args, int[] argSqlTypes,
                            Class[] targets) {
    if (targets.length != argSqlTypes.length) {
      return -1;        // wrong number of args
    }

    // Sum up the cost of converting each arg
    int totalCost = 0;
    for (int n = 0; n < argSqlTypes.length; ++n) {
      int argCost = getCastingCost(args[n], argSqlTypes[n], targets[n]);
      if (argCost < 0) {
        return -1;      //can't cast this arg type
      }
      // NOTE(review): with integer division, argCost * n / 10000 is 0 for
      // typical costs (<= a few hundred) and small n, so this tie-break
      // rarely contributes anything — TODO confirm the intended scale.
      int positionalCost = argCost * n / 10000;
                        //Add a little bit to disambiguate constructors based on
                        //argument position.  This gives preference to earlier
                        //argument in cases where the cost of two sets of
                        //targets for the same set of args would otherwise
                        //be the same.
      totalCost += argCost + positionalCost;
    }
    return totalCost;
  }

  // These arrays are used in the getCastingCost method below.
private static String[] bitPrims = { "boolean" }; private static Class[] bitClasses = { Boolean.class }; private static String[] tinyPrims = { "byte", "short", "int", "long" }; private static Class[] tinyClasses = { Byte.class, Short.class, Integer.class, Long.class, Number.class }; private static String[] smallPrims = { "short", "int", "long" }; private static Class[] smallClasses = { Short.class, Integer.class, Long.class, Number.class }; private static String[] intPrims = { "int", "long" }; private static Class[] intClasses = { Integer.class, Long.class, Number.class }; private static String[] bigPrims = { "long" }; private static Class[] bigClasses = { Long.class, Number.class }; private static String[] floatPrims = { "float", "double" }; private static Class[] floatClasses = { Float.class, Double.class, Number.class }; private static String[] doublePrims = { "double" }; private static Class[] doubleClasses = { Double.class, Number.class }; private static String[] stringPrims = { }; private static Class[] stringClasses = { String.class }; private static String[] datePrims = { }; private static Class[] dateClasses = { java.sql.Date.class, java.util.Date.class }; private static String[] timePrims = { }; private static Class[] timeClasses = { java.sql.Time.class, java.util.Date.class }; private static String[] timestampPrims = { }; private static Class[] timestampClasses = { java.sql.Timestamp.class, java.util.Date.class }; /** * Get the cost to cast an SQL type to the desired target class. * The cost is 0 to cast to TObject, * 100 to cast to the closest primitive, * or 200 to cast to the closest Object, * plus 1 for each widening away from the closest. * * @param arg The argument to cast. * @param argSqlType The SQL type of the arg. * @param target The target to which to cast. * @return The cost to do the cast, or -1 if the cast can not be done. 
*/ static int getCastingCost(TObject arg, int argSqlType, Class target) { //If the user has a method that takes a TObject, assume he can handle //anything. if (target == TObject.class) { return 0; } switch (argSqlType) { case SQLTypes.BIT: return getCastingCost(arg, bitPrims, bitClasses, target); case SQLTypes.TINYINT: return getCastingCost(arg, tinyPrims, tinyClasses, target); case SQLTypes.SMALLINT: return getCastingCost(arg, smallPrims, smallClasses, target); case SQLTypes.INTEGER: return getCastingCost(arg, intPrims, intClasses, target); case SQLTypes.BIGINT: return getCastingCost(arg, bigPrims, bigClasses, target); case SQLTypes.REAL: return getCastingCost(arg, floatPrims, floatClasses, target); case SQLTypes.FLOAT: case SQLTypes.DOUBLE: return getCastingCost(arg, doublePrims, doubleClasses, target); // We only get a NUMERIC or DECIMAL type here if we were not able to // convert it to int, long or double, so we can't handle it. For now we // require that these types be handled by a method that takes a TObject. // That gets checked at the top of this method, so if we get to here // the target is not a TOBject, so we don't know how to handle it. case SQLTypes.NUMERIC: case SQLTypes.DECIMAL: return -1; case SQLTypes.CHAR: case SQLTypes.VARCHAR: case SQLTypes.LONGVARCHAR: return getCastingCost(arg, stringPrims, stringClasses, target); case SQLTypes.DATE: return getCastingCost(arg, datePrims, dateClasses, target); case SQLTypes.TIME: return getCastingCost(arg, timePrims, timeClasses, target); case SQLTypes.TIMESTAMP: return getCastingCost(arg, timestampPrims, timestampClasses, target); case SQLTypes.BINARY: case SQLTypes.VARBINARY: case SQLTypes.LONGVARBINARY: return -1; // Can't handle these, user must use TObject // We can cast a JAVA_OBJECT only if the value is a subtype of the // target class. 
case SQLTypes.JAVA_OBJECT: Object argVal = arg.getObject(); if (argVal == null || target.isAssignableFrom(argVal.getClass())) { return OBJECT_COST; } return -1; // If the declared data type is NULL, then we have no type info to // determine how to cast it. case SQLTypes.NULL: return -1; default: return -1; // Don't know how to cast other types } } /** * Get the cost to cast to the specified target from the set of * allowable primitives and object classes. * * @param arg The value being cast. * @param prims The set of allowable Java primitive types to which we can * cast, ordered with the preferred types first. * If the value of the arg is null, it can not be cast to a * primitive type. * @param objects The set of allowable Java Object types to which we can * cast, ordered with the preferred types first. * @param target The target class to which we are casting. * @return The cost of the cast, or -1 if the cast is not allowed. */ static int getCastingCost(TObject arg, String[] prims, Class[] objects, Class target) { if (target.isPrimitive()) { Object argVal = arg.getObject(); // get the vaue of the arg if (argVal == null) { return -1; // can't cast null to a primitive } String targetName = target.getName(); // Look for the closest allowable primitive for (int i = 0; i < prims.length; i++) { if (targetName.equals(prims[i])) return PRIMITIVE_COST+i; // Cost of casting to a primitive plus the widening cost (i) } } else { // Look for the closest allowable object class for (int i = 0; i < objects.length; i++) { if (objects[i].isAssignableFrom(target)) return OBJECT_COST+i; // Cost of casting to an object class plus the widening cost (i) } } return -1; // can't cast it } /** * Cast the given arguments to the specified constructors parameter types. * The caller must already have checked to make sure the argument count * and types match the constructor. * * @param args The database arguments from which to cast. * @param constructor The constructor to which to cast. 
* @return The cast arguments. */ public static Object[] castArgsToConstructor( TObject[] args, Constructor constructor) { Class[] targets = constructor.getParameterTypes(); return castArgs(args, targets); } /** * Cast the given arguments to the specified classes. * The caller must already have checked to make sure the argument count * and types match the constructor. * * @param args The database arguments from which to cast. * @param targets The java classes to which to cast. * @return The cast arguments. */ static Object[] castArgs(TObject[] args, Class[] targets) { if (targets.length != args.length) { // we shouldn't get this error throw new RuntimeException("array length mismatch: arg="+args.length+ ", targets="+targets.length); } Object[] castedArgs = new Object[args.length]; for (int n = 0; n < args.length; ++n) { castedArgs[n] = castArg(args[n], targets[n]); } return castedArgs; } /** * Cast the object to the specified target. * * @param arg The database argumument from which to cast. * @param target The java class to which to cast. * @return The cast object. */ static Object castArg(TObject arg, Class target) { // By the time we get here, we have already run through the cost function // and eliminated the casts that don't work, including not allowing a null // value to be cast to a primitive type. if (target == TObject.class) { return arg; } Object argVal = arg.getObject(); if (argVal == null) { // If the arg is null, then we must be casting to an Object type, // so just return null. 
return null; } //boolean isPrimitive = target.isPrimitive(); String targetName = target.getName(); if (argVal instanceof Boolean) { //BIT if (targetName.equals("boolean") || Boolean.class.isAssignableFrom(target)) { return argVal; } } else if (argVal instanceof Number) { //TINYINT, SMALLINT, INTEGER, BIGINT, //REAL, FLOAT, DOUBLE, NUMERIC, DECIMAL Number num = (Number)argVal; if (targetName.equals("byte") || Byte.class.isAssignableFrom(target)) { return new Byte(num.byteValue()); } if (targetName.equals("short") || Short.class.isAssignableFrom(target)) { return new Short(num.shortValue()); } if (targetName.equals("int") || Integer.class.isAssignableFrom(target)) { return new Integer(num.intValue()); } if (targetName.equals("long") || Long.class.isAssignableFrom(target)) { return new Long(num.longValue()); } if (targetName.equals("float") || Float.class.isAssignableFrom(target)) { return new Float(num.floatValue()); } if (targetName.equals("double") || Double.class.isAssignableFrom(target)) { /* FIX: was 'new Float(num.doubleValue())' -- that truncated the value to float precision and handed a Float to reflective calls expecting a Double, which throws IllegalArgumentException. */ return new Double(num.doubleValue()); } } else if (argVal instanceof java.util.Date) { //DATE, TIME, TIMESTAMP java.util.Date date = (java.util.Date)argVal; if (java.sql.Date.class.isAssignableFrom(target)) { return new java.sql.Date(date.getTime()); } if (java.sql.Time.class.isAssignableFrom(target)) { return new java.sql.Time(date.getTime()); } if (java.sql.Timestamp.class.isAssignableFrom(target)) { return new java.sql.Timestamp(date.getTime()); } if (java.util.Date.class.isAssignableFrom(target)) { return date; } } else if (argVal instanceof String || argVal instanceof StringObject) { //CHAR, VARCHAR, LONGVARCHAR String s = argVal.toString(); if (String.class.isAssignableFrom(target)) { return s; } } else if (getSqlType(arg) == SQLTypes.JAVA_OBJECT) { // JAVA_OBJECT if (target.isAssignableFrom(argVal.getClass())) { return argVal; } } else { // BINARY, VARBINARY, LONGVARBINARY // NULL // We don't know how to handle any of these except as TObject } // Can't cast - we should
not get here, since we checked for the // legality of the cast when calculating the cost. However, the // code to do the cost is not the same as the code to do the casting, // so we may have messed up in one or the other. throw new RuntimeException("Programming error: Can't cast from "+ argVal.getClass().getName() + " to " + target.getName()); } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/CellBufferInputStream.java000066400000000000000000000134721330501023400267740ustar00rootroot00000000000000/** * com.mckoi.database.CellBufferInputStream 12 Sep 1998 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; //import java.io.ByteArrayInputStream; import java.io.DataInputStream; import java.io.IOException; import java.io.InputStream; import java.io.DataInput; /** * This is a reusable cell stream object that is extended from the * ByteArrayInputStream class, which provides methods for reusing the object * on a different byte[] arrays. It is used as an efficient way of reading * cell information from a binary fixed length cell type file. *

* It would usually be wrapped in a DataInputStream object. *

* @author Tobias Downer */ final class CellBufferInputStream extends InputStream implements CellInput { // /** // * A wrapped DataInputStream over this stream. // */ // private DataInputStream wrapped_data; private byte buf[]; private int pos; private int mark = 0; private int count; /** * The Constructor. */ CellBufferInputStream() { this.buf = null; this.pos = 0; this.count = 0; } /** * Sets up the stream to the start of the underlying array. */ void setArray(byte[] new_buffer) { buf = new_buffer; pos = 0; mark = 0; count = new_buffer.length; } /** * Sets up the stream to the underlying array with the given variables. */ void setArray(byte[] new_buffer, int offset, int length) { buf = new_buffer; pos = offset; mark = 0; count = Math.min(new_buffer.length, length + offset); } /** * Sped up methods. */ public int read() { return buf[pos++] & 0x0FF; // return (pos < count) ? (buf[pos++] & 0xff) : -1; } public int read(byte b[], int off, int len) { // if (b == null) { // throw new NullPointerException(); // } else if ((off < 0) || (off > b.length) || (len < 0) || // ((off + len) > b.length) || ((off + len) < 0)) { // throw new IndexOutOfBoundsException(); // } if (pos >= count) { return -1; } if (pos + len > count) { len = count - pos; } if (len <= 0) { return 0; } System.arraycopy(buf, pos, b, off, len); pos += len; return len; } public long skip(long n) { if (pos + n > count) { n = count - pos; } if (n < 0) { return 0; } pos += n; return n; } public int available() { return count - pos; } public void mark(int readAheadLimit) { mark = pos; } public void reset() { pos = mark; } public void close() throws IOException { } // ---------- Implemented from DataInput ---------- public void readFully(byte[] b) throws IOException { read(b, 0, b.length); } public void readFully(byte b[], int off, int len) throws IOException { read(b, off, len); } public int skipBytes(int n) throws IOException { return (int) skip(n); } public boolean readBoolean() throws IOException { return 
(read() != 0); } public byte readByte() throws IOException { return (byte) read(); } public int readUnsignedByte() throws IOException { return read(); } public short readShort() throws IOException { int ch1 = read(); int ch2 = read(); return (short)((ch1 << 8) + (ch2 << 0)); } public int readUnsignedShort() throws IOException { int ch1 = read(); int ch2 = read(); return (ch1 << 8) + (ch2 << 0); } public char readChar() throws IOException { int ch1 = read(); int ch2 = read(); return (char)((ch1 << 8) + (ch2 << 0)); } private char[] char_buffer = new char[8192]; public String readChars(int length) throws IOException { if (length <= char_buffer.length) { for (int i = 0; i < length; ++i) { char_buffer[i] = (char) (((buf[pos++] & 0x0FF) << 8) + ((buf[pos++] & 0x0FF) << 0)); } return new String(char_buffer, 0, length); } else { StringBuffer chrs = new StringBuffer(length); for (int i = length; i > 0; --i) { chrs.append((char) (((buf[pos++] & 0x0FF) << 8) + ((buf[pos++] & 0x0FF) << 0))); } return new String(chrs); } } public int readInt() throws IOException { return ((buf[pos++] & 0x0FF) << 24) + ((buf[pos++] & 0x0FF) << 16) + ((buf[pos++] & 0x0FF) << 8) + ((buf[pos++] & 0x0FF) << 0); } public long readLong() throws IOException { return ((long)(readInt()) << 32) + (readInt() & 0xFFFFFFFFL); } public float readFloat() throws IOException { return Float.intBitsToFloat(readInt()); } public double readDouble() throws IOException { return Double.longBitsToDouble(readLong()); } public String readLine() throws IOException { throw new Error("Not implemented."); } public String readUTF() throws IOException { throw new Error("Not implemented."); } // /** // * Returns a wrapped DataInputStream for this stream. This is a // * convenience, but will improve on efficiency of a // * 'new DataInputStream(...)' type allocation. 
// */ // DataInputStream getDataInputStream() { // if (wrapped_data != null) { // return wrapped_data; // } // return wrapped_data = new DataInputStream(this); // } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/CellBufferOutputStream.java000066400000000000000000000030311330501023400271630ustar00rootroot00000000000000/** * com.mckoi.database.CellBufferOutputStream 27 Mar 1998 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import java.io.*; /** * This is a ByteArrayOutputStream that allows access to the underlying byte * array. It can be instantiated, and then used over and over as a temporary * buffer between the writeTo methods and the underlying random access file * stream. *

* @author Tobias Downer */ public final class CellBufferOutputStream extends ByteArrayOutputStream { /** * The Constructor. */ public CellBufferOutputStream(int length) { super(length); } /** * Returns the underlying stream you should not use the stream while you have * a handle on this reference. */ public byte[] getByteArray() { return buf; } /** * Sets the pointer to specified point in the array. */ public void seek(int pointer) { count = pointer; } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/CellInput.java000066400000000000000000000020261330501023400244570ustar00rootroot00000000000000/** * com.mckoi.database.CellInput 22 Nov 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import java.io.*; /** * * * @author Tobias Downer */ public interface CellInput extends DataInput { /** * Reads a string of chars from the input stream up to the given length * and returns it as a String object. */ String readChars(int length) throws IOException; } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/CellInputStream.java000066400000000000000000000102031330501023400256270ustar00rootroot00000000000000/** * com.mckoi.database.CellInputStream 22 Nov 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import java.io.*; /** * An implementation of CellInput that reads data from an underlying stream. * * @author Tobias Downer */ final class CellInputStream implements CellInput { /** * The parent input stream. */ private InputStream parent_stream; /** * The Constructor. */ CellInputStream(InputStream parent_stream) { setParentStream(parent_stream); } /** * Sets the parent input stream for this stream. This allows us to * recycle this object. 
*/ public void setParentStream(InputStream parent_stream) { this.parent_stream = parent_stream; } public int read() throws IOException { return parent_stream.read(); } public int read(byte b[], int off, int len) throws IOException { return parent_stream.read(b, off, len); } public long skip(long n) throws IOException { return parent_stream.skip(n); } public int available() throws IOException { return parent_stream.available(); } public void mark(int readAheadLimit) throws IOException { parent_stream.mark(readAheadLimit); } public void reset() throws IOException { parent_stream.reset(); } public void close() throws IOException { parent_stream.close(); } // ---------- Implemented from DataInput ---------- public void readFully(byte[] b) throws IOException { read(b, 0, b.length); } public void readFully(byte b[], int off, int len) throws IOException { read(b, off, len); } public int skipBytes(int n) throws IOException { return (int) skip(n); } public boolean readBoolean() throws IOException { return (read() != 0); } public byte readByte() throws IOException { return (byte) read(); } public int readUnsignedByte() throws IOException { return read(); } public short readShort() throws IOException { int ch1 = read(); int ch2 = read(); return (short)((ch1 << 8) + (ch2 << 0)); } public int readUnsignedShort() throws IOException { int ch1 = read(); int ch2 = read(); return (ch1 << 8) + (ch2 << 0); } public char readChar() throws IOException { int ch1 = read(); int ch2 = read(); return (char)((ch1 << 8) + (ch2 << 0)); } private char[] char_buffer; public String readChars(int length) throws IOException { if (length <= 8192) { if (char_buffer == null) { char_buffer = new char[8192]; } for (int i = 0; i < length; ++i) { char_buffer[i] = readChar(); } return new String(char_buffer, 0, length); } else { StringBuffer chrs = new StringBuffer(length); for (int i = length; i > 0; --i) { chrs.append(readChar()); } return new String(chrs); } } public int readInt() throws IOException { 
int ch1 = read(); int ch2 = read(); int ch3 = read(); int ch4 = read(); return (int)((ch1 << 24) + (ch2 << 16) + (ch3 << 8) + (ch4 << 0)); } public long readLong() throws IOException { return ((long)(readInt()) << 32) + (readInt() & 0xFFFFFFFFL); } public float readFloat() throws IOException { return Float.intBitsToFloat(readInt()); } public double readDouble() throws IOException { return Double.longBitsToDouble(readLong()); } public String readLine() throws IOException { throw new Error("Not implemented."); } public String readUTF() throws IOException { throw new Error("Not implemented."); } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/CollatedBaseSearch.java000066400000000000000000000211621330501023400262320ustar00rootroot00000000000000/** * com.mckoi.database.CollatedBaseSearch 26 Aug 2002 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import java.io.*; import com.mckoi.util.IntegerVector; import com.mckoi.util.BlockIntegerList; /** * An implementation of SelectableScheme that is based on some collated set of * data. This can be used to implement more advanced types of selectable * schemes based on presistant indexes (see InsertSearch). *

* The default implementation maintains no state. *

* Derived classes are required to implement 'copy', 'searchFirst' and * 'searchLast' methods. With these basic methods, a selectable scheme can * be generated provided the column is sorted in ascending order (value of row i * is <= value of row i+1). Overwrite 'firstInCollationOrder', * 'lastInCollationOrder' and 'addRangeToSet' methods for non sorted underlying * sets. *

* Assumptions - the underlying column is sorted low to high (value of row i * is <= value of row i+1). * * @author Tobias Downer */ public abstract class CollatedBaseSearch extends SelectableScheme { /** * The Constructor. */ public CollatedBaseSearch(TableDataSource table, int column) { super(table, column); } /** * This scheme doesn't take any notice of insertions or removals. */ public void insert(int row) { // Ignore insert (no state to maintain) if (isImmutable()) { throw new Error("Tried to change an immutable scheme."); } } /** * This scheme doesn't take any notice of insertions or removals. */ public void remove(int row) { // Ignore remove (no state to maintain) if (isImmutable()) { throw new Error("Tried to change an immutable scheme."); } } /** * Reads the entire state of the scheme from the input stream. * This is a trivial case for BlindSearch which doesn't require any * data to be stored. */ public void readFrom(InputStream in) throws IOException { } /** * Writes the entire state of the scheme to the output stream. * This is a trivial case for BlindSearch which doesn't require any * data to be stored. */ public void writeTo(OutputStream out) throws IOException { } /** * Disposes and invalidates the BlindSearch. */ public void dispose() { // Nothing to do! } // -------- Abstract or overwrittable methods ---------- /** * Finds the position in the collated set of the first value in the column * equal to the given value. If the value is not to be found in the column, * it returns -(insert_position + 1). */ protected abstract int searchFirst(TObject val); /** * Finds the position in the collated set of the last value in the column * equal to the given value. If the value is not to be found in the column, * it returns -(insert_position + 1). */ protected abstract int searchLast(TObject val); /** * The size of the set (the number of rows in this column). 
*/ protected int setSize() { return getTable().getRowCount(); } /** * Returns the first value of this column (in collated order). For * example, if the column contains {1, 4, 8} then '1' is returned. */ protected TObject firstInCollationOrder() { return getCellContents(0); } /** * Returns the last value of this column (in collated order). For * example, if the column contains {1, 4, 8} then '8' is returned. */ protected TObject lastInCollationOrder() { return getCellContents(setSize() - 1); } /** * Adds the set indexes to the list that represent the range of values * between the start (inclusive) and end offset (inclusive) given. */ protected IntegerVector addRangeToSet(int start, int end, IntegerVector ivec) { if (ivec == null) { ivec = new IntegerVector((end - start) + 2); } for (int i = start; i <= end; ++i) { ivec.addInt(i); } return ivec; } // ---------- Range search methods ---------- public IntegerVector selectAll() { return addRangeToSet(0, setSize() - 1, null); } /** * Given a flag (FIRST_VALUE, LAST_VALUE, BEFORE_FIRST_VALUE or * AFTER_LAST_VALUE) and a value which is either a place marker (first, last * in set) or a TObject object, this will determine the position in this * set of the range point. For example, we may want to know the index of * the last instance of a particular number in a set of numbers which * would be 'positionOfRangePoint(SelectableRange.LAST_VALUE, * [number TObject])'. *

* Note how the position is determined if the value is not found in the set. */ private int positionOfRangePoint(byte flag, TObject val) { int p; TObject cell; switch(flag) { case(SelectableRange.FIRST_VALUE): if (val == SelectableRange.FIRST_IN_SET) { return 0; } if (val == SelectableRange.LAST_IN_SET) { // Get the last value and search for the first instance of it. cell = lastInCollationOrder(); } else { cell = val; } p = searchFirst(cell); // (If value not found) if (p < 0) { return -(p + 1); } return p; case(SelectableRange.LAST_VALUE): if (val == SelectableRange.LAST_IN_SET) { return setSize() - 1; } if (val == SelectableRange.FIRST_IN_SET) { // Get the first value. cell = firstInCollationOrder(); } else { cell = val; } p = searchLast(cell); // (If value not found) if (p < 0) { return -(p + 1) - 1; } return p; case(SelectableRange.BEFORE_FIRST_VALUE): if (val == SelectableRange.FIRST_IN_SET) { return -1; } if (val == SelectableRange.LAST_IN_SET) { // Get the last value and search for the first instance of it. cell = lastInCollationOrder(); } else { cell = val; } p = searchFirst(cell); // (If value not found) if (p < 0) { return -(p + 1) - 1; } return p - 1; case(SelectableRange.AFTER_LAST_VALUE): if (val == SelectableRange.LAST_IN_SET) { return setSize(); } if (val == SelectableRange.FIRST_IN_SET) { // Get the first value. cell = firstInCollationOrder(); } else { cell = val; } p = searchLast(cell); // (If value not found) if (p < 0) { return -(p + 1); } return p + 1; default: throw new Error("Unrecognised flag."); } } /** * Adds a range from this set to the given IntegerVector. IntegerVector may * be null if a list has not yet been allocated for the range. */ private IntegerVector addRange(SelectableRange range, IntegerVector ivec) { int r1, r2; // Select the range specified. 
byte start_flag = range.getStartFlag(); TObject start = range.getStart(); byte end_flag = range.getEndFlag(); TObject end = range.getEnd(); r1 = positionOfRangePoint(start_flag, start); r2 = positionOfRangePoint(end_flag, end); if (r2 < r1) { return ivec; } // Add the range to the set return addRangeToSet(r1, r2, ivec); } public IntegerVector selectRange(SelectableRange range) { // If no items in the set return an empty set if (setSize() == 0) { return new IntegerVector(0); } IntegerVector ivec = addRange(range, null); if (ivec == null) { return new IntegerVector(0); } return ivec; } public IntegerVector selectRange(SelectableRange[] ranges) { // If no items in the set return an empty set if (setSize() == 0) { return new IntegerVector(0); } IntegerVector ivec = null; for (int i = 0; i < ranges.length; ++i) { SelectableRange range = ranges[i]; ivec = addRange(range, ivec); } if (ivec == null) { return new IntegerVector(0); } return ivec; } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/CompositeTable.java000066400000000000000000000157311330501023400255010ustar00rootroot00000000000000/** * com.mckoi.database.CompositeTable 28 Oct 2001 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import com.mckoi.util.IntegerVector; /** * A composite of two or more datasets used to implement UNION, INTERSECTION, * and DIFFERENCE. 
* * @author Tobias Downer */ public class CompositeTable extends Table implements RootTable { // ---------- Statics ---------- /** * The composite function for finding the union of the tables. */ public static final int UNION = 1; /** * The composite function for finding the intersection of the tables. */ public static final int INTERSECT = 2; /** * The composite function for finding the difference of the tables. */ public static final int EXCEPT = 3; // ---------- Members ---------- /** * The 'master table' used to resolve information about this table such as * fields and field types. */ private Table master_table; /** * The tables being made a composite of. */ private Table[] composite_tables; /** * The list of indexes of rows to include in each table. */ private IntegerVector[] table_indexes; /** * The schemes to describe the entity relation in the given column. */ private SelectableScheme[] column_scheme; /** * The number of root locks on this table. */ private int roots_locked; /** * Constructs the composite table given the 'master_table' (the field * structure this composite dataset is based on), and a list of tables to * be the composite of this table. The 'master_table' must be one of the * elements of the 'composite_list' array. *

* NOTE: This does not set up table indexes for a composite function. */ public CompositeTable(Table master_table, Table[] composite_list) { super(); this.master_table = master_table; this.composite_tables = composite_list; this.column_scheme = new SelectableScheme[master_table.getColumnCount()]; } /** * Constructs the composite table assuming the first item in the list is the * master table. */ public CompositeTable(Table[] composite_list) { this(composite_list[0], composite_list); } /** * Removes duplicate rows from the table. If 'pre_sorted' is true then each * composite index is already in sorted order. */ private void removeDuplicates(boolean pre_sorted) { throw new Error("PENDING"); } /** * Sets up the indexes in this composite table by performing the composite * function on the tables. If the 'all' parameter is true then duplicate * rows are removed. */ public void setupIndexesForCompositeFunction(int function, boolean all) { int size = composite_tables.length; table_indexes = new IntegerVector[size]; if (function == UNION) { // Include all row sets in all tables for (int i = 0; i < size; ++i) { table_indexes[i] = composite_tables[i].selectAll(); } if (!all) { removeDuplicates(false); } } else { throw new Error("Unrecognised composite function"); } } // ---------- Implemented from Table ---------- public Database getDatabase() { return master_table.getDatabase(); } public int getColumnCount() { return master_table.getColumnCount(); } public int getRowCount() { int row_count = 0; for (int i = 0; i < table_indexes.length; ++i) { row_count += table_indexes[i].size(); } return row_count; } public int findFieldName(Variable v) { return master_table.findFieldName(v); } public DataTableDef getDataTableDef() { return master_table.getDataTableDef(); } public Variable getResolvedVariable(int column) { return master_table.getResolvedVariable(column); } SelectableScheme getSelectableSchemeFor(int column, int original_column, Table table) { SelectableScheme scheme =
column_scheme[column]; if (scheme == null) { scheme = new BlindSearch(this, column); column_scheme[column] = scheme; } // If we are getting a scheme for this table, simple return the information // from the column_trees Vector. if (table == this) { return scheme; } // Otherwise, get the scheme to calculate a subset of the given scheme. else { return scheme.getSubsetScheme(table, original_column); } } void setToRowTableDomain(int column, IntegerVector row_set, TableDataSource ancestor) { if (ancestor != this) { throw new RuntimeException("Method routed to incorrect table ancestor."); } } RawTableInformation resolveToRawTable(RawTableInformation info) { System.err.println("Efficiency Warning in DataTable.resolveToRawTable."); IntegerVector row_set = new IntegerVector(); RowEnumeration e = rowEnumeration(); while (e.hasMoreRows()) { row_set.addInt(e.nextRowIndex()); } info.add(this, row_set); return info; } public TObject getCellContents(int column, int row) { for (int i = 0; i < table_indexes.length; ++i) { IntegerVector ivec = table_indexes[i]; int sz = ivec.size(); if (row < sz) { return composite_tables[i].getCellContents(column, ivec.intAt(row)); } else { row -= sz; } } throw new Error("Row '" + row + "' out of bounds."); } public RowEnumeration rowEnumeration() { return new SimpleRowEnumeration(getRowCount()); } void addDataTableListener(DataTableListener listener) { for (int i = 0; i < composite_tables.length; ++i) { composite_tables[i].addDataTableListener(listener); } } void removeDataTableListener(DataTableListener listener) { for (int i = 0; i < composite_tables.length; ++i) { composite_tables[i].removeDataTableListener(listener); } } public void lockRoot(int lock_key) { // For each table, recurse. roots_locked++; for (int i = 0; i < composite_tables.length; ++i) { composite_tables[i].lockRoot(lock_key); } } public void unlockRoot(int lock_key) { // For each table, recurse. 
roots_locked--; for (int i = 0; i < composite_tables.length; ++i) { composite_tables[i].unlockRoot(lock_key); } } public boolean hasRootsLocked() { return roots_locked != 0; } // ---------- Implemented from RootTable ---------- public boolean typeEquals(RootTable table) { return (this == table); // return true; } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/ConnectionTriggerManager.java000066400000000000000000000411041330501023400274760ustar00rootroot00000000000000/** * com.mckoi.database.ConnectionTriggerManager 13 Mar 2003 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import java.io.*; import java.util.ArrayList; import com.mckoi.debug.Lvl; import com.mckoi.util.IntegerVector; import com.mckoi.util.BigNumber; /** * A trigger manager on a DatabaseConnection that maintains a list of all * triggers set in the database, and the types of triggers they are. This * object is closely tied to a DatabaseConnection. *

* The trigger manager actually uses a trigger itself to maintain a list of * tables that have triggers, and the action to perform on the trigger. * * @author Tobias Downer */ public final class ConnectionTriggerManager { /** * The DatabaseConnection. */ private DatabaseConnection connection; /** * The list of triggers currently in view. * (TriggerInfo) */ private ArrayList triggers_active; /** * If this is false then the list is not validated and must be refreshed * when we next access trigger information. */ private boolean list_validated; /** * True if the trigger table was modified during the last transaction. */ private boolean trigger_modified; /** * Constructs the manager. */ ConnectionTriggerManager(DatabaseConnection connection) { this.connection = connection; this.triggers_active = new ArrayList(); this.list_validated = false; this.trigger_modified = false; // Attach a commit trigger listener connection.attachTableBackedCache(new CTMBackedCache()); } /** * Returns a Table object that contains the trigger information with the * given name. Returns an empty table if no trigger found. */ private Table findTrigger(QueryContext context, DataTable table, String schema, String name) { // Find all the trigger entries with this name Operator EQUALS = Operator.get("="); Variable schemav = table.getResolvedVariable(0); Variable namev = table.getResolvedVariable(1); Table t = table.simpleSelect(context, namev, EQUALS, new Expression(TObject.stringVal(name))); return t.exhaustiveSelect(context, Expression.simple( schemav, EQUALS, TObject.stringVal(schema))); } /** * Creates a new trigger action on a stored procedure and makes the change * to the transaction of this DatabaseConnection. If the connection is * committed then the trigger is made a perminant change to the database. * * @param schema the schema name of the trigger. * @param name the name of the trigger. * @param type the type of trigger. * @param procedure_name the name of the procedure to execute. 
* @param params any constant parameters for the triggering procedure. */ public void createTableTrigger(String schema, String name, int type, TableName on_table, String procedure_name, TObject[] params) throws DatabaseException { TableName trigger_table_name = new TableName(schema, name); // Check this name is not reserved DatabaseConnection.checkAllowCreate(trigger_table_name); // Before adding the trigger, make sure this name doesn't already resolve // to an object in the database with this schema/name. if (!connection.tableExists(trigger_table_name)) { // Encode the parameters ByteArrayOutputStream bout = new ByteArrayOutputStream(); try { ObjectOutputStream ob_out = new ObjectOutputStream(bout); ob_out.writeInt(1); // version ob_out.writeObject(params); ob_out.flush(); } catch (IOException e) { throw new RuntimeException("IO Error: " + e.getMessage()); } byte[] encoded_params = bout.toByteArray(); // Insert the entry into the trigger table, DataTable table = connection.getTable(Database.SYS_DATA_TRIGGER); RowData row = new RowData(table); row.setColumnDataFromTObject(0, TObject.stringVal(schema)); row.setColumnDataFromTObject(1, TObject.stringVal(name)); row.setColumnDataFromTObject(2, TObject.intVal(type)); row.setColumnDataFromTObject(3, TObject.stringVal("T:" + on_table.toString())); row.setColumnDataFromTObject(4, TObject.stringVal(procedure_name)); row.setColumnDataFromTObject(5, TObject.objectVal(encoded_params)); row.setColumnDataFromTObject(6, TObject.stringVal(connection.getUser().getUserName())); table.add(row); // Invalidate the list invalidateTriggerList(); // Notify that this database object has been successfully created. connection.databaseObjectCreated(trigger_table_name); // Flag that this transaction modified the trigger table. trigger_modified = true; } else { throw new RuntimeException("Trigger name '" + schema + "." + name + "' already in use."); } } /** * Drops a trigger that has previously been defined. 
*/ public void dropTrigger(String schema, String name) throws DatabaseException { QueryContext context = new DatabaseQueryContext(connection); DataTable table = connection.getTable(Database.SYS_DATA_TRIGGER); // Find the trigger Table t = findTrigger(context, table, schema, name); if (t.getRowCount() == 0) { throw new StatementException("Trigger '" + schema + "." + name + "' not found."); } else if (t.getRowCount() > 1) { throw new RuntimeException( "Assertion failed: multiple entries for the same trigger name."); } else { // Drop this trigger, table.delete(t); // Notify that this database object has been successfully dropped. connection.databaseObjectDropped(new TableName(schema, name)); // Flag that this transaction modified the trigger table. trigger_modified = true; } } /** * Returns true if the trigger exists, false otherwise. */ public boolean triggerExists(String schema, String name) { QueryContext context = new DatabaseQueryContext(connection); DataTable table = connection.getTable(Database.SYS_DATA_TRIGGER); // Find the trigger Table t = findTrigger(context, table, schema, name); if (t.getRowCount() == 0) { // Trigger wasn't found return false; } else if (t.getRowCount() > 1) { throw new RuntimeException( "Assertion failed: multiple entries for the same trigger name."); } else { // Trigger found return true; } } /** * Invalidates the trigger list causing the list to rebuild when a potential * triggering event next occurs. *

* NOTE: must only be called from the thread that owns the * DatabaseConnection. */ private void invalidateTriggerList() { list_validated = false; triggers_active.clear(); } /** * Build the trigger list if it is not validated. */ private void buildTriggerList() { if (!list_validated) { // Cache the trigger table DataTable table = connection.getTable(Database.SYS_DATA_TRIGGER); RowEnumeration e = table.rowEnumeration(); // For each row while (e.hasMoreRows()) { int row_index = e.nextRowIndex(); TObject trig_schem = table.getCellContents(0, row_index); TObject trig_name = table.getCellContents(1, row_index); TObject type = table.getCellContents(2, row_index); TObject on_object = table.getCellContents(3, row_index); TObject action = table.getCellContents(4, row_index); TObject misc = table.getCellContents(5, row_index); TriggerInfo trigger_info = new TriggerInfo(); trigger_info.schema = trig_schem.getObject().toString(); trigger_info.name = trig_name.getObject().toString(); trigger_info.type = type.toBigNumber().intValue(); trigger_info.on_object = on_object.getObject().toString(); trigger_info.action = action.getObject().toString(); trigger_info.misc = misc; // Add to the list triggers_active.add(trigger_info); } list_validated = true; } } /** * Performs any trigger action for this event. For example, if we have it * setup so a trigger fires when there is an INSERT event on table x then * we perform the triggering procedure right here. */ void performTriggerAction(TableModificationEvent evt) { // REINFORCED NOTE: The 'tableExists' call is REALLY important. First it // makes sure the transaction on the connection is established (it should // be anyway if a trigger is firing), and it also makes sure the trigger // table exists - which it may not be during database init. 
if (connection.tableExists(Database.SYS_DATA_TRIGGER)) { // If the trigger list isn't built, then do so now buildTriggerList(); // On object value to test for, TableName table_name = evt.getTableName(); String on_ob_test = "T:" + table_name.toString(); // Search the triggers list for an event that matches this event int sz = triggers_active.size(); for (int i = 0; i < sz; ++i) { TriggerInfo t_info = (TriggerInfo) triggers_active.get(i); if (t_info.on_object.equals(on_ob_test)) { // Table name matches // Do the types match? eg. before/after match, and // insert/delete/update is being listened to. if (evt.listenedBy(t_info.type)) { // Type matches this trigger, so we need to fire it // Parse the action string String action = t_info.action; // Get the procedure name to fire (qualify it against the schema // of the table being fired). ProcedureName procedure_name = ProcedureName.qualify(table_name.getSchema(), action); // Set up OLD and NEW tables // Record the old table state DatabaseConnection.OldNewTableState current_state = connection.getOldNewTableState(); // Set the new table state // If an INSERT event then we setup NEW to be the row being inserted // If an DELETE event then we setup OLD to be the row being deleted // If an UPDATE event then we setup NEW to be the row after the // update, and OLD to be the row before the update. connection.setOldNewTableState( new DatabaseConnection.OldNewTableState(table_name, evt.getRowIndex(), evt.getRowData(), evt.isBefore())); try { // Invoke the procedure (no arguments) connection.getProcedureManager().invokeProcedure( procedure_name, new TObject[0]); } finally { // Reset the OLD and NEW tables to previous values connection.setOldNewTableState(current_state); } } } } // for each trigger } } /** * Returns an InternalTableInfo object used to model the list of triggers * that are accessible within the given Transaction object. This is used to * model all triggers that have been defined as tables. 
*/ static InternalTableInfo createInternalTableInfo(Transaction transaction) { return new TriggerInternalTableInfo(transaction); } // ---------- Inner classes ---------- /** * A TableBackedCache that manages the list of connection level triggers that * are currently active on this connection. */ private class CTMBackedCache extends TableBackedCache { /** * Constructor. */ public CTMBackedCache() { super(Database.SYS_DATA_TRIGGER); } public void purgeCacheOfInvalidatedEntries( IntegerVector added_rows, IntegerVector removed_rows) { // Note that this is called when a transaction is started or stopped. // If the trigger table was modified, we need to invalidate the trigger // list. This covers the case when we rollback a trigger table change if (trigger_modified) { invalidateTriggerList(); trigger_modified = false; } // If any data has been committed removed then completely flush the // cache. else if ((removed_rows != null && removed_rows.size() > 0) || (added_rows != null && added_rows.size() > 0)) { invalidateTriggerList(); } } } /** * Container class for all trigger actions defined on the database. */ private class TriggerInfo { String schema; String name; int type; String on_object; String action; TObject misc; } /** * An object that models the list of triggers as table objects in a * transaction. 
*/ private static class TriggerInternalTableInfo extends AbstractInternalTableInfo2 { TriggerInternalTableInfo(Transaction transaction) { super(transaction, Database.SYS_DATA_TRIGGER); } private static DataTableDef createDataTableDef(String schema, String name) { // Create the DataTableDef that describes this entry DataTableDef def = new DataTableDef(); def.setTableName(new TableName(schema, name)); // Add column definitions def.addColumn(DataTableColumnDef.createNumericColumn("type")); def.addColumn(DataTableColumnDef.createStringColumn("on_object")); def.addColumn(DataTableColumnDef.createStringColumn("procedure_name")); def.addColumn(DataTableColumnDef.createStringColumn("param_args")); def.addColumn(DataTableColumnDef.createStringColumn("owner")); // Set to immutable def.setImmutable(); // Return the data table def return def; } public String getTableType(int i) { return "TRIGGER"; } public DataTableDef getDataTableDef(int i) { TableName table_name = getTableName(i); return createDataTableDef(table_name.getSchema(), table_name.getName()); } public MutableTableDataSource createInternalTable(int index) { MutableTableDataSource table = transaction.getTable(Database.SYS_DATA_TRIGGER); RowEnumeration row_e = table.rowEnumeration(); int p = 0; int i; int row_i = -1; while (row_e.hasMoreRows()) { i = row_e.nextRowIndex(); if (p == index) { row_i = i; } else { ++p; } } if (p == index) { String schema = table.getCellContents(0, row_i).getObject().toString(); String name = table.getCellContents(1, row_i).getObject().toString(); final DataTableDef table_def = createDataTableDef(schema, name); final TObject type = table.getCellContents(2, row_i); final TObject on_object = table.getCellContents(3, row_i); final TObject procedure_name = table.getCellContents(4, row_i); final TObject param_args = table.getCellContents(5, row_i); final TObject owner = table.getCellContents(6, row_i); // Implementation of MutableTableDataSource that describes this // trigger. 
return new GTDataSource(transaction.getSystem()) { public DataTableDef getDataTableDef() { return table_def; } public int getRowCount() { return 1; } public TObject getCellContents(int col, int row) { switch (col) { case 0: return type; case 1: return on_object; case 2: return procedure_name; case 3: return param_args; case 4: return owner; default: throw new RuntimeException("Column out of bounds."); } } }; } else { throw new RuntimeException("Index out of bounds."); } } } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/ConvertUtils.java000066400000000000000000000143201330501023400252210ustar00rootroot00000000000000/** * com.mckoi.database.ConvertUtils 04 Oct 2001 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import java.io.*; import java.util.ArrayList; import com.mckoi.debug.DebugLogger; import com.mckoi.util.ByteArrayUtil; import com.mckoi.util.IntegerListInterface; /** * Various static database convertion tools for converting for upgrading * parts of the database. * * @author Tobias Downer */ class ConvertUtils { /** * Upgrades an .ijf index file to an .iid IndexStore. With version 0.92 * of the database we introduced a specialized scalable IndexStore for * storing all indexing information. *

* Returns an list of MasterTableJournal that contains any journal entries * that are pending to be made to the table. */ static ArrayList convertIndexFiles1(File original_ijf, IndexStore new_store, DataTableDef table_def, DebugLogger logger) throws IOException { int column_count = table_def.columnCount(); // Open the old ijf file FixedSizeDataStore ijf = new FixedSizeDataStore(original_ijf, -1, false, logger); ijf.open(false); int block_size = 1024; if (table_def.getTableName().getSchema().equals("SYS_INFO")) { block_size = 128; } // Create and initialize the new index store new_store.create(block_size); new_store.init(); new_store.addIndexLists(column_count + 1, (byte) 1); new_store.flush(); IndexSet index_set = new_store.getSnapshotIndexSet(); // Load the index header. int header_size = 8 + 4 + 4 + (column_count * 4); byte[] index_header_data = new byte[header_size]; byte[] reserved_header = new byte[64]; ijf.readReservedBuffer(reserved_header, 0, 64); // Get the current 'unique_id' value. long unique_id = ByteArrayUtil.getLong(reserved_header, 8); int cur_header_sector = ByteArrayUtil.getInt(reserved_header, 0); ijf.readAcross(cur_header_sector, index_header_data, 0, index_header_data.length); // 'index_header_data' will now contain the header format. // --- // Convert the master index first, // Where is the information in the header file? int mast_index_sector = ByteArrayUtil.getInt(index_header_data, 8); InputStream sin = ijf.getSectorInputStream(mast_index_sector); DataInputStream din = new DataInputStream(sin); int ver = din.readInt(); // The version. if (ver != 1) { throw new IOException("Unrecognised master index list version."); } // The master index is always at 0. 
IntegerListInterface master_index = index_set.getIndex(0); int entries_count = din.readInt(); int previous = -1; for (int i = 0; i < entries_count; ++i) { int entry = din.readInt(); if (entry == previous) { throw new IOException("Master index format corrupt - double entry."); } else if (entry < previous) { throw new IOException("Master index format corrupt - not sorted."); } master_index.add(entry); } // Close the stream din.close(); // --- // Any journal modifications // Where is the information in the header file? int journal_sector = ByteArrayUtil.getInt(index_header_data, 12); sin = ijf.getSectorInputStream(journal_sector); din = new DataInputStream(sin); ver = din.readInt(); // The version. if (ver != 1) { throw new Error("Unrecognised journals list version."); } ArrayList transaction_mod_list = new ArrayList(); int num_journals = din.readInt(); for (int i = 0; i < num_journals; ++i) { MasterTableJournal journal = new MasterTableJournal(); journal.readFrom(din); transaction_mod_list.add(journal); } // Close the stream din.close(); // --- // Convert the indices for each column // This is the new made up list of indices IntegerListInterface[] column_indices = new IntegerListInterface[column_count]; // For each column for (int column = 0; column < column_count; ++column) { // First check this is an indexable type. if (table_def.columnAt(column).isIndexableType()) { // Where is the information in the header file? int scheme_sector = ByteArrayUtil.getInt(index_header_data, 16 + (column * 4)); sin = ijf.getSectorInputStream(scheme_sector); din = new DataInputStream(sin); // Read the type of scheme for this column (1=Insert, 2=Blind). byte t = (byte) din.read(); if (t == 1) { // The index list for the given column IntegerListInterface col_index = index_set.getIndex(column + 1); column_indices[column] = col_index; // Read from the input and output to the list. 
int vec_size = din.readInt(); for (int i = 0; i < vec_size; ++i) { int row = din.readInt(); col_index.add(row); } } else { // Ignore otherwise } // Close the stream din.close(); } // If column is indexable } // for each column // --- // Commit the new index store changes new_store.commitIndexSet(index_set); // Dispose of the set index_set.dispose(); // Set the unique id new_store.setUniqueID(unique_id); // Flush the changes and synchronize with the file system. new_store.flush(); new_store.hardSynch(); // Close and delete the old ijf file ijf.close(); ijf.delete(); // Return the list of MasterTableJournal return transaction_mod_list; } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/CorrelatedVariable.java000066400000000000000000000061601330501023400263150ustar00rootroot00000000000000/** * com.mckoi.database.CorrelatedVariable 08 Nov 2001 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; /** * A wrapper for a variable in a sub-query that references a column outside * of the current query. A correlated variable differs from a regular * variable because its value is constant in an operation, but may vary over * future iterations of the operation. *

* This object is NOT immutable. * * @author Tobias Downer */ public class CorrelatedVariable implements Cloneable, java.io.Serializable { static final long serialVersionUID = -607848111230634419L; /** * The Variable reference itself. */ private Variable variable; /** * The number of sub-query branches back that the reference for this * variable can be found. */ private int query_level_offset; /** * The temporary value this variable has been set to evaluate to. */ private transient TObject eval_result; /** * Constructs the CorrelatedVariable. */ public CorrelatedVariable(Variable variable, int level_offset) { this.variable = variable; this.query_level_offset = level_offset; } /** * Returns the wrapped Variable. */ public Variable getVariable() { return variable; } /** * Returns the number of sub-query branches back that the reference for this * variable can be found. For example, if the correlated variable references * the direct descendant this will return 1. */ public int getQueryLevelOffset() { return query_level_offset; } /** * Sets the value this correlated variable evaluates to. */ public void setEvalResult(TObject ob) { this.eval_result = ob; } /** * Given a VariableResolver this will set the value of the correlated * variable. */ public void setFromResolver(VariableResolver resolver) { Variable v = getVariable(); setEvalResult(resolver.resolve(v)); } /** * Returns the value this correlated variable evaluates to. */ public TObject getEvalResult() { return eval_result; } /** * Returns the TType this correlated variable evaluates to. */ public TType returnTType() { return eval_result.getTType(); } /** * Clones the object. 
*/ public Object clone() throws CloneNotSupportedException { CorrelatedVariable v = (CorrelatedVariable) super.clone(); v.variable = (Variable) variable.clone(); return v; } public String toString() { return "CORRELATED: " + getVariable() + " = " + getEvalResult(); } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/DataCellCache.java000066400000000000000000000264421330501023400251650ustar00rootroot00000000000000/** * com.mckoi.database.DataCellCache 21 Mar 1998 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import com.mckoi.util.Cache; import com.mckoi.debug.*; import java.util.HashMap; /** * This object represents a cache for accesses to the the data cells within * a Table. Whenever a column/row index to a cell is accessed, the cache is * first checked. If the cell is not in the cache then it may go ahead and * read the cell from the file. *

* ISSUE: We may need to keep track of memory used. Since a String may * use up much memory, we may need a cap on the maximum size the cache can grow * to. For example, we wouldn't want to cache a large document. This could * be handled at a higher level? * * @author Tobias Downer */ final class DataCellCache { /** * The TransactionSystem that this cache is from. */ private final TransactionSystem system; /** * The maximum size of a DataCell that is allowed to go in the cache. */ private int MAX_CELL_SIZE; /** * The master cache. */ private final DCCache cache; /** * The current size of the cache. */ private long current_cache_size; /** * The Constructors. * * @param max_cache_size the maximum size in bytes that the cache is allowed * to grow to (eg. 4000000). * @param max_cell_size the maximum size of an object that can be stored in * the cache. * @param hash_size the number of elements in the hash (should be a prime * number). */ DataCellCache(TransactionSystem system, int max_cache_size, int max_cell_size, int hash_size) { this.system = system; MAX_CELL_SIZE = max_cell_size; cache = new DCCache(hash_size, max_cache_size); } DataCellCache(TransactionSystem system, int max_cache_size, int max_cell_size) { this(system, max_cache_size, max_cell_size, 88547); // Good prime number hash size } /** * Dynamically resizes the data cell cache so it can store more/less data. * This is used to change cache dynamics at runtime. */ public synchronized void alterCacheDynamics( int max_cache_size, int max_cell_size) { MAX_CELL_SIZE = max_cell_size; cache.setCacheSize(max_cache_size); } /** * Inner class that creates an object that hashes nicely over the cache * source. 
*/ private final static class DCCacheKey { final int row; final short column; final int table_id; DCCacheKey(final int table_id, final short column, final int row) { this.table_id = table_id; this.column = column; this.row = row; } public boolean equals(Object ob) { DCCacheKey dest_key = (DCCacheKey) ob; return row == dest_key.row && column == dest_key.column && table_id == dest_key.table_id; } public int hashCode() { // Yicks - this one is the best by far! return (((int) column + table_id + (row * 189977)) * 50021) << 4; } } /** * Returns an approximation of the amount of memory taken by a TObject. */ private static final int amountMemory(TObject cell) { return 16 + cell.approximateMemoryUse(); } /** * Puts a TObject on the cache for the given row/column of the table. * Ignores any cells that are larger than the maximum size. */ public synchronized void put(int table_key, int row, int column, TObject cell) { int memory_use = amountMemory(cell); if (memory_use <= MAX_CELL_SIZE) { // Generate the key DCCacheKey key = new DCCacheKey(table_key, (short) column, row); // If there is an existing object here, remove it from the cache and // update the current_cache_size. TObject removed_cell = (TObject) cache.remove(key); if (removed_cell != null) { current_cache_size -= amountMemory(removed_cell); } // Put the new entry in the cache cache.put(key, cell); current_cache_size += memory_use; } else { // If the object is larger than the minimum object size that can be // cached, remove any existing entry (possibly smaller) from the cache. remove(table_key, row, column); } } /** * Gets a TObject from the cache. If the row/column is not in the cache * then it returns null. */ public synchronized TObject get(int table_key, int row, int column) { return (TObject) cache.get(new DCCacheKey(table_key, (short) column, row)); } /** * Removes a TObject from the cache. This is used when we need to notify * the cache that an object has become outdated. 
This should be used when * the cell has been removed or changed. * Returns the cell that was removed, or null if there was no cell at the * given location. */ public synchronized TObject remove(int table_key, int row, int column) { TObject cell = (TObject) cache.remove( new DCCacheKey(table_key, (short) column, row)); if (cell != null) { current_cache_size -= amountMemory(cell); } return cell; } /** * Completely wipe the cache of all entries. */ public synchronized void wipe() { if (cache.nodeCount() == 0 && current_cache_size != 0) { system.Debug().write(Lvl.ERROR, this, "Assertion failed - if nodeCount = 0 then current_cache_size " + "must also be 0."); } if (cache.nodeCount() != 0) { cache.removeAll(); system.stats().increment("DataCellCache.total_cache_wipe"); } current_cache_size = 0; } /** * Returns an estimation of the current cache size in bytes. */ public synchronized long getCurrentCacheSize() { return current_cache_size; } /** * Reduce the cache size by the given amount. */ private void reduceCacheSize(long val) { current_cache_size -= val; } // ---------- Primes ---------- /** * Returns a prime number from PRIME_LIST that is the closest prime greater * or equal to the given value. */ static int closestPrime(int value) { for (int i = 0; i < PRIME_LIST.length; ++i) { if (PRIME_LIST[i] >= value) { return PRIME_LIST[i]; } } // Return the last prime return PRIME_LIST[PRIME_LIST.length - 1]; } /** * A list of primes ordered from lowest to highest. 
*/ private final static int[] PRIME_LIST = new int[] { 3001, 4799, 13999, 15377, 21803, 24247, 35083, 40531, 43669, 44263, 47387, 50377, 57059, 57773, 59399, 59999, 75913, 96821, 140551, 149011, 175633, 176389, 183299, 205507, 209771, 223099, 240259, 258551, 263909, 270761, 274679, 286129, 290531, 296269, 298021, 300961, 306407, 327493, 338851, 351037, 365489, 366811, 376769, 385069, 410623, 430709, 433729, 434509, 441913, 458531, 464351, 470531, 475207, 479629, 501703, 510709, 516017, 522211, 528527, 536311, 539723, 557567, 593587, 596209, 597451, 608897, 611069, 642547, 670511, 677827, 679051, 688477, 696743, 717683, 745931, 757109, 760813, 763957, 766261, 781559, 785597, 788353, 804493, 813559, 836917, 854257, 859973, 883217, 884789, 891493, 902281, 910199, 915199, 930847, 939749, 940483, 958609, 963847, 974887, 983849, 984299, 996211, 999217, 1007519, 1013329, 1014287, 1032959, 1035829, 1043593, 1046459, 1076171, 1078109, 1081027, 1090303, 1095613, 1098847, 1114037, 1124429, 1125017, 1130191, 1159393, 1170311, 1180631, 1198609, 1200809, 1212943, 1213087, 1226581, 1232851, 1287109, 1289867, 1297123, 1304987, 1318661, 1331107, 1343161, 1345471, 1377793, 1385117, 1394681, 1410803, 1411987, 1445261, 1460497, 1463981, 1464391, 1481173, 1488943, 1491547, 1492807, 1528993, 1539961, 1545001, 1548247, 1549843, 1551001, 1553023, 1571417, 1579099, 1600259, 1606153, 1606541, 1639751, 1649587, 1657661, 1662653, 1667051, 1675273, 1678837, 1715537, 1718489, 1726343, 1746281, 1749107, 1775489, 1781881, 1800157, 1806859, 1809149, 1826753, 1834607, 1846561, 1849241, 1851991, 1855033, 1879931, 1891133, 1893737, 1899137, 1909513, 1916599, 1917749, 1918549, 1919347, 1925557, 1946489, 1961551, 1965389, 2011073, 2033077, 2039761, 2054047, 2060171, 2082503, 2084107, 2095099, 2096011, 2112193, 2125601, 2144977, 2150831, 2157401, 2170141, 2221829, 2233019, 2269027, 2270771, 2292449, 2299397, 2303867, 2309891, 2312407, 2344301, 2348573, 2377007, 2385113, 2386661, 2390051, 2395763, 
2422999, 2448367, 2500529, 2508203, 2509841, 2513677, 2516197, 2518151, 2518177, 2542091, 2547469, 2549951, 2556991, 2563601, 2575543, 2597629, 2599577, 2612249, 2620003, 2626363, 2626781, 2636773, 2661557, 2674297, 2691571, 2718269, 2725691, 2729381, 2772199, 2774953, 2791363, 2792939, 2804293, 2843021, 2844911, 2851313, 2863519, 2880797, 2891821, 2897731, 2904887, 2910251, 2928943, 2958341, 2975389 }; // ---------- Inner classes ---------- /** * This extends the 'Cache' class. */ private final class DCCache extends Cache { /** * The maximum size that the cache can grow to in bytes. */ private int MAX_CACHE_SIZE; /** * The Constructor. */ public DCCache(int cache_hash_size, int max_cache_size) { super(cache_hash_size, -1, 20); this.MAX_CACHE_SIZE = max_cache_size; } /** * Used to dynamically alter the size of the cache. May cause a cache * clean if the size is over the limit. */ public void setCacheSize(int cache_size) { this.MAX_CACHE_SIZE = cache_size; checkClean(); } // ----- Overwritten from Cache ----- protected void checkClean() { if (getCurrentCacheSize() >= MAX_CACHE_SIZE) { // Update the current cache size (before we wiped). system.stats().set((int) getCurrentCacheSize(), "DataCellCache.current_cache_size"); clean(); // The number of times we've cleared away old data cell nodes. system.stats().increment("DataCellCache.cache_clean"); } } protected boolean shouldWipeMoreNodes() { return (getCurrentCacheSize() >= (int) ((MAX_CACHE_SIZE * 100L) / 115L)); } protected void notifyWipingNode(Object ob) { super.notifyWipingNode(ob); // Update our memory indicator accordingly. 
TObject cell = (TObject) ob; reduceCacheSize(amountMemory(cell)); } protected void notifyGetWalks(long total_walks, long total_get_ops) { int avg = (int) ((total_walks * 1000000L) / total_get_ops); system.stats().set(avg, "DataCellCache.avg_hash_get_mul_1000000"); system.stats().set((int) getCurrentCacheSize(), "DataCellCache.current_cache_size"); system.stats().set(nodeCount(), "DataCellCache.current_node_count"); } } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/DataCellSerialization.java000066400000000000000000000336031330501023400267740ustar00rootroot00000000000000/** * com.mckoi.database.DataCellSerialization 07 Dec 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import com.mckoi.database.global.*; import com.mckoi.util.BigNumber; import java.util.zip.*; import java.util.Date; import java.math.*; import java.io.*; /** * An object that manages the serialization and deserialization of objects * to the database file system. This object maintains a buffer that stores * intermediate serialization information as objects are written. * * @author Tobias Downer */ final class DataCellSerialization extends ByteArrayOutputStream implements CellInput { /** * A Deflater and Inflater used to compress and uncompress the size of data * fields put into the store. 
*/
  private Deflater deflater;
  private Inflater inflater;

  // Scratch buffer holding the compressed image of a cell (reused between
  // calls to avoid reallocation), and the number of valid bytes in it.
  private byte[] compress_buf;
  private int compress_length;

  /**
   * If true, when writing out use the compressed form.
   */
  private boolean use_compressed;

  /**
   * The type of object (one of the Types.DB_* codes).
   */
  private short type;

  /**
   * Set to true if null.
   */
  private boolean is_null;

  /**
   * Constructor.  Starts with a 1KB scratch buffer which grows on demand.
   */
  DataCellSerialization() {
    super(1024);
  }

  /**
   * Returns the number of bytes to skip on the stream to go past the
   * next serialization.  The stored length includes the 4 byte length
   * header itself, which has already been consumed here.
   */
  int skipSerialization(CellInput din) throws IOException {
    int len = din.readInt();
    return len - 4;
  }

  /**
   * Reads input from the given CellInput object.  Returns the deserialized
   * value, or null if the stored cell was null.
   */
  Object readSerialization(CellInput din) throws IOException {
    // NOTE: 'count' (inherited from ByteArrayOutputStream) doubles as the
    //   read cursor for the CellInput implementation below - reset it.
    count = 0;
    // Read the length first,
    int len = din.readInt();
    short s = din.readShort();
    // Bits 0-11 = type code, bit 13 = null flag, bit 14 = compressed flag.
    type = (short) (s & 0x0FFF);
    is_null = (s & 0x02000) != 0;
    use_compressed = (s & 0x04000) != 0;

    // If we are compressed...
    if (use_compressed) {
      // Uncompress it,
      int uncompressed_len = din.readInt();
      if (buf.length < uncompressed_len) {
        buf = new byte[uncompressed_len];
      }

      // Read the compressed image into the scratch buffer.
      // (total len) - (length int) - (type short) - (uncompressed size int)
      compress_length = len - 4 - 2 - 4;
      if (compress_buf == null || compress_buf.length < compress_length) {
        compress_buf = new byte[compress_length];
      }
      din.readFully(compress_buf, 0, compress_length);

      if (inflater == null) {
        inflater = new Inflater();
      }
      inflater.reset();
      inflater.setInput(compress_buf, 0, compress_length);
      int inflate_count;
      try {
        // 'inflate_count' should equal 'uncompressed_len' for a well formed
        // record; a short count would indicate a corrupt store.
        inflate_count = inflater.inflate(buf, 0, uncompressed_len);
      }
      catch (DataFormatException e) {
        // FIX: preserve the original exception as the cause instead of
        // discarding it (was: new RuntimeException(e.getMessage())).
        throw new RuntimeException(e.getMessage(), e);
      }

      // From here on, read the decompressed form out of our own buffer.
      din = this;
    }

    return readFromCellInput(din);
  }

  /**
   * Creates a BigNumber object used to store a numeric value in the database.
   */
  private BigNumber createBigNumber(byte[] buf, int scale, byte state) {
    // Otherwise generate the number from the data given.
    return BigNumber.fromData(buf, scale, state);
  }

  /**
   * Reads an object from the given CellInput.  No type information is
   * included with the returned object so it must be wrapped in a TObject.
   * Returns null if the object stored was null.
   */
  private Object readFromCellInput(CellInput din) throws IOException {

    // If null byte is 1 then return null data cell.
    if (is_null) {
      return null;
    }
    else {
      // This type isn't actually serialized anymore, but we must understand
      // how to deserialize it because of older database formats.
      if (type == Types.DB_NUMERIC) {
        int scale = din.readShort();
        int num_len = din.readInt();
        byte[] buf = new byte[num_len];
        din.readFully(buf, 0, num_len);
        return createBigNumber(buf, scale, (byte) 0);
      }
      else if (type == Types.DB_NUMERIC_EXTENDED) {
        // Extended numerics carry a state byte (NaN, +/- infinity).
        byte state = din.readByte();
        int scale = din.readShort();
        int num_len = din.readInt();
        byte[] buf = new byte[num_len];
        din.readFully(buf, 0, num_len);
        return createBigNumber(buf, scale, state);
      }
      else if (type == Types.DB_STRING) {
        int str_length = din.readInt();
        // No length string is a static to save memory.
        if (str_length == 0) {
          return "";
        }
        String dastr = din.readChars(str_length);
        // NOTE: We intern the string to save memory.
        return dastr.intern();
      }
      else if (type == Types.DB_BOOLEAN) {
        if (din.readByte() == 0) {
          return Boolean.FALSE;
        }
        else {
          return Boolean.TRUE;
        }
      }
      else if (type == Types.DB_TIME) {
        return new java.util.Date(din.readLong());
      }
      else if (type == Types.DB_BLOB) {
        int blob_length = din.readInt();
        // Intern to save memory
        if (blob_length == 0) {
          return EMPTY_BYTE_LONG_OBJECT;
        }
        byte[] buf = new byte[blob_length];
        din.readFully(buf, 0, blob_length);
        return new ByteLongObject(buf);
      }
      else if (type == Types.DB_OBJECT) {
        int blob_length = din.readInt();
        byte[] buf = new byte[blob_length];
        din.readFully(buf, 0, blob_length);
        return new ByteLongObject(buf);
      }
      else {
        throw new Error("Don't understand type: " + type);
      }
    }
  }

  /**
   * Writes the current serialized data buffer to the output stream.  Layout:
   * [int total length][short type|flags][optional int uncompressed size]
   * [payload bytes].
   */
  void writeSerialization(DataOutputStream out) throws IOException {
    int len = use_compressed ? (compress_length + 4) : count;
    // size + (type | null | compressed)
    len += 4 + 2;
    out.writeInt(len);
    short s = type;
    if (is_null) {
      s |= 0x02000;
    }
    if (use_compressed) {
      s |= 0x04000;
    }
    out.writeShort(s);
    // Write out the data.
    if (use_compressed) {
      // If compressed, must write out uncompressed size first.
      out.writeInt(count);
      out.write(compress_buf, 0, compress_length);
    }
    else {
      out.write(buf, 0, count);
    }
    // And that's it!
  }

  /**
   * Sets this up with a TObject to serialize.  After this call the internal
   * buffer (and possibly 'compress_buf') holds the serialized form which is
   * emitted by 'writeSerialization'.
   */
  void setToSerialize(TObject cell) throws IOException {

    is_null = false;
    count = 0;
    use_compressed = false;
    TType ttype = cell.getTType();
    if (ttype instanceof TStringType) {
      type = Types.DB_STRING;
    }
    else if (ttype instanceof TNumericType) {
      // NOTE: We set type to DB_NUMERIC_EXTENDED which includes support for
      //   NaN, negative infinity and positive infinity.
      type = Types.DB_NUMERIC_EXTENDED;
    }
    else if (ttype instanceof TBooleanType) {
      type = Types.DB_BOOLEAN;
    }
    else if (ttype instanceof TDateType) {
      type = Types.DB_TIME;
    }
    else if (ttype instanceof TBinaryType) {
      type = Types.DB_BLOB;
    }
    else if (ttype instanceof TJavaObjectType) {
      type = Types.DB_OBJECT;
    }
    else {
      throw new Error("Couldn't handle type: " + ttype.getClass());
    }

    if (cell.isNull()) {
      is_null = true;
      return;
    }

    // Write the serialized form to the buffer,
    // (FIX: removed an unused 'Object ob = cell.getObject();' local here.)
    writeToBuffer(cell);

    // Should we compress?
    // If it's a string, blob or serialized object, we may want to compress
    // it.  (FIX: reuse 'ttype' rather than re-fetching into a local that
    // shadowed the 'short type' field.)
    if (ttype instanceof TStringType ||
        ttype instanceof TBinaryType ||
        ttype instanceof TJavaObjectType) {
      int length = count;
      // Any strings > 150 are compressed
      if (length > 150) {

        if (deflater == null) {
          deflater = new Deflater();
        }

        deflater.setInput(buf, 0, length);
        deflater.finish();
        if (compress_buf == null || compress_buf.length < length) {
          compress_buf = new byte[length];
        }
        compress_length = deflater.deflate(compress_buf);
        deflater.reset();

        // Only use the compressed form if it actually saved space.
        if (compress_length < length) {
          use_compressed = true;
        }
      }
    }
  }

  /**
   * Writes the TObject to the data buffer in this object.  The wire order
   * for numerics (state, scale, length, digits) matches the
   * DB_NUMERIC_EXTENDED branch of 'readFromCellInput'.
   */
  private void writeToBuffer(TObject cell) throws IOException {
    Object ob = cell.getObject();
    if (ob instanceof BigNumber) {
      BigNumber ddc = (BigNumber) ob;
      byte[] buf = ddc.toByteArray();
      writeByte(ddc.getState());
      writeShort((short) ddc.getScale());
      writeInt(buf.length);
      write(buf);
    }
    else if (ob instanceof String) {
      String str = (String) ob;
      writeInt(str.length());
      writeChars(str);
    }
    else if (ob instanceof Boolean) {
      Boolean bool = (Boolean) ob;
      writeByte((byte) (bool.booleanValue() ? 1 : 0));
    }
    else if (ob instanceof java.util.Date) {
      Date date = (Date) ob;
      writeLong(date.getTime());
    }
    else if (ob instanceof ByteLongObject) {
      ByteLongObject blob = (ByteLongObject) ob;
      writeInt(blob.length());
      write(blob.getByteArray());
    }
    else {
      throw new Error("Don't know how to serialize class " + ob.getClass());
    }
  }

  // ---------- Big-endian primitive writers (into the inherited buffer) ----

  public final void writeBoolean(boolean v) throws IOException {
    write(v ? 1 : 0);
  }

  public final void writeByte(int v) throws IOException {
    write(v);
  }

  public final void writeShort(int v) throws IOException {
    write((v >>> 8) & 0xFF);
    write((v >>> 0) & 0xFF);
  }

  public final void writeChar(int v) throws IOException {
    write((v >>> 8) & 0xFF);
    write((v >>> 0) & 0xFF);
  }

  public final void writeInt(int v) throws IOException {
    write((v >>> 24) & 0xFF);
    write((v >>> 16) & 0xFF);
    write((v >>>  8) & 0xFF);
    write((v >>>  0) & 0xFF);
  }

  public final void writeLong(long v) throws IOException {
    write((int)(v >>> 56) & 0xFF);
    write((int)(v >>> 48) & 0xFF);
    write((int)(v >>> 40) & 0xFF);
    write((int)(v >>> 32) & 0xFF);
    write((int)(v >>> 24) & 0xFF);
    write((int)(v >>> 16) & 0xFF);
    write((int)(v >>>  8) & 0xFF);
    write((int)(v >>>  0) & 0xFF);
  }

  public final void writeChars(String s) throws IOException {
    int len = s.length();
    for (int i = 0 ; i < len ; ++i) {
      int v = s.charAt(i);
      write((v >>> 8) & 0xFF);
      write((v >>> 0) & 0xFF);
    }
  }

  // ---------- Implemented from CellInput ----------
  // These read back from the inherited 'buf', using 'count' as the cursor.

  public int read() throws IOException {
    return buf[count++] & 0x0FF;
  }

  public int read(byte b[], int off, int len) throws IOException {
    if (len <= 0) {
      return 0;
    }
    System.arraycopy(buf, count, b, off, len);
    count += len;
    return len;
  }

  public long skip(long n) throws IOException {
    if (n < 0) {
      return 0;
    }
    // NOTE: narrows to int via the compound assignment; buffer offsets are
    //   always int-sized here.
    count += n;
    return n;
  }

  public int available() throws IOException {
    throw new Error("Not supported");
  }

  public void mark(int readAheadLimit) throws IOException {
    throw new Error("Not supported");
  }

  // [ Function clash with ByteArrayOutputStream.reset() here but it should
  //   be okay ]
  // public void reset() throws IOException {
  //   throw new Error("Not supported");
  // }

  public void close() throws IOException {
    throw new Error("Not supported");
  }

  // ---------- Implemented from DataInput ----------

  public void readFully(byte[] b) throws IOException {
    read(b, 0, b.length);
  }

  public void readFully(byte b[], int off, int len) throws IOException {
    read(b, off, len);
  }

  public int skipBytes(int n) throws IOException {
    return (int) skip(n);
  }

  public boolean readBoolean() throws IOException {
    return (read() != 0);
  }

  public byte readByte() throws IOException {
    return (byte) read();
  }

  public int readUnsignedByte() throws IOException {
    return read();
  }

  public short readShort() throws IOException {
    int ch1 = read();
    int ch2 = read();
    return (short)((ch1 << 8) + (ch2 << 0));
  }

  public int readUnsignedShort() throws IOException {
    int ch1 = read();
    int ch2 = read();
    return (ch1 << 8) + (ch2 << 0);
  }

  public char readChar() throws IOException {
    int ch1 = read();
    int ch2 = read();
    return (char)((ch1 << 8) + (ch2 << 0));
  }

  // Reusable buffer for 'readChars' so short strings don't allocate.
  private char[] char_buffer;

  public String readChars(int length) throws IOException {
    if (length <= 8192) {
      if (char_buffer == null) {
        char_buffer = new char[8192];
      }
      for (int i = 0; i < length; ++i) {
        char_buffer[i] = readChar();
      }
      return new String(char_buffer, 0, length);
    }
    else {
      StringBuffer chrs = new StringBuffer(length);
      for (int i = length; i > 0; --i) {
        chrs.append(readChar());
      }
      return new String(chrs);
    }
  }

  public int readInt() throws IOException {
    int ch1 = read();
    int ch2 = read();
    int ch3 = read();
    int ch4 = read();
    return (int)((ch1 << 24) + (ch2 << 16) + (ch3 << 8) + (ch4 << 0));
  }

  public long readLong() throws IOException {
    return ((long)(readInt()) << 32) + (readInt() & 0xFFFFFFFFL);
  }

  public float readFloat() throws IOException {
    return Float.intBitsToFloat(readInt());
  }

  public double readDouble() throws IOException {
    return Double.longBitsToDouble(readLong());
  }

  public String readLine() throws IOException {
    throw new Error("Not implemented.");
  }

  public String readUTF() throws IOException {
    throw new Error("Not implemented.");
  }

  // ---------- Some statics -----------

  /**
   * A 0 size ByteLongObject object.
*/ private static final ByteLongObject EMPTY_BYTE_LONG_OBJECT = new ByteLongObject(new byte[0]); } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/DataIndexDef.java000066400000000000000000000075471330501023400250550ustar00rootroot00000000000000/** * com.mckoi.database.DataIndexDef 07 Sep 2002 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; /** * Represents index meta-information on a table. This information is part of * DataIndexSetDef and is stored with the contents of a table. * * @author Tobias Downer */ public class DataIndexDef { /** * The name of this index. */ private String index_name; /** * The list of column name that this index represents. For example, if this * is a composite primary key, this would contain each column name in the * primary key. */ private String[] column_names; /** * Returns the index set pointer of this index. This value is used when * requesting the index from an IndexSet. */ private int index_pointer; /** * The type of Index this is. Currently only 'BLIST' is supported. */ private String index_type; /** * True if this index may only contain unique values. */ private boolean unique; /** * Constructor. 
*/ public DataIndexDef(String index_name, String[] column_names, int index_pointer, String index_type, boolean unique) { this.index_name = index_name; this.column_names = (String[]) column_names.clone(); this.index_pointer = index_pointer; this.index_type = index_type; this.unique = unique; } public DataIndexDef(DataIndexDef def) { this(def.index_name, def.column_names, def.index_pointer, def.index_type, def.unique); } /** * Returns the name of this index. */ public String getName() { return index_name; } /** * Returns the column names that make up this index. */ public String[] getColumnNames() { return column_names; } /** * Returns the pointer to the index in the IndexSet. */ public int getPointer() { return index_pointer; } /** * Returns a String that describes the type of index this is. */ public String getType() { return index_type; } /** * Returns true if this is a unique index. */ public boolean isUniqueIndex() { return unique; } /** * Writes this object to the given DataOutputStream. */ public void write(DataOutput dout) throws IOException { dout.writeInt(1); dout.writeUTF(index_name); dout.writeInt(column_names.length); for (int i = 0; i < column_names.length; ++i) { dout.writeUTF(column_names[i]); } dout.writeInt(index_pointer); dout.writeUTF(index_type); dout.writeBoolean(unique); } /** * Reads a DataIndexDef from the given DataInput object. 
*/ public static DataIndexDef read(DataInput din) throws IOException { int version = din.readInt(); if (version != 1) { throw new IOException("Don't understand version."); } String index_name = din.readUTF(); int sz = din.readInt(); String[] cols = new String[sz]; for (int i = 0; i < sz; ++i) { cols[i] = din.readUTF(); } int index_pointer = din.readInt(); String index_type = din.readUTF(); boolean unique = din.readBoolean(); return new DataIndexDef(index_name, cols, index_pointer, index_type, unique); } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/DataIndexSetDef.java000066400000000000000000000134621330501023400255220ustar00rootroot00000000000000/** * com.mckoi.database.DataIndexSetDef 08 Sep 2002 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import java.io.IOException; import java.io.DataInput; import java.io.DataOutput; import java.util.ArrayList; /** * Represents the meta-data for a set of indexes of a table. * * @author Tobias Downer */ public class DataIndexSetDef { /** * The TableName this index set meta data is for. */ private TableName table_name; /** * The list of indexes in the table. */ private ArrayList index_list; /** * True if this object is immutable. */ private boolean immutable; /** * Constructor. 
*/ public DataIndexSetDef(TableName table_name) { this.table_name = table_name; index_list = new ArrayList(); immutable = false; } public DataIndexSetDef(DataIndexSetDef def) { this.table_name = def.table_name; index_list = new ArrayList(); for (int i = 0; i < def.indexCount(); ++i) { index_list.add(new DataIndexDef(def.indexAt(i))); } immutable = false; } /** * Sets the immutable flag. */ public void setImmutable() { this.immutable = true; } /** * Adds a DataIndexDef to this table. */ public void addDataIndexDef(DataIndexDef def) { if (!immutable) { index_list.add(def); } else { throw new RuntimeException("Tried to add index to immutable def."); } } /** * Removes a DataIndexDef to this table. */ public void removeDataIndexDef(int i) { if (!immutable) { index_list.remove(i); } else { throw new RuntimeException("Tried to add index to immutable def."); } } /** * Returns the total number of index in this table. */ public int indexCount() { return index_list.size(); } /** * Returns the DataIndexDef at the given index in this list. */ public DataIndexDef indexAt(int i) { return (DataIndexDef) index_list.get(i); } /** * Finds the index with the given name and returns the index in the list of * the index (confusing comment!). Returns -1 if the name wasn't found. */ public int findIndexWithName(String index_name) { int sz = indexCount(); for (int i = 0; i < sz; ++i) { if (indexAt(i).getName().equals(index_name)) { return i; } } return -1; } /** * Finds the first index for the given column name list. Returns -1 if an * index over the given composite columns was not found. 
*/
  public int findIndexForColumns(String[] cols) {
    int sz = indexCount();
    for (int i = 0; i < sz; ++i) {
      String[] t_cols = indexAt(i).getColumnNames();
      // Only an exact match (same columns, same order) counts.
      if (t_cols.length == cols.length) {
        boolean passed = true;
        for (int n = 0; n < t_cols.length && passed; ++n) {
          if (!t_cols[n].equals(cols[n])) {
            passed = false;
          }
        }
        if (passed) {
          return i;
        }
      }
    }
    return -1;
  }

  /**
   * Returns the DataIndexDef with the given name or null if it couldn't be
   * found.
   */
  public DataIndexDef indexWithName(String index_name) {
    int i = findIndexWithName(index_name);
    if (i != -1) {
      return indexAt(i);
    }
    else {
      return null;
    }
  }

  /**
   * Attempts to resolve the given index name from the index in this table.
   * If 'ignore_case' is true, then we return the correct case of the index
   * name.
   *
   * @throws DatabaseException if the name is ambiguous (matches more than
   *   one index under case-insensitive comparison) or not found.
   */
  public String resolveIndexName(String index_name, boolean ignore_case)
                                                   throws DatabaseException {
    int sz = indexCount();
    String found = null;
    for (int i = 0; i < sz; ++i) {
      boolean passed;
      String cur_index_name = indexAt(i).getName();
      if (ignore_case) {
        passed = cur_index_name.equalsIgnoreCase(index_name);
      }
      else {
        passed = cur_index_name.equals(index_name);
      }
      if (passed) {
        if (found != null) {
          // FIX: corrected misspelt error message (was "Ambigious").
          throw new DatabaseException("Ambiguous index name '" +
                                      index_name + "'");
        }
        found = cur_index_name;
      }
    }
    if (found == null) {
      throw new DatabaseException("Index '" + index_name + "' not found.");
    }
    return found;
  }

  /**
   * Writes this DataIndexSetDef object to the given DataOutput.  The layout
   * (version tag, schema, name, count + index definitions) mirrors 'read'.
   */
  public void write(DataOutput dout) throws IOException {
    // Version tag so 'read' can reject formats it doesn't understand.
    dout.writeInt(1);
    dout.writeUTF(table_name.getSchema());
    dout.writeUTF(table_name.getName());
    dout.writeInt(index_list.size());
    for (int i = 0; i < index_list.size(); ++i) {
      ((DataIndexDef) index_list.get(i)).write(dout);
    }
  }

  /**
   * Reads the DataIndexSetDef object from the given DataInput.
*/ public static DataIndexSetDef read(DataInput din) throws IOException { int version = din.readInt(); if (version != 1) { throw new IOException("Don't understand version."); } String schema = din.readUTF(); String name = din.readUTF(); int sz = din.readInt(); DataIndexSetDef index_set = new DataIndexSetDef(new TableName(schema, name)); for (int i = 0; i < sz; ++i) { index_set.addDataIndexDef(DataIndexDef.read(din)); } return index_set; } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/DataTable.java000066400000000000000000000576731330501023400244230ustar00rootroot00000000000000/** * com.mckoi.database.DataTable 08 Mar 1998 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import com.mckoi.database.global.SQLTypes; import com.mckoi.util.IntegerVector; import com.mckoi.debug.*; import java.math.BigDecimal; import java.util.Vector; import java.util.ArrayList; import java.io.IOException; import java.io.OutputStream; import java.io.InputStream; import java.io.File; /** * DataTable is a wrapper for a MutableTableDataSource that fits into the * query hierarchy level. A DataTable represents a table within a * transaction. Adding, removing rows to a DataTable will change the * contents only with the context of the transaction the table was created in. *

* @author Tobias Downer
 */

public final class DataTable extends DefaultDataTable {

  /**
   * The DatabaseConnection object that is the parent of this DataTable.
   */
  private DatabaseConnection connection;

  /**
   * A low level access to the underlying transactional data source.
   */
  private MutableTableDataSource data_source;


  /**
   * ------
   * NOTE: The following values are only kept for lock debugging reasons.
   *   There is no technical reason why they shouldn't be removed.  They
   *   allow us to check that a data table is locked correctly when accesses
   *   are performed on it.
   * ------
   */

  final static boolean LOCK_DEBUG = true;

  /**
   * The number of read locks we have on this table.
   */
  private int debug_read_lock_count = 0;

  /**
   * The number of write locks we have on this table (this should only ever be
   * 0 or 1).
   */
  private int debug_write_lock_count = 0;


  /**
   * Constructs the data table.
   */
  DataTable(DatabaseConnection connection,
            MutableTableDataSource data_source) throws DatabaseException {
    super(connection.getDatabase());
    this.connection = connection;
    this.data_source = data_source;
  }

  /**
   * Convenience - used to log debug messages.
   */
  public final DebugLogger Debug() {
    return connection.getSystem().Debug();
  }

  /**
   * Overwritten from DefaultDataTable to do nothing.  All selectable
   * schemes are handled within the DataTableManager now.
   */
  protected void blankSelectableSchemes(int type) {
  }

  /**
   * Returns the SelectableScheme for the given column.
   * (Overridden from DefaultDataTable).  If the schemes are not in memory
   * then they are loaded now.  This will synchronize over the
   * 'table_manager' which will effectively block this table at the lowest
   * layer until the indices are loaded into memory.
   */
  protected SelectableScheme getRootColumnScheme(int column) {
    checkReadLock();  // Read op

    return data_source.getColumnScheme(column);
  }

  /**
   * We can declare a DataTable as a new type.  This means, instead of
   * referencing a column as 'Customer.CustomerID' we can change the
   * 'Customer' part to anything we wish such as 'C1'.
   */
  public ReferenceTable declareAs(TableName new_name) {
    return new ReferenceTable(this, new_name);
  }

  /**
   * Generates an empty RowData object for 'addRow'ing into the Table.
   * We must first call this method to retrieve a blank RowData object,
   * fill it in with the required information, and then call 'addRow'.
   */
  public final RowData createRowDataObject(QueryContext context) {
    checkSafeOperation();  // safe op
    return new RowData(this);
  }

  /**
   * Returns the current row count.  This queries the DataTableManager for
   * the real value.
   */
  public int getRowCount() {
    checkReadLock();  // read op

    return data_source.getRowCount();
  }

  /**
   * Adds a given 'RowData' object to the table.  This should be used for
   * any rows added to the table.  The order that rows are added into a table
   * is not important.
   * <p>
   * This method performs some checking of the cells in the table.  It first
   * checks that all columns declared as 'not null' have a value that is not
   * null.  It then checks that the added row will not cause any duplicates
   * in a column declared as unique.
   * <p>
   * It then uses the low level io manager to store the data.
   * <p>
   * SYNCHRONIZATION ISSUE: We are assuming this is running in a synchronized
   *   environment that is unable to add or alter rows in this object within
   *   the lifetime of this method.
   */
  public final void add(RowData row_data) throws DatabaseException {
    checkReadWriteLock();  // write op

    if (!row_data.isSameTable(this)) {
      throw new DatabaseException(
                    "Internal Error: Using RowData from different table");
    }
    // Checks passed, so add to table.
    addRow(row_data);

    // Perform a referential integrity check on any changes to the table.
    data_source.constraintIntegrityCheck();
  }

  /**
   * Adds an array of 'RowData' objects to the table.  This should be used
   * for adding a group of rows to the table.  The order that rows are added
   * into a table is not important.
   * <p>
   * This method performs some checking of the cells in the table.  It first
   * checks that all columns declared as 'not null' have a value that is not
   * null.  It then checks that the added row will not cause any duplicates
   * in a column declared as unique.
   * <p>
   * It then uses the low level io manager to store the data.
   * <p>
   * SYNCHRONIZATION ISSUE: We are assuming this is running in a synchronized
   *   environment that is unable to add or alter rows in this object within
   *   the lifetime of this method.
   */
  public final void add(RowData[] row_data_arr) throws DatabaseException {
    checkReadWriteLock();  // write op

    for (int i = 0; i < row_data_arr.length; ++i) {
      RowData row_data = row_data_arr[i];
      if (!row_data.isSameTable(this)) {
        throw new DatabaseException(
                    "Internal Error: Using RowData from different table");
      }
      addRow(row_data);
    }

    // Perform a referential integrity check on any changes to the table.
    data_source.constraintIntegrityCheck();
  }

  /**
   * Adds a new row of data to the table.  First of all, this tells the
   * underlying database mechanism to add the data to this table.  It then
   * add the row information to each SelectableScheme.
   */
  private void addRow(RowData row) throws DatabaseException {

    // This table name (for event notification)
    TableName table_name = getTableName();

    // Fire the 'before' trigger for an insert on this table
    connection.fireTableEvent(new TableModificationEvent(connection,
                                                 table_name, row, true));

    // Add the row to the underlying file system
    int row_number = data_source.addRow(row);

    // Fire the 'after' trigger for an insert on this table
    connection.fireTableEvent(new TableModificationEvent(connection,
                                                table_name, row, false));

    // NOTE: currently nothing being done with 'row_number' after it's
    //   added.  The underlying table data source manages the row index.
  }

  /**
   * Removes the given row from the table.  This is called just before the
   * row is actually deleted.  The method is provided to allow for some
   * maintenance of any search structures such as B-Trees.  This is called
   * from the 'delete' method in Table.
*/
  private void removeRow(int row_number) throws DatabaseException {

    // This table name (for event notification)
    TableName table_name = getTableName();

    // Fire the 'before' trigger for the delete on this table
    connection.fireTableEvent(new TableModificationEvent(connection,
                                          table_name, row_number, true));

    // Delete the row from the underlying database
    data_source.removeRow(row_number);

    // Fire the 'after' trigger for the delete on this table
    connection.fireTableEvent(new TableModificationEvent(connection,
                                         table_name, row_number, false));

  }

  /**
   * Updates the given row with the given data in this table.  This method
   * will likely add the modified data to a new row and delete the old
   * version of the row.
   */
  private void updateRow(int row_number, RowData row)
                                                 throws DatabaseException {

    // This table name (for event notification)
    TableName table_name = getTableName();

    // Fire the 'before' trigger for the update on this table
    connection.fireTableEvent(
        new TableModificationEvent(connection, table_name,
                                   row_number, row, true));

    // Update the row in the underlying database
    data_source.updateRow(row_number, row);

    // Fire the 'after' trigger for the update on this table
    connection.fireTableEvent(
        new TableModificationEvent(connection, table_name,
                                   row_number, row, false));

  }

  /**
   * This is the public method for removing a given result set from this
   * table.  Given a Table object, this will remove from this table any row
   * that are in the given table.  The given Table must have this object as
   * its distant ancestor.  If it does not then it will throw an exception.
   * Examples:  table.delete(table)  -- delete the entire table.
   *            table.delete(table.select( &lt; some condition &gt; ));
   * It returns the number of rows that were deleted.
   * <p>
   * INTERNAL NOTE: The 'table' parameter may be the result
   *   of joins.  This may cause the same row in this table to be referenced
   *   more than once.  We must make sure that we delete any given row only
   *   once by using the 'distinct' function.
   * <p>
   * 'limit' dictates how many rows will be deleted.  If 'limit' is less than
   *   0 then this indicates there is no limit.  Keep in mind that rows are
   *   picked out from top to bottom in the 'table' object.  Normally the
   *   input table will be the result of an un-ordered 'where' clause so
   *   using a limit does not permit deletes in a deterministic manner.
   * <p>
   * ASSUMPTION: There are no duplicate rows in the input set.
   */
  public int delete(Table table, int limit) throws DatabaseException {
    checkReadWriteLock();  // write op

    // Collect every row index referenced by the input table.
    IntegerVector row_set = new IntegerVector(table.getRowCount());
    RowEnumeration e = table.rowEnumeration();
    while (e.hasMoreRows()) {
      row_set.addInt(e.nextRowIndex());
    }
    e = null;

    // HACKY: Find the first column of this table in the search table.  This
    //   will allow us to generate a row set of only the rows in the search
    //   table.
    int first_column = table.findFieldName(getResolvedVariable(0));

    if (first_column == -1) {
      throw new DatabaseException("Search table does not contain any " +
                                  "reference to table being deleted from");
    }

    // Generate a row set that is in this tables domain.
    table.setToRowTableDomain(first_column, row_set, this);

    // row_set may contain duplicate row indices, therefore we must sort so
    // any duplicates are grouped and therefore easier to find.
    row_set.quickSort();

    // If limit less than zero then limit is whole set.
    if (limit < 0) {
      limit = Integer.MAX_VALUE;
    }

    // Remove each row in row set in turn.  Make sure we don't remove the
    // same row index twice.
    int len = Math.min(row_set.size(), limit);
    int last_removed = -1;
    int remove_count = 0;
    for (int i = 0; i < len; ++i) {
      int to_remove = row_set.intAt(i);
      // The set is sorted, so a smaller index here means corruption.
      if (to_remove < last_removed) {
        throw new DatabaseException(
          "Internal error: row sorting error or row_set not in the range > 0");
      }

      if (to_remove != last_removed) {
        removeRow(to_remove);
        last_removed = to_remove;
        ++remove_count;
      }

    }

    if (remove_count > 0) {
      // Perform a referential integrity check on any changes to the table.
      data_source.constraintIntegrityCheck();
    }

    return remove_count;
  }

  // Unlimited delete
  public int delete(Table table) throws DatabaseException {
    return delete(table, -1);
  }

  /**
   * Updates the table by applying the assignment operations over each row
   * that is found in the input 'table' set.  The input table must be a
   * direct child of this DataTable.

* This operation assumes that there is a WRITE lock on this table. A * WRITE lock means no other thread may access this table while the * operation is being performed. (However, a result set may still be * downloading from this table). *

* 'limit' dictates how many rows will be updated. If 'limit' is less than * 0 then this indicates there is no limit. Keep in mind that rows are * picked out from top to bottom in the 'table' object. Normally the * input table will be the result of an un-ordered 'where' clause so using * a limit does not permit updates in a deterministic manner. *

* Returns the number of rows updated in this table. *

* NOTE: We assume there are no duplicate rows to the root set from the * given 'table'. */ public final int update(QueryContext context, Table table, Assignment[] assign_list, int limit) throws DatabaseException { checkReadWriteLock(); // write op // Get the rows from the input table. IntegerVector row_set = new IntegerVector(); RowEnumeration e = table.rowEnumeration(); while (e.hasMoreRows()) { row_set.addInt(e.nextRowIndex()); } e = null; // HACKY: Find the first column of this table in the search table. This // will allow us to generate a row set of only the rows in the search // table. int first_column = table.findFieldName(getResolvedVariable(0)); if (first_column == -1) { throw new DatabaseException("Search table does not contain any " + "reference to table being updated from"); } // Convert the row_set to this table's domain. table.setToRowTableDomain(first_column, row_set, this); // NOTE: Assume there's no duplicate rows. RowData original_data = createRowDataObject(context); RowData row_data = createRowDataObject(context); // If limit less than zero then limit is whole set. if (limit < 0) { limit = Integer.MAX_VALUE; } // Update each row in row set in turn up to the limit. int len = Math.min(row_set.size(), limit); int update_count = 0; for (int i = 0; i < len; ++i) { int to_update = row_set.intAt(i); // Make a RowData object from this row (plus keep the original intact // incase we need to roll back to it). original_data.setFromRow(to_update); row_data.setFromRow(to_update); // Run each assignment on the RowData. for (int n = 0; n < assign_list.length; ++n) { Assignment assignment = assign_list[n]; row_data.evaluate(assignment, context); } // Update the row updateRow(to_update, row_data); ++update_count; } if (update_count > 0) { // Perform a referential integrity check on any changes to the table. data_source.constraintIntegrityCheck(); } return update_count; } /** * Returns the DataTableDef object for this table. 
This object describes * how the table is made up. *

* NOTE: Do not keep references to this object. The * DataTableDef is invalidated when a table is closed. */ public DataTableDef getDataTableDef() { checkSafeOperation(); // safe op return data_source.getDataTableDef(); } /** * Returns the schema that this table is within. */ public String getSchema() { checkSafeOperation(); // safe op return getDataTableDef().getSchema(); } /** * Adds a DataTableListener to the DataTable objects at the root of this * table tree hierarchy. If this table represents the join of a number of * tables then the DataTableListener is added to all the DataTable objects * at the root. *

* A DataTableListener is notified of all modifications to the raw entries * of the table. This listener can be used for detecting changes in VIEWs, * for triggers or for caching of common queries. */ public void addDataTableListener(DataTableListener listener) { // Currently we do nothing with this info. } /** * Removes a DataTableListener from the DataTable objects at the root of * this table tree hierarchy. If this table represents the join of a * number of tables, then the DataTableListener is removed from all the * DataTable objects at the root. */ public void removeDataTableListener(DataTableListener listener) { // Currently we do nothing with this info. } // -------- Methods implemented for DefaultDataTable -------- /** * Given a set, this trickles down through the Table hierarchy resolving * the given row_set to a form that the given ancestor understands. * Say you give the set { 0, 1, 2, 3, 4, 5, 6 }, this function may check * down three levels and return a new 7 element set with the rows fully * resolved to the given ancestors domain. */ void setToRowTableDomain(int column, IntegerVector row_set, TableDataSource ancestor) { checkReadLock(); // read op if (ancestor != this && ancestor != data_source) { throw new RuntimeException("Method routed to incorrect table ancestor."); } } /** * Returns an object that represents the information in the given cell * in the table. This can be used to obtain information about the given * table cells. */ public TObject getCellContents(int column, int row) { checkSafeOperation(); // safe op return data_source.getCellContents(column, row); } /** * Returns an Enumeration of the rows in this table. * Each call to 'nextRowIndex' returns the next valid row index in the table. */ public RowEnumeration rowEnumeration() { checkReadLock(); // read op return data_source.rowEnumeration(); } /** * Locks the root table(s) of this table so that it is impossible to * overwrite the underlying rows that may appear in this table. 
* This is used when cells in the table need to be accessed 'outside' the * lock. So we may have late access to cells in the table. * 'lock_key' is a given key that will also unlock the root table(s). *

* NOTE: This is nothing to do with the 'LockingMechanism' object. */ public void lockRoot(int lock_key) { checkSafeOperation(); // safe op data_source.addRootLock(); } /** * Unlocks the root tables so that the underlying rows may * once again be used if they are not locked and have been removed. This * should be called some time after the rows have been locked. */ public void unlockRoot(int lock_key) { checkSafeOperation(); // safe op data_source.removeRootLock(); } /** * Returns true if the table has its row roots locked (via the lockRoot(int) * method. */ public boolean hasRootsLocked() { // There is no reason why we would need to know this information at // this level. // We need to deprecate this properly. throw new Error("hasRootsLocked is deprecated."); } // ------------ Lock debugging methods ---------- /** * This is called by the 'Lock' class to notify this DataTable that a read/ * write lock has been applied to this table. This is for lock debugging * purposes only. */ final void notifyAddRWLock(int lock_type) { if (LOCK_DEBUG) { if (lock_type == Lock.READ) { ++debug_read_lock_count; } else if (lock_type == Lock.WRITE) { ++debug_write_lock_count; if (debug_write_lock_count > 1) { throw new Error(">1 write lock on table " + getTableName()); } } else { throw new Error("Unknown lock type: " + lock_type); } } } /** * This is called by the 'Lock' class to notify this DataTable that a read/ * write lock has been released from this table. This is for lock debugging * purposes only. */ final void notifyReleaseRWLock(int lock_type) { if (LOCK_DEBUG) { if (lock_type == Lock.READ) { --debug_read_lock_count; } else if (lock_type == Lock.WRITE) { --debug_write_lock_count; } else { Debug().writeException( new RuntimeException("Unknown lock type: " + lock_type)); } } } /** * Returns true if the database is in exclusive mode. 
*/ private boolean isInExclusiveMode() { // Check the connection locking mechanism is in exclusive mode return connection.getLockingMechanism().isInExclusiveMode(); } /** * Checks the database is in exclusive mode. */ private void checkInExclusiveMode() { if (!isInExclusiveMode()) { Debug().writeException(new RuntimeException( "Performed exclusive operation on table and not in exclusive mode!")); } } /** * Check that we can safely read from this table. */ private void checkReadLock() { if (LOCK_DEBUG) { // All 'sUSR' tables are given read access because they may only be // written under exclusive mode anyway. boolean is_internal_table = getTableName().getSchema().equals(Database.SYSTEM_SCHEMA); if (!(is_internal_table || debug_read_lock_count > 0 || debug_write_lock_count > 0 || isInExclusiveMode())) { System.err.println(); System.err.print(" is_internal_table = " + is_internal_table); System.err.print(" debug_read_lock_count = " + debug_read_lock_count); System.err.print(" debug_write_lock_count = " + debug_write_lock_count); System.err.println(" isInExclusiveMode = " + isInExclusiveMode()); Debug().writeException(new Error( "Invalid read access on table '" + getTableName() + "'")); } } } /** * Check that we can safely read/write from this table. This should catch * any synchronization concurrent issues. */ private void checkReadWriteLock() { if (LOCK_DEBUG) { // We have to own exactly one write lock, or be in exclusive mode. if (!(debug_write_lock_count == 1 || isInExclusiveMode())) { Debug().writeException( new Error("Invalid read/write access on table '" + getTableName() + "'")); } } } /** * Check that we can run a safe operation. */ private void checkSafeOperation() { // no operation - nothing to check for... } // ---------- Overwritten to output debug info ---------- // NOTE: These can all safely be commented out. 
public int getColumnCount() { checkSafeOperation(); // safe op return super.getColumnCount(); } public Variable getResolvedVariable(int column) { checkSafeOperation(); // safe op return super.getResolvedVariable(column); } public int findFieldName(Variable v) { checkSafeOperation(); // safe op return super.findFieldName(v); } SelectableScheme getSelectableSchemeFor(int column, int original_column, Table table) { checkReadLock(); // read op return super.getSelectableSchemeFor(column, original_column, table); } RawTableInformation resolveToRawTable(RawTableInformation info) { checkReadLock(); // read op return super.resolveToRawTable(info); } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/DataTableColumnDef.java000066400000000000000000000563031330501023400262050ustar00rootroot00000000000000/** * com.mckoi.database.DataTableColumnDef 27 Jul 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import java.io.*; import com.mckoi.database.global.ColumnDescription; import com.mckoi.database.global.SQLTypes; /** * All the information regarding a column in a table. * * @author Tobias Downer */ public class DataTableColumnDef { /** * A string that contains some constraints. This string contains * information about whether the column is not null, unique, primary key, * etc. 
*/ private byte[] constraints_format = new byte[16]; /** * The name of the column. */ private String name; /** * The sql column type (as defined in java.sql.Types). */ private int sql_type; /** * The actual column type in the database (as defined in * com.mckoi.database.global.Types). */ private int db_type; /** * The size of the data. */ private int size; /** * The scale of the data. */ private int scale; /** * The locale string if this column represents a string. If this is an * empty string, the column has no locale (the string is collated * lexicographically). */ private String locale_str = ""; /** * The locale Collator strength if this column represents a string. The * value here is taken from java.text.Collator. */ private int str_strength; /** * The locale Collator decomposition if this column represents a string. The * value here is taken from java.text.Collator. */ private int str_decomposition; /** * The default expression string. */ private String default_expression_string; // /** // * The expression that is executed to set the default value. // */ // private Expression default_exp; /** * If this is a foreign key, the table.column that this foreign key * refers to. * @deprecated */ private String foreign_key = ""; /** * The type of index to use on this column. */ private String index_desc = ""; /** * If this is a Java Object column, this is a constraint that the object * must be derived from to be added to this column. If not specified, * it defaults to 'java.lang.Object'. */ private String class_constraint = ""; /** * The constraining Class object itself. */ private Class constraining_class; /** * The TType object for this column. */ public TType type; /** * Constructs the column definition. */ public DataTableColumnDef() { } /** * Creates a copy of the given column definition. 
*/ public DataTableColumnDef(DataTableColumnDef column_def) { System.arraycopy(column_def.constraints_format, 0, constraints_format, 0, constraints_format.length); name = column_def.name; sql_type = column_def.sql_type; db_type = column_def.db_type; size = column_def.size; scale = column_def.scale; locale_str = column_def.locale_str; str_strength = column_def.str_strength; str_decomposition = column_def.str_decomposition; if (column_def.default_expression_string != null) { default_expression_string = column_def.default_expression_string; // default_exp = new Expression(column_def.default_exp); } foreign_key = column_def.foreign_key; index_desc = column_def.index_desc; class_constraint = column_def.class_constraint; type = column_def.type; } // ---------- Set methods ---------- public void setName(String name) { this.name = name; } public void setNotNull(boolean status) { constraints_format[0] = (byte) (status ? 1 : 0); } // public void setUnique(boolean status) { // constraints_format[1] = (byte) (status ? 1 : 0); // } // // public void setPrimaryKey(boolean status) { // constraints_format[2] = (byte) (status ? 
1 : 0); // } public void setSQLType(int sql_type) { this.sql_type = sql_type; if (sql_type == SQLTypes.BIT || sql_type == SQLTypes.BOOLEAN) { db_type = com.mckoi.database.global.Types.DB_BOOLEAN; } else if (sql_type == SQLTypes.TINYINT || sql_type == SQLTypes.SMALLINT || sql_type == SQLTypes.INTEGER || sql_type == SQLTypes.BIGINT || sql_type == SQLTypes.FLOAT || sql_type == SQLTypes.REAL || sql_type == SQLTypes.DOUBLE || sql_type == SQLTypes.NUMERIC || sql_type == SQLTypes.DECIMAL) { db_type = com.mckoi.database.global.Types.DB_NUMERIC; } else if (sql_type == SQLTypes.CHAR || sql_type == SQLTypes.VARCHAR || sql_type == SQLTypes.LONGVARCHAR) { db_type = com.mckoi.database.global.Types.DB_STRING; } else if (sql_type == SQLTypes.DATE || sql_type == SQLTypes.TIME || sql_type == SQLTypes.TIMESTAMP) { db_type = com.mckoi.database.global.Types.DB_TIME; } else if (sql_type == SQLTypes.BINARY || sql_type == SQLTypes.VARBINARY || sql_type == SQLTypes.LONGVARBINARY) { db_type = com.mckoi.database.global.Types.DB_BLOB; } else if (sql_type == SQLTypes.JAVA_OBJECT) { db_type = com.mckoi.database.global.Types.DB_OBJECT; } else { db_type = com.mckoi.database.global.Types.DB_UNKNOWN; } } public void setDBType(int db_type) { this.db_type = db_type; if (db_type == com.mckoi.database.global.Types.DB_NUMERIC) { sql_type = SQLTypes.NUMERIC; } else if (db_type == com.mckoi.database.global.Types.DB_STRING) { sql_type = SQLTypes.LONGVARCHAR; } else if (db_type == com.mckoi.database.global.Types.DB_BOOLEAN) { sql_type = SQLTypes.BIT; } else if (db_type == com.mckoi.database.global.Types.DB_TIME) { sql_type = SQLTypes.TIMESTAMP; } else if (db_type == com.mckoi.database.global.Types.DB_BLOB) { sql_type = SQLTypes.LONGVARBINARY; } else if (db_type == com.mckoi.database.global.Types.DB_OBJECT) { sql_type = SQLTypes.JAVA_OBJECT; } else { throw new Error("Unrecognised internal type."); } } public void setSize(int size) { this.size = size; } public void setScale(int scale) { this.scale = scale; } 
public void setStringLocale(String locale_str, int strength, int decomposition) { // Sets this column to be of the given locale. For example, the string // "frFR" denotes french/france. See com/mckoi/database/TStringType.java // for more information. if (locale_str == null) { this.locale_str = ""; } else { this.locale_str = locale_str; this.str_strength = strength; this.str_decomposition = decomposition; } } public void setDefaultExpression(Expression expression) { this.default_expression_string = new String(expression.text().toString()); } /** * @deprecated */ public void setForeignKey(String foreign_key) { this.foreign_key = foreign_key; } /** * Sets the indexing scheme for this column. Either 'InsertSearch' or * 'BlindSearch'. If not set, then default to insert search. */ public void setIndexScheme(String index_scheme) { index_desc = index_scheme; } /** * If this column represents a Java object, this must be a class the object * is derived from to be added to this column. */ public void setClassConstraint(String class_constraint) { this.class_constraint = class_constraint; try { // Denotes an array if (class_constraint.endsWith("[]")) { String array_class = class_constraint.substring(0, class_constraint.length() - 2); Class ac; // Arrays of primitive types, if (array_class.equals("boolean")) { ac = boolean.class; } else if (array_class.equals("byte")) { ac = byte.class; } else if (array_class.equals("char")) { ac = char.class; } else if (array_class.equals("short")) { ac = short.class; } else if (array_class.equals("int")) { ac = int.class; } else if (array_class.equals("long")) { ac = long.class; } else if (array_class.equals("float")) { ac = float.class; } else if (array_class.equals("double")) { ac = double.class; } else { // Otherwise a standard array. 
ac = Class.forName(array_class); } // Make it into an array constraining_class = java.lang.reflect.Array.newInstance(ac, 0).getClass(); } else { // Not an array constraining_class = Class.forName(class_constraint); } } catch (ClassNotFoundException e) { throw new Error("Unable to resolve class: " + class_constraint); } } /** * Sets this DataTableColumnDef object up from information in the TType * object. This is useful when we need to create a DataTableColumnDef object * to store information based on nothing more than a TType object. This * comes in useful for purely functional tables. */ public void setFromTType(TType type) { setSQLType(type.getSQLType()); if (type instanceof TStringType) { TStringType str_type = (TStringType) type; setSize(str_type.getMaximumSize()); setStringLocale(str_type.getLocaleString(), str_type.getStrength(), str_type.getDecomposition()); } else if (type instanceof TNumericType) { TNumericType num_type = (TNumericType) type; setSize(num_type.getSize()); setScale(num_type.getScale()); } else if (type instanceof TBooleanType) { // Nothing necessary for booleans // TBooleanType bool_type = (TBooleanType) type; } else if (type instanceof TDateType) { // Nothing necessary for dates // TDateType date_type = (TDateType) type; } else if (type instanceof TNullType) { // Nothing necessary for nulls } else if (type instanceof TBinaryType) { TBinaryType binary_type = (TBinaryType) type; setSize(binary_type.getMaximumSize()); } else if (type instanceof TJavaObjectType) { TJavaObjectType java_object_type = (TJavaObjectType) type; setClassConstraint(java_object_type.getJavaClassTypeString()); } else { throw new Error("Don't know how to handle this type: " + type.getClass()); } this.type = type; } /** * Initializes the TType information for a column. This should be called * at the last part of a DataTableColumnDef setup. 
*/ public void initTTypeInfo() { if (type == null) { type = createTTypeFor(getSQLType(), getSize(), getScale(), getLocaleString(), getStrength(), getDecomposition(), getClassConstraint()); } } // ---------- Get methods ---------- public String getName() { return name; } public boolean isNotNull() { return constraints_format[0] != 0; } public int getSQLType() { return sql_type; } /** * Returns the type as a String. */ public String getSQLTypeString() { return sqlTypeToString(getSQLType()); } /** * Returns the type as a String. */ public String getDBTypeString() { switch (getDBType()) { case com.mckoi.database.global.Types.DB_NUMERIC: return "DB_NUMERIC"; case com.mckoi.database.global.Types.DB_STRING: return "DB_STRING"; case com.mckoi.database.global.Types.DB_BOOLEAN: return "DB_BOOLEAN"; case com.mckoi.database.global.Types.DB_TIME: return "DB_TIME"; case com.mckoi.database.global.Types.DB_BLOB: return "DB_BLOB"; case com.mckoi.database.global.Types.DB_OBJECT: return "DB_OBJECT"; default: return "UNKNOWN(" + getDBType() + ")"; } } /** * Returns the Class of Java object that represents this column. */ public Class classType() { return com.mckoi.database.global.TypeUtil.toClass(getDBType()); } public int getDBType() { return db_type; } public int getSize() { return size; } public int getScale() { return scale; } public String getLocaleString() { return locale_str; } public int getStrength() { return str_strength; } public int getDecomposition() { return str_decomposition; } public Expression getDefaultExpression(TransactionSystem system) { if (default_expression_string == null) { return null; } Expression exp = Expression.parse(default_expression_string); return exp; } public String getDefaultExpressionString() { return default_expression_string; } /** * @deprecated */ public String getForeignKey() { return foreign_key; } /** * Returns the name of the scheme we use to index this column. It will * be either 'InsertSearch' or 'BlindSearch'. 
*/ public String getIndexScheme() { if (index_desc.equals("")) { return "InsertSearch"; } return index_desc; } /** * Returns true if this type of column is able to be indexed. */ public boolean isIndexableType() { if (getDBType() == com.mckoi.database.global.Types.DB_BLOB || getDBType() == com.mckoi.database.global.Types.DB_OBJECT) { return false; } return true; } /** * If this column represents a Java Object, this returns the name of the * class the objects stored in the column must be derived from. */ public String getClassConstraint() { return class_constraint; } /** * If this column represents a Java Object, this returns the class object * that is the constraining class for the column. */ public Class getClassConstraintAsClass() { return constraining_class; } /** * Returns the TType for this column. */ public TType getTType() { if (type == null) { throw new Error("'type' variable was not set."); } return type; } // /** // * Returns this column as a TableField object. // * // * @deprecated TableField shouldn't be used anymore // */ // public TableField tableFieldValue() { // TableField field = // new TableField(getName(), getDBType(), getSize(), isNotNull()); //// if (isUnique()) { //// field.setUnique(); //// } // field.setScale(getScale()); // field.setSQLType(getSQLType()); // // return field; // } /** * Returns this column as a ColumnDescription object and gives the column * description the given name. */ public ColumnDescription columnDescriptionValue(String column_name) { ColumnDescription field = new ColumnDescription(column_name, getDBType(), getSize(), isNotNull()); field.setScale(getScale()); field.setSQLType(getSQLType()); return field; } /** * Dumps information about this object to the PrintStream. 
*/ public void dump(PrintStream out) { out.print(getName()); out.print("("); out.print(getSQLTypeString()); out.print(")"); } // ---------- For compatibility with older versions only -------- // These are made available only because we need to convert from the // pre table constraint versions. boolean compatIsUnique() { return constraints_format[1] != 0; } boolean compatIsPrimaryKey() { return constraints_format[2] != 0; } // ---------- Convenient static methods ---------- /** * Returns a string that represents the given SQLType enumeration passed * to it. For example, pass SQLTypes.BIT and it returns the string "BIT" */ public static String sqlTypeToString(int sql_type) { switch (sql_type) { case SQLTypes.BIT: return "BIT"; case SQLTypes.TINYINT: return "TINYINT"; case SQLTypes.SMALLINT: return "SMALLINT"; case SQLTypes.INTEGER: return "INTEGER"; case SQLTypes.BIGINT: return "BIGINT"; case SQLTypes.FLOAT: return "FLOAT"; case SQLTypes.REAL: return "REAL"; case SQLTypes.DOUBLE: return "DOUBLE"; case SQLTypes.NUMERIC: return "NUMERIC"; case SQLTypes.DECIMAL: return "DECIMAL"; case SQLTypes.CHAR: return "CHAR"; case SQLTypes.VARCHAR: return "VARCHAR"; case SQLTypes.LONGVARCHAR: return "LONGVARCHAR"; case SQLTypes.DATE: return "DATE"; case SQLTypes.TIME: return "TIME"; case SQLTypes.TIMESTAMP: return "TIMESTAMP"; case SQLTypes.BINARY: return "BINARY"; case SQLTypes.VARBINARY: return "VARBINARY"; case SQLTypes.LONGVARBINARY: return "LONGVARBINARY"; case SQLTypes.JAVA_OBJECT: return "JAVA_OBJECT"; case SQLTypes.NULL: return "NULL"; case SQLTypes.BOOLEAN: return "BOOLEAN"; default: return "UNKNOWN(" + sql_type + ")"; } } /** * Returns a TType object for a column with the given type information. The * type information is the sql_type, the size and the scale of the type. 
*/ static TType createTTypeFor(int sql_type, int size, int scale, String locale, int str_strength, int str_decomposition, String java_class) { switch (sql_type) { case (SQLTypes.BIT): case (SQLTypes.BOOLEAN): return TType.BOOLEAN_TYPE; case (SQLTypes.TINYINT): case (SQLTypes.SMALLINT): case (SQLTypes.INTEGER): case (SQLTypes.BIGINT): case (SQLTypes.FLOAT): case (SQLTypes.REAL): case (SQLTypes.DOUBLE): case (SQLTypes.NUMERIC): case (SQLTypes.DECIMAL): return new TNumericType(sql_type, size, scale); case (SQLTypes.CHAR): case (SQLTypes.VARCHAR): case (SQLTypes.LONGVARCHAR): case (SQLTypes.CLOB): return new TStringType(sql_type, size, locale, str_strength, str_decomposition); case (SQLTypes.DATE): case (SQLTypes.TIME): case (SQLTypes.TIMESTAMP): return new TDateType(sql_type); case (SQLTypes.BINARY): case (SQLTypes.VARBINARY): case (SQLTypes.LONGVARBINARY): case (SQLTypes.BLOB): return new TBinaryType(sql_type, size); case (SQLTypes.JAVA_OBJECT): return new TJavaObjectType(java_class); case (SQLTypes.ARRAY): return TType.ARRAY_TYPE; case (SQLTypes.NULL): return TType.NULL_TYPE; default: throw new Error("SQL type not recognized."); } } /** * Convenience helper - creates a DataTableColumnDef that * holds a numeric value. */ public static DataTableColumnDef createNumericColumn(String name) { DataTableColumnDef column = new DataTableColumnDef(); column.setName(name); column.setSQLType(java.sql.Types.NUMERIC); column.initTTypeInfo(); return column; } /** * Convenience helper - creates a DataTableColumnDef that * holds a boolean value. */ public static DataTableColumnDef createBooleanColumn(String name) { DataTableColumnDef column = new DataTableColumnDef(); column.setName(name); column.setSQLType(java.sql.Types.BIT); column.initTTypeInfo(); return column; } /** * Convenience helper - creates a DataTableColumnDef that * holds a string value. 
*/ public static DataTableColumnDef createStringColumn(String name) { DataTableColumnDef column = new DataTableColumnDef(); column.setName(name); column.setSQLType(java.sql.Types.VARCHAR); column.setSize(Integer.MAX_VALUE); column.initTTypeInfo(); return column; } /** * Convenience helper - creates a DataTableColumnDef that * holds a binary value. */ public static DataTableColumnDef createBinaryColumn(String name) { DataTableColumnDef column = new DataTableColumnDef(); column.setName(name); column.setSQLType(java.sql.Types.LONGVARBINARY); column.setSize(Integer.MAX_VALUE); column.setIndexScheme("BlindSearch"); column.initTTypeInfo(); return column; } // ---------- IO Methods ---------- /** * Writes this column information out to a DataOutputStream. */ void write(DataOutput out) throws IOException { out.writeInt(2); // The version out.writeUTF(name); out.writeInt(constraints_format.length); out.write(constraints_format); out.writeInt(sql_type); out.writeInt(db_type); out.writeInt(size); out.writeInt(scale); if (default_expression_string != null) { out.writeBoolean(true); out.writeUTF(default_expression_string); //new String(default_exp.text().toString())); } else { out.writeBoolean(false); } out.writeUTF(foreign_key); out.writeUTF(index_desc); out.writeUTF(class_constraint); // Introduced in version 2. // Format the 'other' string StringBuffer other = new StringBuffer(); other.append("|"); other.append(locale_str); other.append("|"); other.append(str_strength); other.append("|"); other.append(str_decomposition); other.append("|"); // And write it out.writeUTF(new String(other)); } /** * Reads this column from a DataInputStream. 
*/ static DataTableColumnDef read(DataInput in) throws IOException { int ver = in.readInt(); DataTableColumnDef cd = new DataTableColumnDef(); cd.name = in.readUTF(); int len = in.readInt(); in.readFully(cd.constraints_format, 0, len); cd.sql_type = in.readInt(); cd.db_type = in.readInt(); cd.size = in.readInt(); cd.scale = in.readInt(); boolean b = in.readBoolean(); if (b) { cd.default_expression_string = in.readUTF(); // cd.default_exp = Expression.parse(in.readUTF()); } cd.foreign_key = in.readUTF(); cd.index_desc = in.readUTF(); if (ver > 1) { String cc = in.readUTF(); if (!cc.equals("")) { cd.setClassConstraint(cc); } } else { cd.class_constraint = ""; } // Parse the 'other' string String other = in.readUTF(); if (other.length() > 0) { if (other.startsWith("|")) { // Read the string locale, collation strength and disposition int cur_i = 1; int next_break = other.indexOf("|", cur_i); cd.locale_str = other.substring(cur_i, next_break); cur_i = next_break + 1; next_break = other.indexOf("|", cur_i); cd.str_strength = Integer.parseInt(other.substring(cur_i, next_break)); cur_i = next_break + 1; next_break = other.indexOf("|", cur_i); cd.str_decomposition = Integer.parseInt(other.substring(cur_i, next_break)); } else { throw new Error("Incorrectly formatted DataTableColumnDef data."); } } cd.initTTypeInfo(); return cd; } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/DataTableDef.java000066400000000000000000000244121330501023400250230ustar00rootroot00000000000000/** * com.mckoi.database.DataTableDef 27 Jul 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package com.mckoi.database;

import java.io.*;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;

/**
 * A definition of a table.  Every table in the database has a definition
 * that describes how it is stored on disk, the column definitions, primary
 * keys/foreign keys, and any check constraints.
 *
 * @author Tobias Downer
 */

public class DataTableDef {

  /**
   * A TableName object that represents this data table def.
   */
  private TableName table_name;

  /**
   * The type of table this is (this is the class name of the object that
   * maintains the underlying database files).
   */
  private String table_type_class;

  /**
   * The list of DataTableColumnDef objects that are the definitions of each
   * column in the table.
   */
  private ArrayList column_list;

  /**
   * Set to true if this data table def is immutable.
   */
  private boolean immutable;

  /**
   * Constructs this DataTableDef file.
   */
  public DataTableDef() {
    column_list = new ArrayList();
    table_type_class = "";
    immutable = false;
  }

  /**
   * Copy constructor.
   */
  public DataTableDef(DataTableDef table_def) {
    table_name = table_def.getTableName();
    table_type_class = table_def.table_type_class;
    // NOTE: shallow clone - the DataTableColumnDef elements are shared with
    // the source definition.
    column_list = (ArrayList) table_def.column_list.clone();
    // Copy is not immutable
    immutable = false;
  }

  /**
   * Sets this DataTableDef to immutable which means nothing is able to
   * change it.
   */
  public void setImmutable() {
    immutable = true;
  }

  /**
   * Returns true if this is immutable.
   */
  public boolean immutable() {
    return immutable;
  }

  /**
   * Checks that this object is mutable.  If it isn't an exception is thrown.
*/
  private void checkMutable() {
    if (immutable()) {
      throw new Error("Tried to mutate immutable object.");
    }
  }

  /**
   * Outputs to the PrintStream for debugging.
   */
  public void dump(PrintStream out) {
    for (int i = 0; i < columnCount(); ++i) {
      columnAt(i).dump(out);
      out.println();
    }
  }

  /**
   * Resolves variables in a column so that any unresolved column names point
   * to this table.  Used to resolve columns in the 'check_expression'.
   */
  void resolveColumns(boolean ignore_case, Expression exp) {

    // For each variable, determine if the column correctly resolves to a
    // column in this table.  If the database is in identifier case insensitive
    // mode attempt to resolve the column name to a valid column in this
    // def.
    if (exp != null) {
      List list = exp.allVariables();
      for (int i = 0; i < list.size(); ++i) {
        Variable v = (Variable) list.get(i);
        String col_name = v.getName();
        // Can we resolve this to a variable in the table?
        // (In case sensitive mode the name is left untouched.)
        if (ignore_case) {
          int size = columnCount();
          for (int n = 0; n < size; ++n) {
            // If this is a column name (case ignored) then set the variable
            // to the correct cased name.
            if (columnAt(n).getName().equalsIgnoreCase(col_name)) {
              v.setColumnName(columnAt(n).getName());
            }
          }
        }
      }
    }
  }

  /**
   * Resolves a single column name to its correct form.  For example, if
   * the database is in case insensitive mode it'll resolve ID to 'id' if
   * 'id' is in this table.  Throws a database exception if a column couldn't
   * be resolved (ambiguous or not found).
   */
  public String resolveColumnName(String col_name, boolean ignore_case)
                                                  throws DatabaseException {
    // Can we resolve this to a column in the table?
    int size = columnCount();
    int found = -1;
    for (int n = 0; n < size; ++n) {
      // If this is a column name (case ignored) then set the column
      // to the correct cased name.
String this_col_name = columnAt(n).getName(); if (ignore_case && this_col_name.equalsIgnoreCase(col_name)) { if (found == -1) { found = n; } else { throw new DatabaseException( "Ambiguous reference to column '" + col_name + "'"); } } else if (!ignore_case && this_col_name.equals(col_name)) { found = n; } } if (found != -1) { return columnAt(found).getName(); } else { throw new DatabaseException("Column '" + col_name + "' not found"); } } /** * Given a list of column names referencing entries in this table, this will * resolve each one to its correct form. Throws a database exception if * a column couldn't be resolved. */ public void resolveColumnsInArray(DatabaseConnection connection, ArrayList list) throws DatabaseException { boolean ignore_case = connection.isInCaseInsensitiveMode(); for (int i = 0; i < list.size(); ++i) { String col_name = (String) list.get(i); list.set(i, resolveColumnName((String) list.get(i), ignore_case)); } } // ---------- Set methods ---------- public void setTableName(TableName name) { this.table_name = name; } public void setTableClass(String clazz) { checkMutable(); if (clazz.equals("com.mckoi.database.VariableSizeDataTableFile")) { table_type_class = clazz; } else { throw new Error("Unrecognised table class: " + clazz); } } public void addColumn(DataTableColumnDef col_def) { checkMutable(); // Is there already a column with this name in the table def? for (int i = 0; i < column_list.size(); ++i) { DataTableColumnDef cd = (DataTableColumnDef) column_list.get(i); if (cd.getName().equals(col_def.getName())) { throw new Error("Duplicated columns found."); } } column_list.add(col_def); } /** * Same as 'addColumn' only this does not perform a check to ensure no * two columns are the same. */ public void addVirtualColumn(DataTableColumnDef col_def) { checkMutable(); column_list.add(col_def); } // ---------- Get methods ---------- public String getSchema() { String schema_name = table_name.getSchema(); return schema_name == null ? 
"" : schema_name;
  }

  public String getName() {
    return table_name.getName();
  }

  public TableName getTableName() {
    return table_name;
  }

  public String getTableClass() {
    return table_type_class;
  }

  public int columnCount() {
    return column_list.size();
  }

  public DataTableColumnDef columnAt(int column) {
    return (DataTableColumnDef) column_list.get(column);
  }

  public int findColumnName(String column_name) {
    int size = columnCount();
    for (int i = 0; i < size; ++i) {
      if (columnAt(i).getName().equals(column_name)) {
        return i;
      }
    }
    // Not found.
    return -1;
  }

  // Stores col name -> col index lookups
  private transient HashMap col_name_lookup;
  private transient Object COL_LOOKUP_LOCK = new Object();

  /**
   * A faster way to find a column index given a string column name.  This
   * caches column name -> column index in a HashMap.
   */
  public final int fastFindColumnName(String col) {
    synchronized (COL_LOOKUP_LOCK) {
      if (col_name_lookup == null) {
        col_name_lookup = new HashMap(30);
      }
      Object ob = col_name_lookup.get(col);
      if (ob == null) {
        // Not cached yet - do the linear scan and cache the result.
        // Note that a miss is cached as -1 as well.
        int ci = findColumnName(col);
        col_name_lookup.put(col, new Integer(ci));
        return ci;
      }
      else {
        return ((Integer) ob).intValue();
      }
    }
  }

  /**
   * Returns a copy of this object, except with no columns or constraints.
   */
  public DataTableDef noColumnCopy() {
    DataTableDef def = new DataTableDef();
    def.setTableName(getTableName());
//    def.setSchema(schema);
//    def.setName(name);
    def.table_type_class = table_type_class;
    return def;
  }

  // ---------- In/Out methods ----------

  /**
   * Writes this DataTableDef file to the data output stream.
   * The layout written here must be kept in sync with 'read' below.
   */
  void write(DataOutput out) throws IOException {
    out.writeInt(2);  // Version number

    out.writeUTF(getName());
    out.writeUTF(getSchema());    // Added in version 2
    out.writeUTF(table_type_class);
    out.writeInt(column_list.size());
    for (int i = 0; i < column_list.size(); ++i) {
      ((DataTableColumnDef) column_list.get(i)).write(out);
    }

//    // -- Added in version 2 --
//    // Write the constraint list.
// out.writeInt(constraint_list.size()); // for (int i = 0; i < constraint_list.size(); ++i) { // ((DataTableConstraintDef) constraint_list.get(i)).write(out); // } // [ this is removed from version 1 ] // if (check_expression != null) { // out.writeBoolean(true); // // Write the text version of the expression to the stream. // out.writeUTF(new String(check_expression.text())); // } // else { // out.writeBoolean(false); // } } /** * Reads this DataTableDef file from the data input stream. */ static DataTableDef read(DataInput in) throws IOException { DataTableDef dtf = new DataTableDef(); int ver = in.readInt(); if (ver == 1) { throw new IOException("Version 1 DataTableDef no longer supported."); } else if (ver == 2) { String rname = in.readUTF(); String rschema = in.readUTF(); dtf.setTableName(new TableName(rschema, rname)); dtf.table_type_class = in.readUTF(); int size = in.readInt(); for (int i = 0; i < size; ++i) { DataTableColumnDef col_def = DataTableColumnDef.read(in); dtf.column_list.add(col_def); } } else { throw new Error("Unrecognized DataTableDef version (" + ver + ")"); } dtf.setImmutable(); return dtf; } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/DataTableFile.java000066400000000000000000000163511330501023400252070ustar00rootroot00000000000000/** * com.mckoi.database.DataTableFile 16 Dec 1999 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*
 */

package com.mckoi.database;

import java.io.IOException;

/**
 * This interface handles the abstraction of retreiving information from a
 * database file.  It knows the fixed length of the data fields and can
 * deduce the topology of the file and retreive and store information to/
 * from it.
 * <p>
 * The callee of this interface must ensure that all calls to implementations
 * of this interface are sequential and not concurrent.  It is not expected
 * that implementations are thread safe.
 * <p>
 * See VariableSizeDataTableFile for an implementation of this interface.
 *
 * @author Tobias Downer
 */

interface DataTableFile extends TableDataSource {

  /**
   * Creates a new file of the given table.  The table is initialised and
   * contains 0 row entries.  If the table already exists in the database then
   * this will throw an exception.
   * <p>
   * On exit, the object will be initialised and loaded with the given table.
   *
   * @param def the definition of the table.
   */
  void create(DataTableDef def) throws IOException;

  /**
   * Updates a file of the given table.  If the table does not exist, then it
   * is created.  If the table already exists but is different, then the
   * existing table is modified to incorporate the new fields structure.
   * <p>
   * The DataTableFile must have previously been 'load(table_name)' before
   * this call.
   * <p>
   * Implementations of this method may choose to reorganise information that
   * the relational schemes are dependant on (the row order for example).  If
   * this method returns 'true' then we must also reindex the schemes.
   * <p>
   * NOTE: If the new format has columns that are not
   * included in the new format then the columns are deleted.
   *
   * @param def the definition of the table.
   * @return true if the table topology has changed.
   */
  boolean update(DataTableDef def) throws IOException;

  /**
   * This is called periodically when this data table file requires some
   * maintenance.  It is recommended that this method is called every
   * time the table is initialized (loaded).
   * <p>
   * The DataTableFile must have previously been 'load(table_name)' before
   * this call.
   * <p>
   * This method may change the topology of the rows (delete rows that are
   * marked as deleted), therefore if the method returns true you need to
   * re-index the schemes.
   *
   * @return true if the table topology was changed.
   */
  boolean doMaintenance() throws IOException;

//  /**
//   * A recovery method that returns a DataTableDef object for this data
//   * table file that was last used in a call to 'create' or 'update'.  This
//   * information should be kept in a secondary table topology store but it
//   * is useful to keep this information in the data table file just incase
//   * something bad happens, or tables are moved to another database.
//   */
//  DataTableDef recoverLastDataTableDef() throws IOException;

  /**
   * Loads a previously created table.  A table can be loaded in read only
   * mode, in which case any methods that write to the DataTableFile will
   * throw an IOException.
   *
   * @param table_name the name of the table.
   * @param read_only if true then the table file is opened as read-only.
   */
  void load(String table_name, boolean read_only) throws IOException;

  /**
   * Shuts down the table.  This is called when the table is closed and the
   * resources it uses are to be freed back to the system.  This is called
   * as part of the database shut down procedure or called when we want to
   * free the resources associated with this table.
   */
  void shutdown() throws IOException;

  /**
   * Deletes the data table file in the file system.  This is used to clear
   * up resources after a table has been dropped.  The table must be shut
   * down before this method is called.
   * <p>
   * NOTE: Use this with care.  All data is lost!
   */
  void drop();

  /**
   * Flushes all information that may be cached in memory to disk.  This
   * includes any relational data, any cached data that hasn't made it to
   * the file system yet.  It will write out all persistent information
   * and leave the table in a state where it is fully represented in the
   * file system.
   */
  void updateFile() throws IOException;

  /**
   * Locks the data in the file to prevent the system overwritting entries
   * that have been marked as removed.  This is necessary so we may still
   * safely read removed entries from the table while the table is locked.
   */
  void addRowsLock();

  /**
   * Unlocks the data in the file to indicate that the system may safely
   * overwrite removed entries in the file.
   */
  void removeRowsLock();

  /**
   * Returns true if the file currently has all of its rows locked.
   */
  boolean hasRowsLocked();

//  /**
//   * The number of rows that are currently stored in this table.  This number
//   * does not include the rows that have been marked as removed.
//   */
//  int rowCount();

  /**
   * Returns true if the given row index points to a valid and available
   * row entry.  Returns false if the row entry has been marked as removed,
   * or the index goes outside the bounds of the table.
   */
  boolean isRowValid(int record_index) throws IOException;

  /**
   * Adds a complete new row into the table.  If the table is in a row locked
   * state, then this will always add a new entry to the end of the table.
   * Otherwise, new entries are added where entries were previously removed.
   * <p>
   * This will update any column indices that are set.
   *
   * @return the raw row index of the row that was added.
   */
  int addRow(RowData row_data) throws IOException;

  /**
   * Removes a row from the table at the given index.  This will only mark
   * the entry as removed, and will not actually remove the data.  This is
   * because a process is allowed to read the data even after the row has been
   * marked as removed (if the rows have been locked).
   * <p>
   * This will update any column indices that are set.
   *
   * @param row_index the raw row index of the entry to be marked as removed.
   */
  void removeRow(int row_index) throws IOException;

//  /**
//   * Returns a DataCell object of the entry at the given column, row
//   * index in the table.  This will always work provided there was once data
//   * stored at that index, even if the row has been marked as deleted.
//   */
//  DataCell getCellAt(int column, int row) throws IOException;

  /**
   * Returns a unique number.  This is incremented each time it is accessed.
   */
  long nextUniqueKey() throws IOException;

}
mckoisqldb-1.0.6/src/main/java/com/mckoi/database/DataTableFilter.java000066400000000000000000000165051330501023400255560ustar00rootroot00000000000000/**
 * com.mckoi.database.DataTableFilter  06 Apr 1998
 *
 * Mckoi SQL Database ( http://www.mckoi.com/database )
 * Copyright (C) 2000-2018 Diehl and Associates, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package com.mckoi.database;

import com.mckoi.util.IntegerVector;

/**
 * This object sits on top of a DataTable object filtering out certain types
 * of calls.  We could use this object to implement a ReferenceTable which
 * can be used to declare a new table name with a DataTable type.  We also
 * use this object to implement a filter for column removals.
 * <p>

* @author Tobias Downer
 */

public class DataTableFilter extends AbstractDataTable {

  /**
   * The parent DataTable object.  All filtered calls are delegated here.
   */
  protected AbstractDataTable parent;

  /**
   * The Constructor.  A filter can only sit on top of a DataTable or
   * DataTableFilter table.
   * ISSUE: we could make an interface for this.  This is a bit of a hack.
   */
  protected DataTableFilter(AbstractDataTable table) {
    super();
    parent = table;
  }

  /**
   * Returns the Database context for this filtered table.
   */
  public Database getDatabase() {
    return parent.getDatabase();
  }

  /**
   * Returns the number of columns in the table.
   */
  public int getColumnCount() {
    return parent.getColumnCount();
  }

  /**
   * Returns the number of rows stored in the table.
   */
  public final int getRowCount() {
    return parent.getRowCount();
  }

  /**
   * Given a fully qualified variable field name, ie. 'APP.CUSTOMER.CUSTOMERID'
   * this will return the column number the field is at.  Returns -1 if the
   * field does not exist in the table.
   */
  public int findFieldName(Variable v) {
    return parent.findFieldName(v);
  }

  /**
   * Returns a fully qualified Variable object that represents the name of
   * the column at the given index.  For example,
   * new Variable(new TableName("APP", "CUSTOMER"), "ID")
   */
  public Variable getResolvedVariable(int column) {
    return parent.getResolvedVariable(column);
  }

  /**
   * Returns a SelectableScheme for the given column in the given VirtualTable
   * row domain.  Slight Hack: When we are asking for a selectable scheme for
   * a reference table, we must defer the 'table' variable to the parent.
   */
  final SelectableScheme getSelectableSchemeFor(int column,
                                          int original_column, Table table) {
    if (table == this) {
      return parent.getSelectableSchemeFor(column, original_column, parent);
    }
    else {
      return parent.getSelectableSchemeFor(column, original_column, table);
    }
  }

  /**
   * Given a set, this trickles down through the Table hierarchy resolving
   * the given row_set to a form that the given ancestor understands.
   * Say you give the set { 0, 1, 2, 3, 4, 5, 6 }, this function may check
   * down three levels and return a new 7 element set with the rows fully
   * resolved to the given ancestors domain.
   * <p>
   * Slight Hack: When we are asking for a selectable scheme for a reference
   * table, we must defer the 'table' variable to the parent.
   */
  final void setToRowTableDomain(int column, IntegerVector row_set,
                                 TableDataSource ancestor) {
    if (ancestor == this) {
      parent.setToRowTableDomain(column, row_set, parent);
    }
    else {
      parent.setToRowTableDomain(column, row_set, ancestor);
    }
  }

  /**
   * Return the list of DataTable and row sets that make up the raw information
   * in this table.  This is identical to the DataTable method except it
   * puts this table as the owner of the row set.
   */
  final RawTableInformation resolveToRawTable(RawTableInformation info) {
    IntegerVector row_set = new IntegerVector();
    RowEnumeration e = rowEnumeration();
    while (e.hasMoreRows()) {
      row_set.addInt(e.nextRowIndex());
    }
    info.add(this, row_set);
    return info;
  }

  /**
   * Returns an object that represents the information in the given cell
   * in the table.  This will generally be an expensive algorithm, so calls
   * to it should be kept to a minimum.  Note that the offset between two
   * rows is not necessarily 1.
   */
  public final TObject getCellContents(int column, int row) {
    return parent.getCellContents(column, row);
  }

  /**
   * Returns an Enumeration of the rows in this table.
   * The Enumeration is a fast way of retrieving consequtive rows in the table.
   */
  public final RowEnumeration rowEnumeration() {
    return parent.rowEnumeration();
  }

  /**
   * Returns a DataTableDef object that defines the name of the table and the
   * layout of the columns of the table.  Note that for tables that are joined
   * with other tables, the table name and schema for this object become
   * mangled.  For example, a table called 'PERSON' joined with a table called
   * 'MUSIC' becomes a table called 'PERSON#MUSIC' in a null schema.
   */
  public DataTableDef getDataTableDef() {
    return parent.getDataTableDef();
  }

  /**
   * Adds a DataTableListener to the DataTable objects at the root of this
   * table tree hierarchy.  If this table represents the join of a number of
   * tables then the DataTableListener is added to all the DataTable objects
   * at the root.
   * <p>
   * A DataTableListener is notified of all modifications to the raw entries
   * of the table.  This listener can be used for detecting changes in VIEWs,
   * for triggers or for caching of common queries.
   */
  void addDataTableListener(DataTableListener listener) {
    parent.addDataTableListener(listener);
  }

  /**
   * Removes a DataTableListener from the DataTable objects at the root of
   * this table tree hierarchy.  If this table represents the join of a
   * number of tables, then the DataTableListener is removed from all the
   * DataTable objects at the root.
   */
  void removeDataTableListener(DataTableListener listener) {
    parent.removeDataTableListener(listener);
  }

  /**
   * Locks the root table(s) of this table so that it is impossible to
   * overwrite the underlying rows that may appear in this table.
   * This is used when cells in the table need to be accessed 'outside' the
   * lock.  So we may have late access to cells in the table.
   * 'lock_key' is a given key that will also unlock the root table(s).
   * NOTE: This is nothing to do with the 'LockingMechanism' object.
   */
  public void lockRoot(int lock_key) {
    parent.lockRoot(lock_key);
  }

  /**
   * Unlocks the root tables so that the underlying rows may
   * once again be used if they are not locked and have been removed.  This
   * should be called some time after the rows have been locked.
   */
  public void unlockRoot(int lock_key) {
    parent.unlockRoot(lock_key);
  }

  /**
   * Returns true if the table has its row roots locked (via the lockRoot(int)
   * method.
   */
  public boolean hasRootsLocked() {
    return parent.hasRootsLocked();
  }

}
mckoisqldb-1.0.6/src/main/java/com/mckoi/database/DataTableListener.java000066400000000000000000000024211330501023400261060ustar00rootroot00000000000000/**
 * com.mckoi.database.DataTableListener  19 Aug 2000
 *
 * Mckoi SQL Database ( http://www.mckoi.com/database )
 * Copyright (C) 2000-2018 Diehl and Associates, Inc.
*
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package com.mckoi.database;

/**
 * A DataTableListener is notified of all modifications to the raw entries
 * of the data table.  This listener can be used for detecting changes in
 * VIEWs, for triggers or for caching of common queries.
 *
 * @author Tobias Downer
 */

interface DataTableListener {

  /**
   * Called before a row entry in the table is deleted.
   */
  public void rowDeleted(DataTable table, int row_index);

  /**
   * Called after a row entry in the table is added.
   */
  public void rowAdded(DataTable table, int row_index);

}
mckoisqldb-1.0.6/src/main/java/com/mckoi/database/Database.java000066400000000000000000003062311330501023400242710ustar00rootroot00000000000000/**
 * com.mckoi.database.Database  02 Mar 1998
 *
 * Mckoi SQL Database ( http://www.mckoi.com/database )
 * Copyright (C) 2000-2018 Diehl and Associates, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
* */ package com.mckoi.database; import java.sql.*; import java.io.File; import java.io.PrintStream; import java.io.IOException; import java.util.ArrayList; import java.util.Map; import com.mckoi.debug.*; import com.mckoi.util.Log; import com.mckoi.util.Stats; import com.mckoi.util.Cache; import com.mckoi.database.global.*; import com.mckoi.database.control.DefaultDBConfig; import com.mckoi.database.jdbc.MSQLException; import com.mckoi.store.Store; import com.mckoi.store.MutableArea; /** * The representation of a single database in the system. A database * is a set of schema, a set of tables and table definitions of tables in * the schema, and a description of the schema. *

* This class encapsulates the top level behaviour of a database. That is * of creating itself, initializing itself, shutting itself down, deleting * itself, creating/dropping a table, updating a table. It is not the * responsibility of this class to handle table behaviour above this. Top * level table behaviour is handled by DataTable through the DatabaseConnection * interface. *

* The Database object is also responsible for various database management
 * functions such as creating, editing and removing users, triggers, functions
 * and services.
 *
 * @author Tobias Downer
 */

public final class Database implements DatabaseConstants {

  // ---------- Statics ----------

  /**
   * The name of the system schema that contains tables refering to system
   * information.
   */
  public static final String SYSTEM_SCHEMA =
                                         TableDataConglomerate.SYSTEM_SCHEMA;

  /**
   * The name of the default schema.
   */
  public static final String DEFAULT_SCHEMA = "APP";

  /**
   * The name of the schema that contains JDBC helper tables.
   */
  public static final String JDBC_SCHEMA = "SYS_JDBC";

  /**
   * The password privs and grants table.
   */
  public static final TableName SYS_PASSWORD =
                               new TableName(SYSTEM_SCHEMA, "sUSRPassword");

  public static final TableName SYS_USERCONNECT =
                        new TableName(SYSTEM_SCHEMA, "sUSRUserConnectPriv");

  public static final TableName SYS_USERPRIV =
                               new TableName(SYSTEM_SCHEMA, "sUSRUserPriv");

  public static final TableName SYS_GRANTS =
                               new TableName(SYSTEM_SCHEMA, "sUSRGrant");

  /**
   * The services table.
   */
  public static final TableName SYS_SERVICE =
                                new TableName(SYSTEM_SCHEMA, "sUSRService");

  /**
   * The function factory table.
   */
  public static final TableName SYS_FUNCTIONFACTORY =
                        new TableName(SYSTEM_SCHEMA, "sUSRFunctionFactory");

  /**
   * The function table.
   */
  public static final TableName SYS_FUNCTION =
                               new TableName(SYSTEM_SCHEMA, "sUSRFunction");

  /**
   * The view table.
   */
  public static final TableName SYS_VIEW =
                                   new TableName(SYSTEM_SCHEMA, "sUSRView");

  /**
   * The label table.
   */
  public static final TableName SYS_LABEL =
                                  new TableName(SYSTEM_SCHEMA, "sUSRLabel");

  /**
   * The system internally generated 'sUSRTableColumns' table.
   */
  public static final TableName SYS_TABLE_COLUMNS =
                           new TableName(SYSTEM_SCHEMA, "sUSRTableColumns");

  /**
   * The system internally generated 'sUSRTableInfo' table.
   */
  public static final TableName SYS_TABLE_INFO =
                              new TableName(SYSTEM_SCHEMA, "sUSRTableInfo");

  /**
   * The system internally generated 'sUSRDataTrigger' table.
   */
  public static final TableName SYS_DATA_TRIGGER =
                            new TableName(SYSTEM_SCHEMA, "sUSRDataTrigger");

  /**
   * The system internally generated 'sUSRDatabaseStatistics' table.
   */
  public static final TableName SYS_DB_STATISTICS =
                     new TableName(SYSTEM_SCHEMA, "sUSRDatabaseStatistics");

  /**
   * The OLD table used inside a triggered procedure to represent a triggered
   * row before the operation occurs.
   */
  public static final TableName OLD_TRIGGER_TABLE =
                                        new TableName(SYSTEM_SCHEMA, "OLD");

  /**
   * The NEW table used inside a triggered procedure to represent a triggered
   * row after the operation occurs.
   */
  public static final TableName NEW_TRIGGER_TABLE =
                                        new TableName(SYSTEM_SCHEMA, "NEW");

  /**
   * The name of the lock group.  If a user belongs to this group the user
   * account is locked and they are not allowed to log into the database.
   */
  public static final String LOCK_GROUP = "#locked";

  /**
   * The name of the secure access group.  If a user belongs to this group they
   * are permitted to perform a number of priviledged operations such as
   * shutting down the database, and adding and removing users.
   */
  public static final String SECURE_GROUP = "secure access";

  /**
   * The name of the user manager group.  Users that belong in this group can
   * create, alter and drop users from the system.
   */
  public static final String USER_MANAGER_GROUP = "user manager";

  /**
   * The name of the schema manager group.  Users that belong in this group can
   * create and drop schema from the system.
   */
  public static final String SCHEMA_MANAGER_GROUP = "schema manager";

  /**
   * The username of the internal secure user.  The internal secure user is
   * only used for internal highly privileged operations.  This user is given
   * full privs to everything and is used to manage the system tables, for
   * authentication, etc.
   */
  public static final String INTERNAL_SECURE_USERNAME = "@SYSTEM";

  // ---------- Members ----------

  /**
   * The DatabaseSystem that this database is part of.
   */
  private DatabaseSystem system;

  /**
   * The name of this database.
   */
  private String name;

  /**
   * The TableDataConglomerate that contains the conglomerate of tables for
   * this database.
   */
  private TableDataConglomerate conglomerate;

  /**
   * A flag which, when set to true, will cause the engine to delete the
   * database from the file system when it is shut down.
   */
  private boolean delete_on_shutdown;

  /**
   * An internal secure User that is given full grant access to the entire
   * database.  This user is used to execute system level queries such as
   * creating and updating system tables.
   */
  private User internal_system_user;

  /**
   * The database wide TriggerManager object that dispatches trigger events
   * to the DatabaseConnection objects that are listening for the events.
   */
  private TriggerManager trigger_manager;

  /**
   * The various log files.
   */

  /**
   * This log file records the DQL commands executed on the server.
   */
  private Log commands_log;

  /**
   * This is set to true when the 'init()' method is first called.
   */
  private boolean initialised = false;

  /**
   * A table that has a single row but no columns.
   */
  private final Table SINGLE_ROW_TABLE;

  /**
   * The Constructor.  This takes a directory path in which the database is
   * stored.
   */
  public Database(DatabaseSystem system, String name) {
    this.system = system;
    this.delete_on_shutdown = false;
    this.name = name;
    conglomerate = new TableDataConglomerate(system, system.storeSystem());
    // The internal secure user authenticates with an empty password.
    internal_system_user =
                     new User(INTERNAL_SECURE_USERNAME, this, "",
                              System.currentTimeMillis());

    // Create the single row table
    TemporaryTable t;
    t = new TemporaryTable(this,
                           "SINGLE_ROW_TABLE", new DataTableColumnDef[0]);
    t.newRow();
    SINGLE_ROW_TABLE = t;

    trigger_manager = new TriggerManager(system);

  }

  /**
   * Returns the name of this database.
*/
  public String getName() {
    return name;
  }

  /**
   * Returns true if this database is in read only mode.
   */
  public boolean isReadOnly() {
    return getSystem().readOnlyAccess();
  }

  /**
   * Returns the internal system user for this database.
   */
  private User internalSystemUser() {
    return internal_system_user;
  }

  // ---------- Log accesses ----------

  /**
   * Returns the log file where commands are recorded.
   */
  public Log getCommandsLog() {
    return commands_log;
  }

  /**
   * Returns the conglomerate for this database.
   */
  TableDataConglomerate getConglomerate() {
    return conglomerate;
  }

  /**
   * Returns a new DatabaseConnection instance that is used against this
   * database.
   * <p>
   * When a new connection is made on this database, this method is called
   * to create a new DatabaseConnection instance for the connection.  This
   * connection handles all transactional queries and modifications to the
   * database.
   */
  public DatabaseConnection createNewConnection(
                          User user, DatabaseConnection.CallBack call_back) {
    // A null user defaults to the internal secure system user.
    if (user == null) {
      user = internalSystemUser();
    }
    DatabaseConnection connection =
                               new DatabaseConnection(this, user, call_back);
    // Initialize the connection
    connection.init();

    return connection;
  }

  // ---------- Database user management functions ----------

  /**
   * Tries to authenticate a username/password against this database.  If we
   * fail to authenticate then a 'null' object is returned, otherwise a valid
   * User object is returned.  If a valid object is returned, the user
   * will be logged into the engine via the UserManager object (in
   * DatabaseSystem).  The developer must ensure that 'close' is called before
   * the object is disposed (logs out of the system).
   * <p>

* This method also returns null if a user exists but was denied access from * the given host string. The given 'host_name' object is formatted in the * database host connection encoding. This method checks all the values * from the sUSRUserConnectPriv table for this user for the given protocol. * It first checks if the user is specifically DENIED access from the given * host. It then checks if the user is ALLOWED access from the given host. * If a host is neither allowed or denied then it is denied. */ public User authenticateUser(String username, String password, String connection_string) { // Create a temporary connection for authentication only... DatabaseConnection connection = createNewConnection(null, null); DatabaseQueryContext context = new DatabaseQueryContext(connection); connection.setCurrentSchema(SYSTEM_SCHEMA); LockingMechanism locker = connection.getLockingMechanism(); locker.setMode(LockingMechanism.EXCLUSIVE_MODE); try { try { Connection jdbc = connection.getJDBCConnection(); // Is the username/password in the database? PreparedStatement stmt = jdbc.prepareStatement( " SELECT \"UserName\" FROM \"sUSRPassword\" " + " WHERE \"sUSRPassword.UserName\" = ? " + " AND \"sUSRPassword.Password\" = ? "); stmt.setString(1, username); stmt.setString(2, password); ResultSet rs = stmt.executeQuery(); if (!rs.next()) { return null; } rs.close(); stmt.close(); // Now check if this user is permitted to connect from the given // host. if (userAllowedAccessFromHost(context, username, connection_string)) { // Successfully authenticated... User user = new User(username, this, connection_string, System.currentTimeMillis()); // Log the authenticated user in to the engine. 
system.getUserManager().userLoggedIn(user); return user; } return null; } catch (SQLException e) { if (e instanceof MSQLException) { MSQLException msqle = (MSQLException) e; Debug().write(Lvl.ERROR, this, msqle.getServerErrorStackTrace()); } Debug().writeException(Lvl.ERROR, e); throw new RuntimeException("SQL Error: " + e.getMessage()); } } finally { try { // Make sure we commit the connection. connection.commit(); } catch (TransactionException e) { // Just issue a warning... Debug().writeException(Lvl.WARNING, e); } finally { // Guarentee that we unluck from EXCLUSIVE locker.finishMode(LockingMechanism.EXCLUSIVE_MODE); } // And make sure we close (dispose) of the temporary connection. connection.close(); } } /** * Performs check to determine if user is allowed access from the given * host. See the comments of 'authenticateUser' for a description of * how this is determined. */ private boolean userAllowedAccessFromHost(DatabaseQueryContext context, String username, String connection_string) { // The system user is not allowed to login if (username.equals(INTERNAL_SECURE_USERNAME)) { return false; } // We always allow access from 'Internal/*' (connections from the // 'getConnection' method of a com.mckoi.database.control.DBSystem object) // ISSUE: Should we add this as a rule? if (connection_string.startsWith("Internal/")) { return true; } // What's the protocol? 
int protocol_host_deliminator = connection_string.indexOf("/"); String protocol = connection_string.substring(0, protocol_host_deliminator); String host = connection_string.substring(protocol_host_deliminator + 1); if (Debug().isInterestedIn(Lvl.INFORMATION)) { Debug().write(Lvl.INFORMATION, this, "Checking host: protocol = " + protocol + ", host = " + host); } // The table to check DataTable connect_priv = context.getTable(SYS_USERCONNECT); Variable un_col = connect_priv.getResolvedVariable(0); Variable proto_col = connect_priv.getResolvedVariable(1); Variable host_col = connect_priv.getResolvedVariable(2); Variable access_col = connect_priv.getResolvedVariable(3); // Query: where UserName = %username% Table t = connect_priv.simpleSelect(context, un_col, Operator.get("="), new Expression(TObject.stringVal(username))); // Query: where %protocol% like Protocol Expression exp = Expression.simple(TObject.stringVal(protocol), Operator.get("like"), proto_col); t = t.exhaustiveSelect(context, exp); // Query: where %host% like Host exp = Expression.simple(TObject.stringVal(host), Operator.get("like"), host_col); t = t.exhaustiveSelect(context, exp); // Those that are DENY Table t2 = t.simpleSelect(context, access_col, Operator.get("="), new Expression(TObject.stringVal("DENY"))); if (t2.getRowCount() > 0) { return false; } // Those that are ALLOW Table t3 = t.simpleSelect(context, access_col, Operator.get("="), new Expression(TObject.stringVal("ALLOW"))); if (t3.getRowCount() > 0) { return true; } // No DENY or ALLOW entries for this host so deny access. return false; } /** * Returns true if a user exists in this database, otherwise returns * false. *

* NOTE: Assumes exclusive lock on DatabaseConnection. */ public boolean userExists(DatabaseQueryContext context, String username) throws DatabaseException { DataTable table = context.getTable(SYS_PASSWORD); Variable c1 = table.getResolvedVariable(0); // All sUSRPassword where UserName = %username% Table t = table.simpleSelect(context, c1, Operator.get("="), new Expression(TObject.stringVal(username))); return t.getRowCount() > 0; } /** * Creates and adds a new user to this database. The User object for * the user is returned. *

* If the user is already defined by the database then an error is generated. *

* NOTE: Assumes exclusive lock on DatabaseConnection. */ public void createUser(DatabaseQueryContext context, String username, String password) throws DatabaseException { if (username == null || password == null) { throw new DatabaseException("Username or password can not be NULL."); } // The username must be more than 1 character if (username.length() <= 1) { throw new DatabaseException("Username must be at least 2 characters."); } // The password must be more than 1 character if (password.length() <= 1) { throw new DatabaseException("Password must be at least 2 characters."); } // Check the user doesn't already exist if (userExists(context, username)) { throw new DatabaseException("User '" + username + "' already exists."); } // Some usernames are reserved words if (username.equalsIgnoreCase("public")) { throw new DatabaseException("User '" + username + "' not allowed - reserved."); } // Usernames starting with @, &, # and $ are reserved for system // identifiers char c = username.charAt(0); if (c == '@' || c == '&' || c == '#' || c == '$') { throw new DatabaseException("User name can not start with '" + c + "' character."); } // Add this user to the password table. DataTable table = context.getTable(SYS_PASSWORD); RowData rdat = new RowData(table); rdat.setColumnDataFromObject(0, username); rdat.setColumnDataFromObject(1, password); table.add(rdat); } /** * Deletes all the groups the user belongs to. This is intended for a user * alter command for setting the groups a user belongs to. *

* NOTE: Assumes exclusive lock on DatabaseConnection. */ public void deleteAllUserGroups(DatabaseQueryContext context, String username) throws DatabaseException { Operator EQUALS_OP = Operator.get("="); Expression USER_EXPR = new Expression(TObject.stringVal(username)); DataTable table = context.getTable(SYS_USERPRIV); Variable c1 = table.getResolvedVariable(0); // All sUSRUserPriv where UserName = %username% Table t = table.simpleSelect(context, c1, EQUALS_OP, USER_EXPR); // Delete all the groups table.delete(t); } /** * Deletes the user from the system. This also deletes all information * associated with a user such as the groups they belong to. It does not * delete the privs a user has set up. *

* NOTE: Assumes exclusive lock on DatabaseConnection. */ public void deleteUser(DatabaseQueryContext context, String username) throws DatabaseException { // PENDING: This should check if there are any tables the user has setup // and not allow the delete if there are. Operator EQUALS_OP = Operator.get("="); Expression USER_EXPR = new Expression(TObject.stringVal(username)); // First delete all the groups from the user priv table deleteAllUserGroups(context, username); // Now delete the username from the sUSRUserConnectPriv table DataTable table = context.getTable(SYS_USERCONNECT); Variable c1 = table.getResolvedVariable(0); Table t = table.simpleSelect(context, c1, EQUALS_OP, USER_EXPR); table.delete(t); // Finally delete the username from the sUSRPassword table table = context.getTable(SYS_PASSWORD); c1 = table.getResolvedVariable(0); t = table.simpleSelect(context, c1, EQUALS_OP, USER_EXPR); table.delete(t); } /** * Alters the password of the user but otherwise does not change any * information about the user. *

* NOTE: Assumes exclusive lock on DatabaseConnection. */ public void alterUserPassword(DatabaseQueryContext context, String username, String password) throws DatabaseException { Operator EQUALS_OP = Operator.get("="); Expression USER_EXPR = new Expression(TObject.stringVal(username)); // Delete the current username from the sUSRPassword table DataTable table = context.getTable(SYS_PASSWORD); Variable c1 = table.getResolvedVariable(0); Table t = table.simpleSelect(context, c1, EQUALS_OP, USER_EXPR); if (t.getRowCount() == 1) { table.delete(t); // Add the new username table = context.getTable(SYS_PASSWORD); RowData rdat = new RowData(table); rdat.setColumnDataFromObject(0, username); rdat.setColumnDataFromObject(1, password); table.add(rdat); } else { throw new DatabaseException("Username '" + username + "' was not found."); } } /** * Returns the list of all user groups the user belongs to. */ public String[] groupsUserBelongsTo(DatabaseQueryContext context, String username) throws DatabaseException { DataTable table = context.getTable(SYS_USERPRIV); Variable c1 = table.getResolvedVariable(0); // All sUSRUserPriv where UserName = %username% Table t = table.simpleSelect(context, c1, Operator.get("="), new Expression(TObject.stringVal(username))); int sz = t.getRowCount(); String[] groups = new String[sz]; RowEnumeration row_enum = t.rowEnumeration(); int i = 0; while (row_enum.hasMoreRows()) { groups[i] = t.getCellContents(1, row_enum.nextRowIndex()).getObject().toString(); ++i; } return groups; } /** * Returns true if the given user belongs to the given group otherwise * returns false. *

* NOTE: Assumes exclusive lock on DatabaseConnection. */ public boolean userBelongsToGroup(DatabaseQueryContext context, String username, String group) throws DatabaseException { DataTable table = context.getTable(SYS_USERPRIV); Variable c1 = table.getResolvedVariable(0); Variable c2 = table.getResolvedVariable(1); // All sUSRUserPriv where UserName = %username% Table t = table.simpleSelect(context, c1, Operator.get("="), new Expression(TObject.stringVal(username))); // All from this set where PrivGroupName = %group% t = t.simpleSelect(context, c2, Operator.get("="), new Expression(TObject.stringVal(group))); return t.getRowCount() > 0; } /** * Adds the user to the given group. This makes an entry in the sUSRUserPriv * for this user and the given group. If the user already belongs to the * group then no changes are made. *

* It is important that any security checks for ensuring the grantee is * allowed to give the user these privs are performed before this method is * called. *

* NOTE: Assumes exclusive lock on DatabaseConnection. */ public void addUserToGroup(DatabaseQueryContext context, String username, String group) throws DatabaseException { if (group == null) { throw new DatabaseException("Can add NULL group."); } // Groups starting with @, &, # and $ are reserved for system // identifiers char c = group.charAt(0); if (c == '@' || c == '&' || c == '#' || c == '$') { throw new DatabaseException("The group name can not start with '" + c + "' character."); } // Check the user doesn't belong to the group if (!userBelongsToGroup(context, username, group)) { // The user priv table DataTable table = context.getTable(SYS_USERPRIV); // Add this user to the group. RowData rdat = new RowData(table); rdat.setColumnDataFromObject(0, username); rdat.setColumnDataFromObject(1, group); table.add(rdat); } // NOTE: we silently ignore the case when a user already belongs to the // group. } /** * Sets the lock status for the given user. If a user account if locked, it * is rejected from logging in to the database. *

* It is important that any security checks to determine whether the process * setting the user lock is allowed to do so are performed before this method is * called. *

   * NOTE: Assumes exclusive lock on DatabaseConnection.
   */
  public void setUserLock(DatabaseQueryContext context, User user,
                          boolean lock_status) throws DatabaseException {

    String username = user.getUserName();
    // Internally we implement this by adding the user to the #locked group.
    DataTable table = context.getTable(SYS_USERPRIV);
    Variable c1 = table.getResolvedVariable(0);
    Variable c2 = table.getResolvedVariable(1);
    // All sUSRUserPriv where UserName = %username%
    Table t = table.simpleSelect(context, c1, Operator.get("="),
                                new Expression(TObject.stringVal(username)));
    // All from this set where PrivGroupName = %group%
    t = t.simpleSelect(context, c2, Operator.get("="),
                       new Expression(TObject.stringVal(LOCK_GROUP)));

    boolean user_belongs_to_lock_group = t.getRowCount() > 0;
    if (lock_status && !user_belongs_to_lock_group) {
      // Lock the user by adding the user to the lock group
      // Add this user to the locked group.
      RowData rdat = new RowData(table);
      rdat.setColumnDataFromObject(0, username);
      rdat.setColumnDataFromObject(1, LOCK_GROUP);
      table.add(rdat);
    }
    else if (!lock_status && user_belongs_to_lock_group) {
      // Unlock the user by removing the user from the lock group
      // Remove this user from the locked group.
      // NOTE: 't' holds exactly the (user, #locked) rows selected above, so
      //   deleting it from 'table' removes only the lock membership.
      table.delete(t);
    }

  }

  /**
   * Grants the given user access to connect to the database from the
   * given host address.  The 'protocol' string is the connecting protocol
   * which can be either 'TCP' or 'Local'.  The 'host' string is the actual
   * host that is connecting.  For example, if the protocol was TCP then
   * the client host may be '127.0.0.1' for localhost.
*/ public void grantHostAccessToUser(DatabaseQueryContext context, String user, String protocol, String host) throws DatabaseException { // The user connect priv table DataTable table = context.getTable(SYS_USERCONNECT); // Add the protocol and host to the table RowData rdat = new RowData(table); rdat.setColumnDataFromObject(0, user); rdat.setColumnDataFromObject(1, protocol); rdat.setColumnDataFromObject(2, host); rdat.setColumnDataFromObject(3, "ALLOW"); table.add(rdat); } /** * Returns true if the user belongs to the secure access priv group. */ private boolean userHasSecureAccess(DatabaseQueryContext context, User user) throws DatabaseException { // The internal secure user has full privs on everything if (user.getUserName().equals(INTERNAL_SECURE_USERNAME)) { return true; } return userBelongsToGroup(context, user.getUserName(), SECURE_GROUP); } /** * Returns true if the grant manager permits a schema operation (eg, * CREATE, ALTER and DROP table operations) for the given user. */ private boolean userHasSchemaGrant(DatabaseQueryContext context, User user, String schema, int grant) throws DatabaseException { // The internal secure user has full privs on everything if (user.getUserName().equals(INTERNAL_SECURE_USERNAME)) { return true; } // No users have schema access to the system schema. if (schema.equals(SYSTEM_SCHEMA)) { return false; } // Ask the grant manager if there are any privs setup for this user on the // given schema. GrantManager manager = context.getGrantManager(); Privileges privs = manager.userGrants( GrantManager.SCHEMA, schema, user.getUserName()); return privs.permits(grant); } /** * Returns true if the grant manager permits a table object operation (eg, * SELECT, INSERT, UPDATE, DELETE and COMPACT table operations) for the given * user. 
*/ private boolean userHasTableObjectGrant(DatabaseQueryContext context, User user, TableName table_name, Variable[] columns, int grant) throws DatabaseException { // The internal secure user has full privs on everything if (user.getUserName().equals(INTERNAL_SECURE_USERNAME)) { return true; } // PENDING: Support column level privileges. // Ask the grant manager if there are any privs setup for this user on the // given schema. GrantManager manager = context.getGrantManager(); Privileges privs = manager.userGrants( GrantManager.TABLE, table_name.toString(), user.getUserName()); return privs.permits(grant); } /** * Returns true if the user is permitted to create, alter and drop user * information from the database, otherwise returns false. Only members of * the 'secure access' group, or the 'user manager' group can do this. */ public boolean canUserCreateAndDropUsers( DatabaseQueryContext context, User user) throws DatabaseException { return (userHasSecureAccess(context, user) || userBelongsToGroup(context, user.getUserName(), USER_MANAGER_GROUP)); } /** * Returns true if the user is permitted to create and drop schema's in the * database, otherwise returns false. Only members of the 'secure access' * group, or the 'schema manager' group can do this. */ public boolean canUserCreateAndDropSchema( DatabaseQueryContext context, User user, String schema) throws DatabaseException { // The internal secure user has full privs on everything if (user.getUserName().equals(INTERNAL_SECURE_USERNAME)) { return true; } // No user can create or drop the system schema. if (schema.equals(SYSTEM_SCHEMA)) { return false; } else { return (userHasSecureAccess(context, user) || userBelongsToGroup(context, user.getUserName(), SCHEMA_MANAGER_GROUP)); } } /** * Returns true if the user can shut down the database server. A user can * shut down the database if they are a member of the 'secure acces' group. 
*/ public boolean canUserShutDown(DatabaseQueryContext context, User user) throws DatabaseException { return userHasSecureAccess(context, user); } /** * Returns true if the user is allowed to execute the given stored procedure. */ public boolean canUserExecuteStoredProcedure(DatabaseQueryContext context, User user, String procedure_name) throws DatabaseException { // Currently you can only execute a procedure if you are a member of the // secure access priv group. return userHasSecureAccess(context, user); } // ---- General schema level privs ---- /** * Returns true if the user can create a table or view with the given name, * otherwise returns false. */ public boolean canUserCreateTableObject( DatabaseQueryContext context, User user, TableName table) throws DatabaseException { if (userHasSchemaGrant(context, user, table.getSchema(), Privileges.CREATE)) { return true; } // If the user belongs to the secure access priv group, return true return userHasSecureAccess(context, user); } /** * Returns true if the user can alter a table or view with the given name, * otherwise returns false. */ public boolean canUserAlterTableObject( DatabaseQueryContext context, User user, TableName table) throws DatabaseException { if (userHasSchemaGrant(context, user, table.getSchema(), Privileges.ALTER)) { return true; } // If the user belongs to the secure access priv group, return true return userHasSecureAccess(context, user); } /** * Returns true if the user can drop a table or view with the given name, * otherwise returns false. 
*/ public boolean canUserDropTableObject( DatabaseQueryContext context, User user, TableName table) throws DatabaseException { if (userHasSchemaGrant(context, user, table.getSchema(), Privileges.DROP)) { return true; } // If the user belongs to the secure access priv group, return true return userHasSecureAccess(context, user); } // ---- Check table object privs ---- /** * Returns true if the user can select from a table or view with the given * name and given columns, otherwise returns false. */ public boolean canUserSelectFromTableObject( DatabaseQueryContext context, User user, TableName table, Variable[] columns) throws DatabaseException { if (userHasTableObjectGrant(context, user, table, columns, Privileges.SELECT)) { return true; } // If the user belongs to the secure access priv group, return true return userHasSecureAccess(context, user); } /** * Returns true if the user can insert into a table or view with the given * name and given columns, otherwise returns false. */ public boolean canUserInsertIntoTableObject( DatabaseQueryContext context, User user, TableName table, Variable[] columns) throws DatabaseException { if (userHasTableObjectGrant(context, user, table, columns, Privileges.INSERT)) { return true; } // If the user belongs to the secure access priv group, return true return userHasSecureAccess(context, user); } /** * Returns true if the user can update a table or view with the given * name and given columns, otherwise returns false. */ public boolean canUserUpdateTableObject( DatabaseQueryContext context, User user, TableName table, Variable[] columns) throws DatabaseException { if (userHasTableObjectGrant(context, user, table, columns, Privileges.UPDATE)) { return true; } // If the user belongs to the secure access priv group, return true return userHasSecureAccess(context, user); } /** * Returns true if the user can delete from a table or view with the given * name and given columns, otherwise returns false. 
*/ public boolean canUserDeleteFromTableObject( DatabaseQueryContext context, User user, TableName table) throws DatabaseException { if (userHasTableObjectGrant(context, user, table, null, Privileges.DELETE)) { return true; } // If the user belongs to the secure access priv group, return true return userHasSecureAccess(context, user); } /** * Returns true if the user can compact a table with the given name, * otherwise returns false. */ public boolean canUserCompactTableObject( DatabaseQueryContext context, User user, TableName table) throws DatabaseException { if (userHasTableObjectGrant(context, user, table, null, Privileges.COMPACT)) { return true; } // If the user belongs to the secure access priv group, return true return userHasSecureAccess(context, user); } /** * Returns true if the user can create a procedure with the given name, * otherwise returns false. */ public boolean canUserCreateProcedureObject( DatabaseQueryContext context, User user, TableName table) throws DatabaseException { if (userHasSchemaGrant(context, user, table.getSchema(), Privileges.CREATE)) { return true; } // If the user belongs to the secure access priv group, return true return userHasSecureAccess(context, user); } /** * Returns true if the user can drop a procedure with the given name, * otherwise returns false. */ public boolean canUserDropProcedureObject( DatabaseQueryContext context, User user, TableName table) throws DatabaseException { if (userHasSchemaGrant(context, user, table.getSchema(), Privileges.DROP)) { return true; } // If the user belongs to the secure access priv group, return true return userHasSecureAccess(context, user); } /** * Returns true if the user can create a sequence with the given name, * otherwise returns false. 
   */
  public boolean canUserCreateSequenceObject(
         DatabaseQueryContext context, User user, TableName table)
                                                  throws DatabaseException {
    // A CREATE grant on the schema allows the operation.
    if (userHasSchemaGrant(context, user,
                           table.getSchema(), Privileges.CREATE)) {
      return true;
    }
    // If the user belongs to the secure access priv group, return true
    return userHasSecureAccess(context, user);
  }

  /**
   * Returns true if the user can drop a sequence with the given name,
   * otherwise returns false.
   */
  public boolean canUserDropSequenceObject(
         DatabaseQueryContext context, User user, TableName table)
                                                  throws DatabaseException {
    // A DROP grant on the schema allows the operation.
    if (userHasSchemaGrant(context, user,
                           table.getSchema(), Privileges.DROP)) {
      return true;
    }
    // If the user belongs to the secure access priv group, return true
    return userHasSecureAccess(context, user);
  }

  // ---------- Schema management ----------

  /**
   * Creates the schema information tables introduced in version 0.90.  The
   * schema information tables are;
   */
  void createSchemaInfoTables(DatabaseConnection connection)
                                                  throws DatabaseException {
    connection.createSchema(DEFAULT_SCHEMA, "DEFAULT");
    connection.createSchema(JDBC_SCHEMA, "SYSTEM");
  }

  /**
   * Creates all the system views.
   */
  private void createSystemViews(DatabaseConnection connection)
                                                  throws DatabaseException {
    // Obtain the JDBC interface.
    try {
      Connection jdbc = connection.getJDBCConnection();

      // Statement used to issue each view definition.
      Statement stmt = jdbc.createStatement();

      // This view shows the grants that the user has (no join, only
      // priv_bit).
stmt.executeUpdate( "CREATE VIEW SYS_JDBC.ThisUserSimpleGrant AS " + " SELECT \"priv_bit\", \"object\", \"param\", \"grantee\", " + " \"grant_option\", \"granter\" " + " FROM SYS_INFO.sUSRGrant " + " WHERE ( grantee = user() OR grantee = '@PUBLIC' )"); // This view shows the grants that the user is allowed to see stmt.executeUpdate( "CREATE VIEW SYS_JDBC.ThisUserGrant AS " + " SELECT \"description\", \"object\", \"param\", \"grantee\", " + " \"grant_option\", \"granter\" " + " FROM SYS_INFO.sUSRGrant, SYS_INFO.sUSRPrivMap " + " WHERE ( grantee = user() OR grantee = '@PUBLIC' )" + " AND sUSRGrant.priv_bit = sUSRPrivMap.priv_bit"); // A view that represents the list of schema this user is allowed to view // the contents of. stmt.executeUpdate( "CREATE VIEW SYS_JDBC.ThisUserSchemaInfo AS " + " SELECT * FROM SYS_INFO.sUSRSchemaInfo " + " WHERE \"name\" IN ( " + " SELECT \"param\" " + " FROM SYS_JDBC.ThisUserGrant " + " WHERE \"object\" = 65 " + " AND \"description\" = 'LIST' )"); // A view that exposes the sUSRTableColumn table but only for the tables // this user has read access to. stmt.executeUpdate( "CREATE VIEW SYS_JDBC.ThisUserTableColumns AS " + " SELECT * FROM SYS_INFO.sUSRTableColumns " + " WHERE \"schema\" IN ( " + " SELECT \"name\" FROM SYS_JDBC.ThisUserSchemaInfo )"); // A view that exposes the sUSRTableInfo table but only for the tables // this user has read access to. 
stmt.executeUpdate( "CREATE VIEW SYS_JDBC.ThisUserTableInfo AS " + " SELECT * FROM SYS_INFO.sUSRTableInfo " + " WHERE \"schema\" IN ( " + " SELECT \"name\" FROM SYS_JDBC.ThisUserSchemaInfo )"); // A JDBC helper view for the 'getTables' meta-data method stmt.executeUpdate( " CREATE VIEW SYS_JDBC.Tables AS " + " SELECT NULL AS \"TABLE_CAT\", \n" + " \"schema\" AS \"TABLE_SCHEM\", \n" + " \"name\" AS \"TABLE_NAME\", \n" + " \"type\" AS \"TABLE_TYPE\", \n" + " \"other\" AS \"REMARKS\", \n" + " NULL AS \"TYPE_CAT\", \n" + " NULL AS \"TYPE_SCHEM\", \n" + " NULL AS \"TYPE_NAME\", \n" + " NULL AS \"SELF_REFERENCING_COL_NAME\", \n" + " NULL AS \"REF_GENERATION\" \n" + " FROM SYS_JDBC.ThisUserTableInfo \n"); // A JDBC helper view for the 'getSchemas' meta-data method stmt.executeUpdate( " CREATE VIEW SYS_JDBC.Schemas AS " + " SELECT \"name\" AS \"TABLE_SCHEM\", \n" + " NULL AS \"TABLE_CATALOG\" \n" + " FROM SYS_JDBC.ThisUserSchemaInfo\n"); // A JDBC helper view for the 'getCatalogs' meta-data method stmt.executeUpdate( " CREATE VIEW SYS_JDBC.Catalogs AS " + " SELECT NULL AS \"TABLE_CAT\" \n" + " FROM SYS_INFO.sUSRSchemaInfo\n" + // Hacky, this will generate a 0 row " WHERE FALSE\n"); // table. 
// A JDBC helper view for the 'getColumns' meta-data method stmt.executeUpdate( " CREATE VIEW SYS_JDBC.Columns AS " + " SELECT NULL AS \"TABLE_CAT\",\n" + " \"schema\" AS \"TABLE_SCHEM\",\n" + " \"table\" AS \"TABLE_NAME\",\n" + " \"column\" AS \"COLUMN_NAME\",\n" + " \"sql_type\" AS \"DATA_TYPE\",\n" + " \"type_desc\" AS \"TYPE_NAME\",\n" + " IF(\"size\" = -1, 1024, \"size\") AS \"COLUMN_SIZE\",\n" + " NULL AS \"BUFFER_LENGTH\",\n" + " \"scale\" AS \"DECIMAL_DIGITS\",\n" + " IF(\"sql_type\" = -7, 2, 10) AS \"NUM_PREC_RADIX\",\n" + " IF(\"not_null\", 0, 1) AS \"NULLABLE\",\n" + " '' AS \"REMARKS\",\n" + " \"default\" AS \"COLUMN_DEF\",\n" + " NULL AS \"SQL_DATA_TYPE\",\n" + " NULL AS \"SQL_DATETIME_SUB\",\n" + " IF(\"size\" = -1, 1024, \"size\") AS \"CHAR_OCTET_LENGTH\",\n" + " \"seq_no\" + 1 AS \"ORDINAL_POSITION\",\n" + " IF(\"not_null\", 'NO', 'YES') AS \"IS_NULLABLE\"\n" + " FROM SYS_JDBC.ThisUserTableColumns\n"); // A JDBC helper view for the 'getColumnPrivileges' meta-data method stmt.executeUpdate( " CREATE VIEW SYS_JDBC.ColumnPrivileges AS " + " SELECT \"TABLE_CAT\",\n" + " \"TABLE_SCHEM\",\n" + " \"TABLE_NAME\",\n" + " \"COLUMN_NAME\",\n" + " IF(\"ThisUserGrant.granter\" = '@SYSTEM', \n" + " NULL, \"ThisUserGrant.granter\") AS \"GRANTOR\",\n" + " IF(\"ThisUserGrant.grantee\" = '@PUBLIC', \n" + " 'public', \"ThisUserGrant.grantee\") AS \"GRANTEE\",\n" + " \"ThisUserGrant.description\" AS \"PRIVILEGE\",\n" + " IF(\"grant_option\" = 'true', 'YES', 'NO') AS \"IS_GRANTABLE\" \n" + " FROM SYS_JDBC.Columns, SYS_JDBC.ThisUserGrant \n" + " WHERE CONCAT(Columns.TABLE_SCHEM, '.', Columns.TABLE_NAME) = \n" + " ThisUserGrant.param \n" + " AND SYS_JDBC.ThisUserGrant.object = 1 \n" + " AND SYS_JDBC.ThisUserGrant.description IS NOT NULL \n"); // A JDBC helper view for the 'getTablePrivileges' meta-data method stmt.executeUpdate( " CREATE VIEW SYS_JDBC.TablePrivileges AS " + " SELECT \"TABLE_CAT\",\n" + " \"TABLE_SCHEM\",\n" + " \"TABLE_NAME\",\n" + " 
IF(\"ThisUserGrant.granter\" = '@SYSTEM', \n" + " NULL, \"ThisUserGrant.granter\") AS \"GRANTOR\",\n" + " IF(\"ThisUserGrant.grantee\" = '@PUBLIC', \n" + " 'public', \"ThisUserGrant.grantee\") AS \"GRANTEE\",\n" + " \"ThisUserGrant.description\" AS \"PRIVILEGE\",\n" + " IF(\"grant_option\" = 'true', 'YES', 'NO') AS \"IS_GRANTABLE\" \n" + " FROM SYS_JDBC.Tables, SYS_JDBC.ThisUserGrant \n" + " WHERE CONCAT(Tables.TABLE_SCHEM, '.', Tables.TABLE_NAME) = \n" + " ThisUserGrant.param \n" + " AND SYS_JDBC.ThisUserGrant.object = 1 \n" + " AND SYS_JDBC.ThisUserGrant.description IS NOT NULL \n"); // A JDBC helper view for the 'getPrimaryKeys' meta-data method stmt.executeUpdate( " CREATE VIEW SYS_JDBC.PrimaryKeys AS " + " SELECT NULL \"TABLE_CAT\",\n" + " \"schema\" \"TABLE_SCHEM\",\n" + " \"table\" \"TABLE_NAME\",\n" + " \"column\" \"COLUMN_NAME\",\n" + " \"SYS_INFO.sUSRPrimaryColumns.seq_no\" \"KEY_SEQ\",\n" + " \"name\" \"PK_NAME\"\n" + " FROM SYS_INFO.sUSRPKeyInfo, SYS_INFO.sUSRPrimaryColumns\n" + " WHERE sUSRPKeyInfo.id = sUSRPrimaryColumns.pk_id\n" + " AND \"schema\" IN\n" + " ( SELECT \"name\" FROM SYS_JDBC.ThisUserSchemaInfo )\n"); // A JDBC helper view for the 'getImportedKeys' meta-data method stmt.executeUpdate( " CREATE VIEW SYS_JDBC.ImportedKeys AS " + " SELECT NULL \"PKTABLE_CAT\",\n" + " \"sUSRFKeyInfo.ref_schema\" \"PKTABLE_SCHEM\",\n" + " \"sUSRFKeyInfo.ref_table\" \"PKTABLE_NAME\",\n" + " \"sUSRForeignColumns.pcolumn\" \"PKCOLUMN_NAME\",\n" + " NULL \"FKTABLE_CAT\",\n" + " \"sUSRFKeyInfo.schema\" \"FKTABLE_SCHEM\",\n" + " \"sUSRFKeyInfo.table\" \"FKTABLE_NAME\",\n" + " \"sUSRForeignColumns.fcolumn\" \"FKCOLUMN_NAME\",\n" + " \"sUSRForeignColumns.seq_no\" \"KEY_SEQ\",\n" + " I_FRULE_CONVERT(\"sUSRFKeyInfo.update_rule\") \"UPDATE_RULE\",\n" + " I_FRULE_CONVERT(\"sUSRFKeyInfo.delete_rule\") \"DELETE_RULE\",\n" + " \"sUSRFKeyInfo.name\" \"FK_NAME\",\n" + " NULL \"PK_NAME\",\n" + " \"sUSRFKeyInfo.deferred\" \"DEFERRABILITY\"\n" + " FROM SYS_INFO.sUSRFKeyInfo, 
SYS_INFO.sUSRForeignColumns\n" + " WHERE sUSRFKeyInfo.id = sUSRForeignColumns.fk_id\n" + " AND \"sUSRFKeyInfo.schema\" IN\n" + " ( SELECT \"name\" FROM SYS_JDBC.ThisUserSchemaInfo )\n"); // A JDBC helper view for the 'getExportedKeys' meta-data method stmt.executeUpdate( " CREATE VIEW SYS_JDBC.ExportedKeys AS " + " SELECT NULL \"PKTABLE_CAT\",\n" + " \"sUSRFKeyInfo.ref_schema\" \"PKTABLE_SCHEM\",\n" + " \"sUSRFKeyInfo.ref_table\" \"PKTABLE_NAME\",\n" + " \"sUSRForeignColumns.pcolumn\" \"PKCOLUMN_NAME\",\n" + " NULL \"FKTABLE_CAT\",\n" + " \"sUSRFKeyInfo.schema\" \"FKTABLE_SCHEM\",\n" + " \"sUSRFKeyInfo.table\" \"FKTABLE_NAME\",\n" + " \"sUSRForeignColumns.fcolumn\" \"FKCOLUMN_NAME\",\n" + " \"sUSRForeignColumns.seq_no\" \"KEY_SEQ\",\n" + " I_FRULE_CONVERT(\"sUSRFKeyInfo.update_rule\") \"UPDATE_RULE\",\n" + " I_FRULE_CONVERT(\"sUSRFKeyInfo.delete_rule\") \"DELETE_RULE\",\n" + " \"sUSRFKeyInfo.name\" \"FK_NAME\",\n" + " NULL \"PK_NAME\",\n" + " \"sUSRFKeyInfo.deferred\" \"DEFERRABILITY\"\n" + " FROM SYS_INFO.sUSRFKeyInfo, SYS_INFO.sUSRForeignColumns\n" + " WHERE sUSRFKeyInfo.id = sUSRForeignColumns.fk_id\n" + " AND \"sUSRFKeyInfo.schema\" IN\n" + " ( SELECT \"name\" FROM SYS_JDBC.ThisUserSchemaInfo )\n"); // A JDBC helper view for the 'getCrossReference' meta-data method stmt.executeUpdate( " CREATE VIEW SYS_JDBC.CrossReference AS " + " SELECT NULL \"PKTABLE_CAT\",\n" + " \"sUSRFKeyInfo.ref_schema\" \"PKTABLE_SCHEM\",\n" + " \"sUSRFKeyInfo.ref_table\" \"PKTABLE_NAME\",\n" + " \"sUSRForeignColumns.pcolumn\" \"PKCOLUMN_NAME\",\n" + " NULL \"FKTABLE_CAT\",\n" + " \"sUSRFKeyInfo.schema\" \"FKTABLE_SCHEM\",\n" + " \"sUSRFKeyInfo.table\" \"FKTABLE_NAME\",\n" + " \"sUSRForeignColumns.fcolumn\" \"FKCOLUMN_NAME\",\n" + " \"sUSRForeignColumns.seq_no\" \"KEY_SEQ\",\n" + " I_FRULE_CONVERT(\"sUSRFKeyInfo.update_rule\") \"UPDATE_RULE\",\n" + " I_FRULE_CONVERT(\"sUSRFKeyInfo.delete_rule\") \"DELETE_RULE\",\n" + " \"sUSRFKeyInfo.name\" \"FK_NAME\",\n" + " NULL \"PK_NAME\",\n" + " 
\"sUSRFKeyInfo.deferred\" \"DEFERRABILITY\"\n" + " FROM SYS_INFO.sUSRFKeyInfo, SYS_INFO.sUSRForeignColumns\n" + " WHERE sUSRFKeyInfo.id = sUSRForeignColumns.fk_id\n" + " AND \"sUSRFKeyInfo.schema\" IN\n" + " ( SELECT \"name\" FROM SYS_JDBC.ThisUserSchemaInfo )\n"); } catch (SQLException e) { if (e instanceof MSQLException) { MSQLException msqle = (MSQLException) e; Debug().write(Lvl.ERROR, this, msqle.getServerErrorStackTrace()); } Debug().writeException(Lvl.ERROR, e); throw new RuntimeException("SQL Error: " + e.getMessage()); } } /** * Creates all the priv/password system tables. */ private void createSystemTables(DatabaseConnection connection) throws DatabaseException { // --- The user management tables --- DataTableDef sUSRPassword = new DataTableDef(); sUSRPassword.setTableName(SYS_PASSWORD); sUSRPassword.addColumn(DataTableColumnDef.createStringColumn("UserName")); sUSRPassword.addColumn(DataTableColumnDef.createStringColumn("Password")); DataTableDef sUSRUserPriv = new DataTableDef(); sUSRUserPriv.setTableName(SYS_USERPRIV); sUSRUserPriv.addColumn(DataTableColumnDef.createStringColumn("UserName")); sUSRUserPriv.addColumn( DataTableColumnDef.createStringColumn("PrivGroupName")); DataTableDef sUSRUserConnectPriv = new DataTableDef(); sUSRUserConnectPriv.setTableName(SYS_USERCONNECT); sUSRUserConnectPriv.addColumn( DataTableColumnDef.createStringColumn("UserName")); sUSRUserConnectPriv.addColumn( DataTableColumnDef.createStringColumn("Protocol")); sUSRUserConnectPriv.addColumn( DataTableColumnDef.createStringColumn("Host")); sUSRUserConnectPriv.addColumn( DataTableColumnDef.createStringColumn("Access")); DataTableDef sUSRGrant = new DataTableDef(); sUSRGrant.setTableName(SYS_GRANTS); sUSRGrant.addColumn(DataTableColumnDef.createNumericColumn("priv_bit")); sUSRGrant.addColumn(DataTableColumnDef.createNumericColumn("object")); sUSRGrant.addColumn(DataTableColumnDef.createStringColumn("param")); 
    sUSRGrant.addColumn(DataTableColumnDef.createStringColumn("grantee"));
    sUSRGrant.addColumn(DataTableColumnDef.createStringColumn("grant_option"));
    sUSRGrant.addColumn(DataTableColumnDef.createStringColumn("granter"));

    // Registered services (name -> implementing class).
    DataTableDef sUSRService = new DataTableDef();
    sUSRService.setTableName(SYS_SERVICE);
    sUSRService.addColumn(DataTableColumnDef.createStringColumn("name"));
    sUSRService.addColumn(DataTableColumnDef.createStringColumn("class"));
    sUSRService.addColumn(DataTableColumnDef.createStringColumn("type"));

    // Registered function factories.
    DataTableDef sUSRFunctionFactory = new DataTableDef();
    sUSRFunctionFactory.setTableName(SYS_FUNCTIONFACTORY);
    sUSRFunctionFactory.addColumn(
                           DataTableColumnDef.createStringColumn("name"));
    sUSRFunctionFactory.addColumn(
                           DataTableColumnDef.createStringColumn("class"));
    sUSRFunctionFactory.addColumn(
                           DataTableColumnDef.createStringColumn("type"));

    // User-defined functions/procedures.
    DataTableDef sUSRFunction = new DataTableDef();
    sUSRFunction.setTableName(SYS_FUNCTION);
    sUSRFunction.addColumn(DataTableColumnDef.createStringColumn("schema"));
    sUSRFunction.addColumn(DataTableColumnDef.createStringColumn("name"));
    sUSRFunction.addColumn(DataTableColumnDef.createStringColumn("type"));
    sUSRFunction.addColumn(DataTableColumnDef.createStringColumn("location"));
    sUSRFunction.addColumn(DataTableColumnDef.createStringColumn("return_type"));
    sUSRFunction.addColumn(DataTableColumnDef.createStringColumn("args_type"));
    sUSRFunction.addColumn(DataTableColumnDef.createStringColumn("username"));

    // View definitions.  'query' and 'data' are serialized binary forms of
    // the view query / plan.
    DataTableDef sUSRView = new DataTableDef();
    sUSRView.setTableName(SYS_VIEW);
    sUSRView.addColumn(DataTableColumnDef.createStringColumn("schema"));
    sUSRView.addColumn(DataTableColumnDef.createStringColumn("name"));
    sUSRView.addColumn(DataTableColumnDef.createBinaryColumn("query"));
    sUSRView.addColumn(DataTableColumnDef.createBinaryColumn("data"));
    sUSRView.addColumn(DataTableColumnDef.createStringColumn("username"));

    // Labels attached to database objects.
    DataTableDef sUSRLabel = new DataTableDef();
    sUSRLabel.setTableName(SYS_LABEL);
    sUSRLabel.addColumn(DataTableColumnDef.createNumericColumn("object_type"));
    sUSRLabel.addColumn(DataTableColumnDef.createStringColumn("object_name"));
    sUSRLabel.addColumn(DataTableColumnDef.createStringColumn("label"));

    // Triggers defined on table data events.
    DataTableDef sUSRDataTrigger = new DataTableDef();
    sUSRDataTrigger.setTableName(SYS_DATA_TRIGGER);
    sUSRDataTrigger.addColumn(DataTableColumnDef.createStringColumn("schema"));
    sUSRDataTrigger.addColumn(DataTableColumnDef.createStringColumn("name"));
    sUSRDataTrigger.addColumn(DataTableColumnDef.createNumericColumn("type"));
    sUSRDataTrigger.addColumn(DataTableColumnDef.createStringColumn("on_object"));
    sUSRDataTrigger.addColumn(DataTableColumnDef.createStringColumn("action"));
    sUSRDataTrigger.addColumn(DataTableColumnDef.createBinaryColumn("misc"));
    sUSRDataTrigger.addColumn(DataTableColumnDef.createStringColumn("username"));

    // Create the tables.
    // NOTE(review): the numeric arguments are presumably data/index sector
    // sizes passed through to the conglomerate (sUSRGrant gets a larger
    // first value, 195) -- confirm against alterCreateTable's declaration.
    connection.alterCreateTable(sUSRPassword, 91, 128);
    connection.alterCreateTable(sUSRUserPriv, 91, 128);
    connection.alterCreateTable(sUSRUserConnectPriv, 91, 128);
    connection.alterCreateTable(sUSRGrant, 195, 128);
    connection.alterCreateTable(sUSRService, 91, 128);
    connection.alterCreateTable(sUSRFunctionFactory, 91, 128);
    connection.alterCreateTable(sUSRFunction, 91, 128);
    connection.alterCreateTable(sUSRView, 91, 128);
    connection.alterCreateTable(sUSRLabel, 91, 128);
    connection.alterCreateTable(sUSRDataTrigger, 91, 128);

  }

  /**
   * Sets all the standard functions and procedures available to the engine.
   * This creates an entry in the SYS_FUNCTION table for all the dynamic
   * functions and procedures.  This may not include the functions exposed
   * through the FunctionFactory interface.
   *
   * @param connection the connection used to define the procedures
   * @param admin_user the administrator user the procedures are owned by
   */
  public void setupSystemFunctions(DatabaseConnection connection,
                                   String admin_user) throws DatabaseException {

    // Grants on the internal procedures are issued by the internal secure
    // user, not by the administrator.
    final String GRANTER = INTERNAL_SECURE_USERNAME;

    // The manager handling the functions.
    ProcedureManager manager = connection.getProcedureManager();

    // Define the SYSTEM_MAKE_BACKUP procedure
    // NOTE(review): 'c' is unused apart from forcing the class reference;
    // presumably it ensures the procedure class is loadable -- confirm.
    Class c = com.mckoi.database.procedure.SystemBackup.class;
    manager.defineJavaProcedure(
        new ProcedureName(SYSTEM_SCHEMA, "SYSTEM_MAKE_BACKUP"),
        "com.mckoi.database.procedure.SystemBackup.invoke(ProcedureConnection, String)",
        TType.STRING_TYPE, new TType[] { TType.STRING_TYPE },
        admin_user);

    // -----

    // Set the grants for the procedures.
    GrantManager grants = connection.getGrantManager();

    // Revoke all existing grants on the internal stored procedures.
    grants.revokeAllGrantsOnObject(GrantManager.TABLE,
                                   "SYS_INFO.SYSTEM_MAKE_BACKUP");

    // Grant execute priv with grant option to administrator
    grants.addGrant(Privileges.PROCEDURE_EXECUTE_PRIVS,
                    GrantManager.TABLE,
                    "SYS_INFO.SYSTEM_MAKE_BACKUP",
                    admin_user, true, GRANTER);

  }

  /**
   * Clears all the grant information in the sUSRGrant table.  This should only
   * be used if we need to refresh the grant information for whatever reason
   * (such as when converting between different versions).
   */
  private void clearAllGrants(DatabaseConnection connection)
                                                   throws DatabaseException {
    DataTable grant_table = connection.getTable(SYS_GRANTS);
    // Deleting a table's own full row set removes every row.
    grant_table.delete(grant_table);
  }

  /**
   * Set up the system table grants.
   *

   * This gives the grantee user full access to sUSRPassword,
   * sUSRUserPriv, sUSRUserConnectPriv, sUSRService, sUSRFunctionFactory,
   * and sUSRFunction.  All other sUSR tables are granted SELECT only.
   * If 'grant_option' is true then the user is given the option to give the
   * grants to other users.
   *
   * @param connection the connection used to issue the grants
   * @param grantee the user (typically the administrator) receiving the grants
   */
  private void setSystemGrants(DatabaseConnection connection, String grantee)
                                                   throws DatabaseException {

    // Grants below are issued by the internal secure user.
    final String GRANTER = INTERNAL_SECURE_USERNAME;

    // Add all priv grants to those that the system user is allowed to change
    GrantManager manager = connection.getGrantManager();

    // Add schema grant for APP
    manager.addGrant(Privileges.SCHEMA_ALL_PRIVS, GrantManager.SCHEMA,
                     "APP", grantee, true, GRANTER);
    // Add public grant for SYS_INFO
    manager.addGrant(Privileges.SCHEMA_READ_PRIVS, GrantManager.SCHEMA,
                     "SYS_INFO", GrantManager.PUBLIC_USERNAME_STR,
                     false, GRANTER);
    // Add public grant for SYS_JDBC
    manager.addGrant(Privileges.SCHEMA_READ_PRIVS, GrantManager.SCHEMA,
                     "SYS_JDBC", GrantManager.PUBLIC_USERNAME_STR,
                     false, GRANTER);

    // For all tables in the SYS_INFO schema, grant all privileges to the
    // grantee (note: no grant option here, unlike the APP schema grant).
    manager.addGrantToAllTablesInSchema("SYS_INFO",
                     Privileges.TABLE_ALL_PRIVS, grantee, false, GRANTER);

    // Set the public grants for the system tables,
    manager.addGrant(Privileges.TABLE_READ_PRIVS, GrantManager.TABLE,
                     "SYS_INFO.sUSRConnectionInfo",
                     GrantManager.PUBLIC_USERNAME_STR, false, GRANTER);
    manager.addGrant(Privileges.TABLE_READ_PRIVS, GrantManager.TABLE,
                     "SYS_INFO.sUSRCurrentConnections",
                     GrantManager.PUBLIC_USERNAME_STR, false, GRANTER);
    manager.addGrant(Privileges.TABLE_READ_PRIVS, GrantManager.TABLE,
                     "SYS_INFO.sUSRDatabaseStatistics",
                     GrantManager.PUBLIC_USERNAME_STR, false, GRANTER);
    manager.addGrant(Privileges.TABLE_READ_PRIVS, GrantManager.TABLE,
                     "SYS_INFO.sUSRDatabaseVars",
                     GrantManager.PUBLIC_USERNAME_STR, false, GRANTER);
    manager.addGrant(Privileges.TABLE_READ_PRIVS, GrantManager.TABLE,
                     "SYS_INFO.sUSRProductInfo",
                     GrantManager.PUBLIC_USERNAME_STR, false, GRANTER);
    manager.addGrant(Privileges.TABLE_READ_PRIVS, GrantManager.TABLE,
                     "SYS_INFO.sUSRSQLTypeInfo",
                     GrantManager.PUBLIC_USERNAME_STR, false, GRANTER);

    // Set public grants for the system views.
    manager.addGrant(Privileges.TABLE_READ_PRIVS, GrantManager.TABLE,
                     "SYS_JDBC.ThisUserGrant",
                     GrantManager.PUBLIC_USERNAME_STR, false, GRANTER);
    manager.addGrant(Privileges.TABLE_READ_PRIVS, GrantManager.TABLE,
                     "SYS_JDBC.ThisUserSimpleGrant",
                     GrantManager.PUBLIC_USERNAME_STR, false, GRANTER);
    manager.addGrant(Privileges.TABLE_READ_PRIVS, GrantManager.TABLE,
                     "SYS_JDBC.ThisUserSchemaInfo",
                     GrantManager.PUBLIC_USERNAME_STR, false, GRANTER);
    manager.addGrant(Privileges.TABLE_READ_PRIVS, GrantManager.TABLE,
                     "SYS_JDBC.ThisUserTableColumns",
                     GrantManager.PUBLIC_USERNAME_STR, false, GRANTER);
    manager.addGrant(Privileges.TABLE_READ_PRIVS, GrantManager.TABLE,
                     "SYS_JDBC.ThisUserTableInfo",
                     GrantManager.PUBLIC_USERNAME_STR, false, GRANTER);
    manager.addGrant(Privileges.TABLE_READ_PRIVS, GrantManager.TABLE,
                     "SYS_JDBC.Tables",
                     GrantManager.PUBLIC_USERNAME_STR, false, GRANTER);
    manager.addGrant(Privileges.TABLE_READ_PRIVS, GrantManager.TABLE,
                     "SYS_JDBC.Schemas",
                     GrantManager.PUBLIC_USERNAME_STR, false, GRANTER);
    manager.addGrant(Privileges.TABLE_READ_PRIVS, GrantManager.TABLE,
                     "SYS_JDBC.Catalogs",
                     GrantManager.PUBLIC_USERNAME_STR, false, GRANTER);
    manager.addGrant(Privileges.TABLE_READ_PRIVS, GrantManager.TABLE,
                     "SYS_JDBC.Columns",
                     GrantManager.PUBLIC_USERNAME_STR, false, GRANTER);
    manager.addGrant(Privileges.TABLE_READ_PRIVS, GrantManager.TABLE,
                     "SYS_JDBC.ColumnPrivileges",
                     GrantManager.PUBLIC_USERNAME_STR, false, GRANTER);
    manager.addGrant(Privileges.TABLE_READ_PRIVS, GrantManager.TABLE,
                     "SYS_JDBC.TablePrivileges",
                     GrantManager.PUBLIC_USERNAME_STR, false, GRANTER);
    manager.addGrant(Privileges.TABLE_READ_PRIVS, GrantManager.TABLE,
                     "SYS_JDBC.PrimaryKeys",
                     GrantManager.PUBLIC_USERNAME_STR, false, GRANTER);
    manager.addGrant(Privileges.TABLE_READ_PRIVS, GrantManager.TABLE,
                     "SYS_JDBC.ImportedKeys",
                     GrantManager.PUBLIC_USERNAME_STR, false, GRANTER);
    manager.addGrant(Privileges.TABLE_READ_PRIVS, GrantManager.TABLE,
                     "SYS_JDBC.ExportedKeys",
                     GrantManager.PUBLIC_USERNAME_STR, false, GRANTER);
    manager.addGrant(Privileges.TABLE_READ_PRIVS, GrantManager.TABLE,
                     "SYS_JDBC.CrossReference",
                     GrantManager.PUBLIC_USERNAME_STR, false, GRANTER);

  }

  /**
   * Sets the system table listeners on the SYS_INFO.sUSRView table.  These
   * listeners are used to cache information
   * that is stored and retrieved from those tables.
   */
  private void setSystemTableListeners() {
    // Currently disabled -- view caching via a master table listener.
//    getSystem().addMasterTableListener(SYS_VIEW, new ViewTableListener());
  }

  /**
   * Goes through all tables in the database not in the SYS_INFO schema and
   * adds an entry in the grant table for it.
   *

   * This is for converting from a pre-grant database.
   *
   * @param connection the database transaction
   * @param grantee the grantee to apply the table privs to
   */
  private void convertPreGrant(DatabaseConnection connection, String grantee)
                                                    throws DatabaseException {

    String GRANTER = INTERNAL_SECURE_USERNAME;
    GrantManager manager = connection.getGrantManager();

    // Setup grants for any user schema that have been created.
    SchemaDef[] all_schema = connection.getSchemaList();
    for (int i = 0; i < all_schema.length; ++i) {
      SchemaDef schema = all_schema[i];
      // The admin user is given full privs to all tables in USER or DEFAULT
      // schema.
      if (schema.getType().equals("USER") ||
          schema.getType().equals("DEFAULT")) {

        // Don't set grants for default schema
        if (!schema.getType().equals("DEFAULT")) {
          manager.addGrant(Privileges.TABLE_ALL_PRIVS, GrantManager.SCHEMA,
                           schema.getName(), grantee, true, GRANTER);
        }

        manager.addGrantToAllTablesInSchema(schema.getName(),
                        Privileges.TABLE_ALL_PRIVS, grantee, true, GRANTER);
      }
    }

  }

  /**
   * Converts tables from a database that are pre database schema.
   * Always fails: this conversion path has been removed.
   */
  private void convertPreSchema(DatabaseConnection connection)
                                                   throws DatabaseException {
    throw new DatabaseException(
                      "Converting from pre-schema no longer supported.");
  }

  /**
   * Creates and sets up a new database to an initial empty state.  The
   * creation process involves creating all the system tables and views, adding
   * an administrator user account, creating schema, and setting up the initial
   * grant information for the administrator user.
   *

   * The 'username' and 'password' parameter given are set for the administrator
   * account.
   *
   * @param username the administrator account name (must be non-empty)
   * @param password the administrator password (must be non-empty)
   */
  public void create(String username, String password) {

    if (isReadOnly()) {
      throw new RuntimeException("Can not create database in read only mode.");
    }

    if (username == null || username.length() == 0 ||
        password == null || password.length() == 0) {
      throw new RuntimeException(
                                "Must have valid username and password String");
    }

    try {
      // Create the conglomerate
      conglomerate.create(getName());

      // All bootstrap work happens on a single exclusive connection in the
      // SYSTEM_SCHEMA.
      DatabaseConnection connection = createNewConnection(null, null);
      DatabaseQueryContext context = new DatabaseQueryContext(connection);
      connection.getLockingMechanism().setMode(
                                          LockingMechanism.EXCLUSIVE_MODE);
      connection.setCurrentSchema(SYSTEM_SCHEMA);

      // Create the schema information tables introduced in version 0.90
      // and 0.94
      createSchemaInfoTables(connection);

      // The system tables that are present in every conglomerate.
      createSystemTables(connection);
      // Create the system views
      createSystemViews(connection);

      // Creates the administrator user.
      createUser(context, username, password);
      // This is the admin user so add to the 'secure access' table.
      addUserToGroup(context, username, SECURE_GROUP);
      // Allow all localhost TCP connections.
      // NOTE: Permissive initial security!
      grantHostAccessToUser(context, username, "TCP", "%");
      // Allow all Local connections (from within JVM).
      grantHostAccessToUser(context, username, "Local", "%");

      // Sets the system grants for the administrator
      setSystemGrants(connection, username);

      // Set all default system procedures.
      setupSystemFunctions(connection, username);

      try {
        // Close and commit this transaction.
        connection.commit();
      }
      catch (TransactionException e) {
        Debug().writeException(e);
        throw new Error("Transaction Error: " + e.getMessage());
      }

      connection.getLockingMechanism().finishMode(
                                          LockingMechanism.EXCLUSIVE_MODE);
      connection.close();

      // Close the conglomerate.
      conglomerate.close();
    }
    catch (DatabaseException e) {
      Debug().writeException(e);
      throw new Error("Database Exception: " + e.getMessage());
    }
    catch (IOException e) {
      Debug().writeException(e);
      throw new Error("IO Error: " + e.getMessage());
    }

  }

  /**
   * Initializes the database.  This opens all the files that are required for
   * the operation of the database.  If it finds that the version of the
   * data files are not a compatible version, this method throws an exception.
   *

* NOTE: Perhaps a better name for this method is 'open'. */ public void init() throws DatabaseException { if (initialised) { throw new RuntimeException("Init() method can only be called once."); } // Reset all session statistics. stats().resetSession(); try { File log_path = system.getLogDirectory(); if (log_path != null && system.logQueries()) { commands_log = new Log(new File(log_path.getPath(), "commands.log"), 256 * 1024, 5); } else { commands_log = Log.nullLog(); } // Check if the state file exists. If it doesn't, we need to report // incorrect version. if (!storeSystem().storeExists(getName() + "_sf")) { // If state store doesn't exist but the legacy style '.sf' state file // exists, if (system.getDatabasePath() != null && new File(system.getDatabasePath(), getName() + ".sf").exists()) { throw new DatabaseException( "The state store for this database doesn't exist. This means " + "the database version is pre version 1.0. Please see the " + "README for the details for converting this database."); } else { // If neither store or state file exist, assume database doesn't // exist. throw new DatabaseException("The database does not exist."); } } // Open the conglomerate conglomerate.open(getName()); // Check the state of the conglomerate, DatabaseConnection connection = createNewConnection(null, null); DatabaseQueryContext context = new DatabaseQueryContext(connection); connection.getLockingMechanism().setMode( LockingMechanism.EXCLUSIVE_MODE); if (!connection.tableExists(TableDataConglomerate.PERSISTENT_VAR_TABLE)) { throw new DatabaseException( "The sUSRDatabaseVars table doesn't exist. This means the " + "database is pre-schema version 1 or the table has been deleted." + "If you are converting an old version of the database, please " + "convert the database using an older release."); } // What version is the data? 
DataTable database_vars = connection.getTable(TableDataConglomerate.PERSISTENT_VAR_TABLE); Map vars = database_vars.toMap(); String db_version = vars.get("database.version").toString(); // If the version doesn't equal the current version, throw an error. if (!db_version.equals("1.4")) { throw new DatabaseException( "Incorrect data file version '" + db_version + "'. Please see " + "the README on how to convert the data files to the current " + "version."); } // Commit and close the connection. connection.commit(); connection.getLockingMechanism().finishMode( LockingMechanism.EXCLUSIVE_MODE); connection.close(); } catch (TransactionException e) { // This would be very strange error to receive for in initializing // database... throw new Error("Transaction Error: " + e.getMessage()); } catch (IOException e) { e.printStackTrace(System.err); throw new Error("IO Error: " + e.getMessage()); } // Sets up the system table listeners setSystemTableListeners(); initialised = true; } /** * Cleanly shuts down the database. It is important that this method is * called just before the system closes down. *

* The main purpose of this method is to ensure any tables that are backed * by files and in a 'safe' state and cleanly flushed to the file system. *

* If 'delete_on_shutdown' is true, the database will delete itself from the * file system when it shuts down. */ public void shutdown() throws DatabaseException { if (initialised == false) { throw new Error("The database is not initialized."); } try { if (delete_on_shutdown == true) { // Delete the conglomerate if the database is set to delete on // shutdown. conglomerate.delete(); } else { // Otherwise close the conglomerate. conglomerate.close(); } } catch (IOException e) { Debug().writeException(e); throw new Error("IO Error: " + e.getMessage()); } // Shut down the logs... if (commands_log != null) { commands_log.close(); } initialised = false; } /** * Returns true if the database exists. This must be called before 'init' * and 'create'. It checks that the database files exist and we can boot * into the database. */ public boolean exists() { if (initialised == true) { throw new RuntimeException( "The database is initialised, so no point testing it's existance."); } try { // HACK: If the legacy style '.sf' state file exists then we must return // true here because technically the database exists but is not in the // correct version. if (conglomerate.exists(getName())) { return true; } else { boolean is_file_s_system = (system.storeSystem() instanceof V1FileStoreSystem); if (is_file_s_system && new File(system.getDatabasePath(), getName() + ".sf").exists()) { return true; } } return false; } catch (IOException e) { Debug().writeException(e); throw new RuntimeException("IO Error: " + e.getMessage()); } } /** * If the 'deleteOnShutdown' flag is set, the database will delete the * database from the file system when it is shutdown. *

   * NOTE: Use with care - if this is set to true and the database is shutdown
   * it will result in total loss of data.
   */
  public final void setDeleteOnShutdown(boolean status) {
    delete_on_shutdown = status;
  }

  /**
   * Returns true if the database is initialised.
   */
  public boolean isInitialized() {
    return initialised;
  }

  /**
   * Copies all the persistent data in this database (the conglomerate) to the
   * given destination path.  This can copy information while the database
   * is 'live'.
   *
   * @param path destination directory for the copied conglomerate
   * @throws IOException if the copy fails
   */
  public void liveCopyTo(File path) throws IOException {
    if (initialised == false) {
      throw new Error("The database is not initialized.");
    }

    // Set up the destination conglomerate to copy all the data to,
    // Note that this sets up a typical destination conglomerate and changes
    // the cache size and disables the debug log.
    TransactionSystem copy_system = new TransactionSystem();
    DefaultDBConfig config = new DefaultDBConfig();
    config.setDatabasePath(path.getAbsolutePath());
    config.setLogPath("");
    // Effectively disables debug logging in the copy system.
    config.setMinimumDebugLevel(50000);
    // Set data cache to 1MB
    config.setValue("data_cache_size", "1048576");
    // Set io_safety_level to 1 for destination database
    // ISSUE: Is this a good assumption to make -
    //     we don't care if changes are lost by a power failure when we are
    //     backing up the database.  Even if journalling is enabled, a power
    //     failure will lose changes in the backup copy anyway.
    config.setValue("io_safety_level", "1");
    // Debug output is captured in memory and discarded.
    java.io.StringWriter debug_output = new java.io.StringWriter();
    copy_system.setDebugOutput(debug_output);
    copy_system.init(config);
    final TableDataConglomerate dest_conglomerate =
              new TableDataConglomerate(copy_system, copy_system.storeSystem());
    // Open the conglomerate
    dest_conglomerate.minimalCreate("DefaultDatabase");

    try {
      // Make a copy of this conglomerate into the destination conglomerate,
      conglomerate.liveCopyTo(dest_conglomerate);
    }
    finally {
      // Close the conglomerate when finished.
      dest_conglomerate.close();
      // Dispose the TransactionSystem
      copy_system.dispose();
    }
  }

  // ---------- Database conversion ----------

  /**
   * Processes each table in user space and converts the format to the newest
   * version of the data file format.  This is simply achieved by running the
   * 'compactTable' command on the transaction for each table.
   */
  private void convertAllUserTables(DatabaseConnection connection,
                                 PrintStream out) throws TransactionException {
    out.println("Converting user table format to latest version.");
    // Convert all user tables in the database
    TableName[] all_tables = connection.getTableList();
    for (int i = 0; i < all_tables.length; ++i) {
      TableName table_name = all_tables[i];
      String schema_name = table_name.getSchema();
      // Only user tables; the SYS_INFO schema is skipped.
      if (!schema_name.equals("SYS_INFO") &&
          connection.getTableType(table_name).equals("TABLE")) {
        out.println("Converting: " + table_name);
        connection.compactTable(table_name);
        connection.commit();
      }
    }
  }

  /**
   * Returns true if the given sql type is possibly a large object.
   */
  private static boolean largeObjectTest(int sql_type) {
    return (sql_type == SQLTypes.CHAR ||
            sql_type == SQLTypes.VARCHAR ||
            sql_type == SQLTypes.LONGVARCHAR ||
            sql_type == SQLTypes.BINARY ||
            sql_type == SQLTypes.VARBINARY ||
            sql_type == SQLTypes.LONGVARBINARY ||
            sql_type == SQLTypes.BLOB ||
            sql_type == SQLTypes.CLOB);
  }

  /**
   * Scans all the user tables for large objects and if a large object is
   * found, it is moved into the BlobStore.  A large object is an object that
   * uses more than 16 kbytes of storage space.
   */
  private void moveLargeObjectsToBlobStore(DatabaseConnection connection,
                                           PrintStream out)
      throws TransactionException, IOException, DatabaseException {

    out.println("Scanning user tables for large objects.");

    DatabaseQueryContext context = new DatabaseQueryContext(connection);
    BlobStore blob_store = conglomerate.getBlobStore();

    // Scan all user tables in the database
    TableName[] all_tables = connection.getTableList();
    for (int i = 0; i < all_tables.length; ++i) {
      TableName table_name = all_tables[i];
      String schema_name = table_name.getSchema();
      boolean table_changed = false;

      if (!schema_name.equals("SYS_INFO") &&
          connection.getTableType(table_name).equals("TABLE")) {

        out.println("Processing: " + table_name);
        DataTable table = connection.getTable(table_name);
        DataTableDef table_def = table.getDataTableDef();

        // Cheap pre-check: only scan rows when some column could hold a
        // large object at all.
        boolean possibly_has_large_objects = false;
        int column_count = table_def.columnCount();
        for (int n = 0; n < column_count; ++n) {
          int sql_type = table_def.columnAt(n).getSQLType();
          if (largeObjectTest(sql_type)) {
            possibly_has_large_objects = true;
          }
        }

        if (possibly_has_large_objects) {

          RowEnumeration e = table.rowEnumeration();
          while (e.hasMoreRows()) {

            int row_index = e.nextRowIndex();
            // Column assignments to apply to this row (if any).
            ArrayList changes = new ArrayList(4);

            for (int p = 0; p < column_count; ++p) {
              DataTableColumnDef col_def = table_def.columnAt(p);
              int sql_type = col_def.getSQLType();

              if (largeObjectTest(sql_type)) {

                TObject tob = table.getCellContents(p, row_index);
                Object ob = tob.getObject();
                if (ob != null) {
                  // String type
                  // NOTE(review): the thresholds used here are 4K characters
                  // for strings and 8K bytes for binaries, not the "16
                  // kbytes" mentioned in this method's javadoc -- confirm
                  // which is intended.
                  if (ob instanceof StringObject) {
                    StringObject s_object = (StringObject) ob;
                    if (s_object.length() > 4 * 1024) {
                      ClobRef ref =
                         blob_store.putStringInBlobStore(s_object.toString());
                      changes.add(new Assignment(
                                new Variable(table_name, col_def.getName()),
                                new Expression(
                                        new TObject(tob.getTType(), ref))));
                    }
                  }
                  // Binary type
                  if (ob instanceof ByteLongObject) {
                    ByteLongObject b_object = (ByteLongObject) ob;
                    if (b_object.length() > 8 * 1024) {
                      BlobRef ref =
                         blob_store.putByteLongObjectInBlobStore(b_object);
                      changes.add(new Assignment(
                                new Variable(table_name, col_def.getName()),
                                new Expression(
                                        new TObject(tob.getTType(), ref))));
                    }
                  }
                }
              }
            }

            // If there was a change
            if (changes.size() > 0) {
              // Update the row
              Assignment[] assignments = (Assignment[]) changes.toArray(
                                            new Assignment[changes.size()]);
              Table st = table.singleRowSelect(row_index);
              table.update(context, st, assignments, -1);
              table_changed = true;
            }

          }  // For each row

          if (table_changed) {
            // Commit the connection.
            connection.commit();
            // Compact this table (will remove space from large objects).
            connection.compactTable(table_name);
          }

          // Commit the connection.
          connection.commit();
        }
      }
    }
  }

  /**
   * Functionality for converting an old database format to the existing
   * format.  This would typically be called from a convert tool program.
   *

* Returns true if the convert was successful or false if it wasn't (error
   * message is output to the PrintStream).
   */
  public boolean convertToCurrent(PrintStream out, String admin_username)
                                                          throws IOException {

    // Reset all session statistics.
    stats().resetSession();

    try {
      // Don't log commands (there shouldn't be any anyway).
      commands_log = Log.nullLog();

      // Convert the state file if it is necessary.
      File legacy_state_file = new File(system.getDatabasePath(),
                                        getName() + ".sf");
      if (legacy_state_file.exists()) {
        String state_store_fn = getName() + "_sf";
        // If the state store file already exists
        if (storeSystem().storeExists(state_store_fn)) {
          throw new IOException(
              "Both legacy and version 1 state file exist. Please remove one.");
        }
        out.println("Converting state file to current version.");
        // Create the new store,
        Store new_ss = storeSystem().createStore(state_store_fn);
        StateStore ss = new StateStore(new_ss);
        // Convert the existing store
        long new_p = ss.convert(legacy_state_file, Debug());
        // Set the fixed area in the store to point to this new structure
        MutableArea fixed_area = new_ss.getMutableArea(-1);
        fixed_area.putLong(new_p);
        fixed_area.checkOut();
        // Flush the changes to the new store and close
        storeSystem().closeStore(new_ss);
        // Delete the old state file.
        legacy_state_file.delete();
        out.println("State store written.");
      }

      out.println("Opening conglomerate.");

      // Open the conglomerate
      conglomerate.open(getName());

      // Check the state of the conglomerate,
      DatabaseConnection connection = createNewConnection(null, null);
      DatabaseQueryContext context = new DatabaseQueryContext(connection);
      connection.getLockingMechanism().setMode(LockingMechanism.EXCLUSIVE_MODE);
      if (!connection.tableExists(TableDataConglomerate.PERSISTENT_VAR_TABLE)) {
        out.println(
            "The sUSRDatabaseVars table doesn't exist. This means the " +
            "database is pre-schema version 1 or the table has been deleted." +
            "If you are converting an old version of the database, please " +
            "convert the database using an older release.");
        return false;
      }

      // Check the user given exists
      if (!userExists(context, admin_username)) {
        out.println(
            "The admin username given (" + admin_username +
            ") does not exist in this database so I am unable to convert the " +
            "database.");
        return false;
      }

      // What version is the data?
      DataTable database_vars =
               connection.getTable(TableDataConglomerate.PERSISTENT_VAR_TABLE);
      Map vars = database_vars.toMap();
      String db_version = vars.get("database.version").toString();

      if (db_version.equals("1.0")) {
        // Convert from 1.0 to 1.4
        out.println("Version 1.0 found.");
        out.println("Converting database to version 1.4 schema...");

        try {
          // Drop the tables that were deprecated
          connection.dropTable(new TableName(SYSTEM_SCHEMA, "sUSRPrivAdd"));
          connection.dropTable(new TableName(SYSTEM_SCHEMA, "sUSRPrivAlter"));
          connection.dropTable(new TableName(SYSTEM_SCHEMA, "sUSRPrivRead"));
        }
        // NOTE(review): catching java.lang.Error to ignore a failed drop is
        // suspicious - confirm dropTable really signals 'no such table' with
        // an Error before narrowing this catch.
        catch (Error e) { /* ignore */ }

        // Reset the sequence id for the tables.
        conglomerate.resetAllSystemTableID();

        // Create/Update the conglomerate level tables.
        conglomerate.updateSystemTableSchema();

        // Commit the changes so far.
        connection.commit();

        // Create/Update the system tables that are present in every
        // conglomerate.
        createSystemTables(connection);

        // Commit the changes so far.
        connection.commit();

        // Creating the system JDBC system schema
        connection.createSchema(JDBC_SCHEMA, "SYSTEM");
        // Create the system views
        createSystemViews(connection);

        // Sets the system grants for the administrator
        setSystemGrants(connection, admin_username);
        // Sets the table grants for the administrator
        convertPreGrant(connection, admin_username);

        // Allow all localhost TCP connections.
        // NOTE: Permissive initial security!
        grantHostAccessToUser(context, admin_username, "TCP", "%");
        // Allow all Local connections (from within JVM).
        grantHostAccessToUser(context, admin_username, "Local", "%");

        // Convert all tables in the database to the current table format.
        convertAllUserTables(connection, out);

        // Move any large binary or string objects into the blob store.
        moveLargeObjectsToBlobStore(connection, out);

        // Set all default system procedures.
        setupSystemFunctions(connection, admin_username);

        // Commit the changes so far.
        connection.commit();

        // Update to version 1.4
        database_vars =
               connection.getTable(TableDataConglomerate.PERSISTENT_VAR_TABLE);
        updateDatabaseVars(context, database_vars, "database.version", "1.4");
        db_version = "1.4";

      }
      else if (db_version.equals("1.1")) {
        // Convert from 1.1 to 1.4
        out.println("Version 1.1 found.");
        out.println("Converting database to version 1.4 schema...");

        // Reset the sequence id for the tables.
        conglomerate.resetAllSystemTableID();

        // Create/Update the conglomerate level tables.
        conglomerate.updateSystemTableSchema();

        // Commit the changes so far.
        connection.commit();

        // Create/Update the system tables that are present in every
        // conglomerate.
        createSystemTables(connection);

        // Commit the changes so far.
        connection.commit();

        // Update the 'database_vars' table.
        database_vars =
               connection.getTable(TableDataConglomerate.PERSISTENT_VAR_TABLE);

        // Creating the system JDBC system schema
        connection.createSchema(JDBC_SCHEMA, "SYSTEM");
        // Create the system views
        createSystemViews(connection);

        // Clear all grants.
        clearAllGrants(connection);

        // Sets the system grants for the administrator
        setSystemGrants(connection, admin_username);
        // Sets the table grants for the administrator
        convertPreGrant(connection, admin_username);

        // Convert all tables in the database to the current table format.
        convertAllUserTables(connection, out);

        // Move any large binary or string objects into the blob store.
        moveLargeObjectsToBlobStore(connection, out);

        // Set all default system procedures.
        setupSystemFunctions(connection, admin_username);

        // Commit the changes so far.
        connection.commit();

        // Update to version 1.4
        database_vars =
               connection.getTable(TableDataConglomerate.PERSISTENT_VAR_TABLE);
        updateDatabaseVars(context, database_vars, "database.version", "1.4");
        db_version = "1.4";

      }
      else if (db_version.equals("1.2")) {
        // Convert from 1.2 to 1.4
        out.println("Version 1.2 found.");
        out.println("Converting database to version 1.4 schema...");

        // Create/Update the conglomerate level tables.
        conglomerate.updateSystemTableSchema();

        // Commit the changes so far.
        connection.commit();

        // Create/Update the system tables that are present in every
        // conglomerate.
        createSystemTables(connection);

        // Commit the changes so far.
        connection.commit();

        // Convert all tables in the database to the current table format.
        convertAllUserTables(connection, out);

        // Move any large binary or string objects into the blob store.
        moveLargeObjectsToBlobStore(connection, out);

        // Commit the changes so far.
        connection.commit();

        // Set all default system procedures.
        setupSystemFunctions(connection, admin_username);

        // Commit the changes so far.
        connection.commit();

        // Update to version 1.4
        database_vars =
               connection.getTable(TableDataConglomerate.PERSISTENT_VAR_TABLE);
        updateDatabaseVars(context, database_vars, "database.version", "1.4");
        db_version = "1.4";

      }
      else if (db_version.equals("1.3")) {
        out.println("Version 1.3 found.");
        out.println("Converting database to version 1.4 schema...");

        // Create/Update the conglomerate level tables.
        conglomerate.updateSystemTableSchema();

        // Commit the changes so far.
        connection.commit();

        // Create/Update the system tables that are present in every
        // conglomerate.
        createSystemTables(connection);

        // Commit the changes so far.
        connection.commit();

        // Drop the 'sUSRSystemTrigger' table that was erroneously added in 1.3
        try {
          connection.dropTable(new TableName(SYSTEM_SCHEMA,
                                             "sUSRSystemTrigger"));
        }
        catch (Error e) { /* ignore */ }

        // Set all default system procedures.
        setupSystemFunctions(connection, admin_username);

        // Commit the changes so far.
        connection.commit();

        // Update to version 1.4
        database_vars =
               connection.getTable(TableDataConglomerate.PERSISTENT_VAR_TABLE);
        updateDatabaseVars(context, database_vars, "database.version", "1.4");
        db_version = "1.4";

      }
      else if (db_version.equals("1.4")) {
        out.println("Version 1.4 found.");
        out.println("Version of data files is current.");
      }
      // NOTE(review): this condition is always true when reached (the branch
      // above already handled "1.4"), so this is effectively a plain 'else'.
      else if (!db_version.equals("1.4")) {
        // This means older versions of the database will not support the data
        // format of newer versions.
        out.println("Version " + db_version + " found.");
        out.println("This is not a recognized version number and can not be " +
                    "converted. Perhaps this is a future version? I can " +
                    "not convert backwards from a future version.");
        return false;
      }

      // Commit and close the connection.
      connection.commit();
      connection.getLockingMechanism().finishMode(
                                            LockingMechanism.EXCLUSIVE_MODE);
      connection.close();
      return true;

    }
    catch (TransactionException e) {
      // It would be a very strange error to receive while initializing the
      // database...
      out.println("Transaction Error: " + e.getMessage());
      e.printStackTrace(out);
      return false;
    }
    catch (DatabaseException e) {
      out.println("Database Error: " + e.getMessage());
      e.printStackTrace(out);
      return false;
    }
    finally {
      try {
        conglomerate.close();
      }
      catch (Throwable e) {
        // ignore
      }
    }

  }

  // ---------- Server side procedures ----------

  /**
   * Resolves a procedure name into a DBProcedure object. This is used for
   * finding a server side script. It throws a DatabaseException if the
   * procedure could not be resolved or there was an error retrieving it.
   * <p>
* ISSUE: Move this to DatabaseSystem?
   */
  public DatabaseProcedure getDBProcedure(
               String procedure_name, DatabaseConnection connection)
                                                   throws DatabaseException {

    // The procedure we are getting.
    DatabaseProcedure procedure_instance;

    // See if we can find the procedure as a .js (JavaScript) file in the
    // procedure resources.
    String p = "/" + procedure_name.replace('.', '/');
    // If procedure doesn't start with '/com/mckoi/procedure/' then add it
    // on here.
    // NOTE(review): 'p' already starts with '/', so this concatenation
    // produces a double slash (eg. '/com/mckoi/procedure//name.js') -
    // confirm getResource tolerates this before changing it.
    if (!p.startsWith("/com/mckoi/procedure/")) {
      p = "/com/mckoi/procedure/" + p;
    }
    p = p + ".js";

    // Is there a resource available?
    java.net.URL url = getClass().getResource(p);

    if (url != null) {
      // Create a server side procedure for the .js file
      //   ( This code is not included in the free release )
      procedure_instance = null;
    }
    else {
      try {
        // Couldn't find the javascript script, so try and resolve as an
        // actual Java class file.
        // Find the procedure
        Class proc = Class.forName("com.mckoi.procedure." + procedure_name);
        // Instantiate a new instance of the procedure
        procedure_instance = (DatabaseProcedure) proc.newInstance();

        Debug().write(Lvl.INFORMATION, this,
                      "Getting raw Java class file: " + procedure_name);
      }
      catch (IllegalAccessException e) {
        Debug().writeException(e);
        throw new DatabaseException("Illegal Access: " + e.getMessage());
      }
      catch (InstantiationException e) {
        Debug().writeException(e);
        throw new DatabaseException("Instantiation Error: " + e.getMessage());
      }
      catch (ClassNotFoundException e) {
        Debug().writeException(e);
        throw new DatabaseException("Class Not Found: " + e.getMessage());
      }
    }

    // Return the procedure.
    return procedure_instance;

  }

  // ---------- System access ----------

  /**
   * Returns the DatabaseSystem that this Database is from.
   */
  public final DatabaseSystem getSystem() {
    return system;
  }

  /**
   * Returns the StoreSystem for this Database.
   */
  public final StoreSystem storeSystem() {
    return system.storeSystem();
  }

  /**
   * Convenience static for accessing the global Stats object.
Perhaps this
   * should be deprecated?
   */
  public final Stats stats() {
    return getSystem().stats();
  }

  /**
   * Returns the DebugLogger implementation from the DatabaseSystem.
   */
  public final DebugLogger Debug() {
    return getSystem().Debug();
  }

  /**
   * Returns the system trigger manager.
   */
  public final TriggerManager getTriggerManager() {
    return trigger_manager;
  }

  /**
   * Returns the system user manager.
   */
  public final UserManager getUserManager() {
    return getSystem().getUserManager();
  }

  /**
   * Creates an event for the database dispatcher.
   */
  public final Object createEvent(Runnable runner) {
    return getSystem().createEvent(runner);
  }

  /**
   * Posts an event on the database dispatcher.
   */
  public final void postEvent(int time, Object event) {
    getSystem().postEvent(time, event);
  }

  /**
   * Returns the system DataCellCache.
   */
  public final DataCellCache getDataCellCache() {
    return getSystem().getDataCellCache();
  }

  /**
   * Returns true if the database has shut down.
   */
  public final boolean hasShutDown() {
    return getSystem().hasShutDown();
  }

  /**
   * Starts the shutdown thread which should contain delegates that shut the
   * database and all its resources down. This method returns immediately.
   */
  public final void startShutDownThread() {
    getSystem().startShutDownThread();
  }

  /**
   * Blocks until the database has shut down.
   */
  public final void waitUntilShutdown() {
    getSystem().waitUntilShutdown();
  }

  /**
   * Executes database functions from the 'run' method of the given runnable
   * instance on the first available worker thread. All database functions
   * must go through a worker thread. If we ensure this, we can easily stop
   * all database functions from executing if need be. Also, we only need to
   * have a certain number of threads active at any one time rather than a
   * unique thread for each connection.
*/
  public final void execute(User user, DatabaseConnection database,
                            Runnable runner) {
    getSystem().execute(user, database, runner);
  }

  /**
   * Registers the delegate that is executed when the shutdown thread is
   * activated.
   */
  public final void registerShutDownDelegate(Runnable delegate) {
    getSystem().registerShutDownDelegate(delegate);
  }

  /**
   * Controls whether the database is allowed to execute commands or not. If
   * this is set to true, then calls to 'execute' will be executed
   * as soon as there is a free worker thread available. Otherwise no
   * commands are executed until this is enabled.
   */
  public final void setIsExecutingCommands(boolean status) {
    getSystem().setIsExecutingCommands(status);
  }

  /**
   * Returns a static table that has a single row but no columns. This table
   * is useful for certain database operations.
   */
  public final Table getSingleRowTable() {
    return SINGLE_ROW_TABLE;
  }

  // ---------- Static methods ----------

  /**
   * Given the sUSRDatabaseVars table, this will update the given key with
   * the given value in the table in the current transaction.
*/ private static void updateDatabaseVars(QueryContext context, DataTable database_vars, String key, String value) throws DatabaseException { // The references to the first and second column (key/value) Variable c1 = database_vars.getResolvedVariable(0); // First column Variable c2 = database_vars.getResolvedVariable(1); // Second column // Assignment: second column = value Assignment assignment = new Assignment(c2, new Expression(TObject.stringVal(value))); // All rows from database_vars where first column = the key Table t1 = database_vars.simpleSelect(context, c1, Operator.get("="), new Expression(TObject.stringVal(key))); // Update the variable database_vars.update(context, t1, new Assignment[] { assignment }, -1); } public void finalize() throws Throwable { super.finalize(); if (isInitialized()) { System.err.println("Database object was finalized and is initialized!"); } } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/DatabaseConnection.java000066400000000000000000001714471330501023400263220ustar00rootroot00000000000000/** * com.mckoi.database.DatabaseConnection 21 Nov 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*
 */

package com.mckoi.database;

import com.mckoi.debug.*;
import com.mckoi.util.Cache;
import com.mckoi.database.global.ByteLongObject;
import com.mckoi.database.global.Ref;
import com.mckoi.database.jdbc.SQLQuery;
import java.util.HashMap;
import java.util.ArrayList;
import java.math.BigDecimal;

/**
 * An object that represents a connection to a Database. This object handles
 * all transactional queries and modifications to the database.
 *
 * @author Tobias Downer
 */
public class DatabaseConnection implements TriggerListener {

  /**
   * The User that this connection has been made by.
   */
  private User user;

  /**
   * The Database object that this connection is on.
   */
  private Database database;

  /**
   * The DebugLogger object that we can use to log messages to.
   */
  private DebugLogger logger;

  /**
   * A loop-back object that is managing this connection. This typically is
   * the session protocol. This is notified of all connection events, such as
   * triggers.
   */
  private CallBack call_back;

  /**
   * The locking mechanism within this connection.
   */
  private LockingMechanism locking_mechanism;

  /**
   * The TableDataConglomerate object that is used for transactional access
   * to the data.
   */
  private TableDataConglomerate conglomerate;

  /**
   * The current Transaction that this connection is operating within.
   */
  private Transaction transaction;

  /**
   * The current java.sql.Connection object that can be used to access the
   * transaction internally.
   */
  private java.sql.Connection jdbc_connection;

  /**
   * A HashMap of DataTable objects that have been created within this
   * connection.
   */
  private HashMap tables_cache;

  /**
   * A buffer of triggers. This contains triggers that can't fire until
   * the current transaction has closed. These triggers were generated by
   * external actions outside of the context of this transaction.
   */
  private ArrayList trigger_event_buffer;

  /**
   * A list of triggers that are fired by actions taken on tables in this
   * transaction. When the transaction is successfully committed, these
   * trigger events need to be propagated to other connections in the database
   * listening for trigger events on the triggered objects.
   */
  private ArrayList trigger_event_list;

  /**
   * If this is true then the database connection is in 'auto-commit' mode.
   * This implies a COMMIT instruction is executed after every complete
   * statement in the language grammar. By default this is true.
   */
  private boolean auto_commit;

  /**
   * The current transaction isolation level this connect is operating under.
   * 1 = READ UNCOMMITTED, 2 = READ COMMITTED, 3 = REPEATABLE READ,
   * 4 = SERIALIZABLE.
   */
  private int transaction_isolation;

  /**
   * A flag which if set to true, will not allow 'commit' or 'rollback'
   * commands on the transaction to occur and therefore prevent any open
   * transaction from closing. This is useful for restricting the ability
   * of a stored procedure to close.
   */
  private boolean close_transaction_disabled;

  /**
   * The name of the schema that this connection is currently in. If the
   * schema is "" then this connection is in the default schema (effectively
   * no schema).
   */
  private String current_schema;

  /**
   * The GrantManager object for this connection.
   */
  private GrantManager grant_manager;

  /**
   * The procedure manager object for this connection.
   */
  private ProcedureManager procedure_manager;

  /**
   * The connection trigger manager that handles actions that cause triggers
   * to fire on this connection.
   */
  private ConnectionTriggerManager connection_trigger_manager;

  /**
   * The connection view manager that handles view information through this
   * connection.
   */
  private ViewManager view_manager;

  /**
   * The list of all TableBackedCache objects that have been attached to this
   * DatabaseConnection and are to be notified of transaction start/end
   * events.
   */
  private ArrayList table_backed_cache_list;

  /**
   * A local member that represents the static list of internal tables
   * that represent connection specific properties such as username,
   * connection, statistics, etc.
   */
  private ConnectionInternalTableInfo connection_internal_table_info;

  // ----- Local flags -----

  /**
   * True if transactions through this connection generate an error when
   * there is a dirty select on a table.
   */
  private boolean error_on_dirty_select;

  /**
   * True if this connection resolves identifiers case insensitive.
   */
  private boolean case_insensitive_identifiers;

  // ----- OLD and NEW table information for triggers -----

  /**
   * A local member that represents the OLD and NEW system tables that
   * represent the OLD and NEW data in a triggered action.
   */
  private OldAndNewInternalTableInfo old_new_table_info;

  /**
   * The current state of the OLD and NEW system tables including any cached
   * information about the tables.
   */
  private OldNewTableState current_old_new_state = new OldNewTableState();

  /**
   * (package protected) Constructs the connection.
   */
  DatabaseConnection(Database database, User user, CallBack call_back) {
    this.database = database;
    this.user = user;
    this.logger = database.Debug();
    this.call_back = call_back;
    this.conglomerate = database.getConglomerate();
    this.locking_mechanism = new LockingMechanism(Debug());
    this.trigger_event_buffer = new ArrayList();
    this.trigger_event_list = new ArrayList();
    tables_cache = new HashMap();
    auto_commit = true;

    current_schema = Database.DEFAULT_SCHEMA;
    this.close_transaction_disabled = false;

    this.table_backed_cache_list = new ArrayList();

    connection_internal_table_info = new ConnectionInternalTableInfo();
    old_new_table_info = new OldAndNewInternalTableInfo();

    // Pull the connection defaults from the system configuration.
    error_on_dirty_select =
                       database.getSystem().transactionErrorOnDirtySelect();
    case_insensitive_identifiers =
                               database.getSystem().ignoreIdentifierCase();

  }

  /**
   * Initializes this DatabaseConnection (possibly by initializing state from
   * the database).
*/
  void init() {
    // Create the grant manager for this connection.
    grant_manager = new GrantManager(this);
    // Create the procedure manager for this connection.
    procedure_manager = new ProcedureManager(this);
    // Create the connection trigger manager object
    connection_trigger_manager = new ConnectionTriggerManager(this);
    // Create the view manager
    view_manager = new ViewManager(this);
  }

  /**
   * Returns the transaction. If 'transaction' is null then it opens a
   * new transaction within the conglomerate.
   */
  private Transaction getTransaction() {
    synchronized (this) {
      if (transaction == null) {
        // Lazily open a new transaction and register all the internal table
        // models with it before it is first used.
        transaction = conglomerate.createTransaction();
        transaction.setErrorOnDirtySelect(error_on_dirty_select);
        // Internal tables (connection statistics, etc)
        transaction.addInternalTableInfo(connection_internal_table_info);
        // OLD and NEW system tables (if applicable)
        transaction.addInternalTableInfo(old_new_table_info);
        // Model views as tables (obviously)
        transaction.addInternalTableInfo(
                  ViewManager.createInternalTableInfo(view_manager, transaction));
        // Model procedures as tables
        transaction.addInternalTableInfo(
                  ProcedureManager.createInternalTableInfo(transaction));
        // Model sequences as tables
        transaction.addInternalTableInfo(
                  SequenceManager.createInternalTableInfo(transaction));
        // Model triggers as tables
        transaction.addInternalTableInfo(
                  ConnectionTriggerManager.createInternalTableInfo(transaction));

        // Notify any table backed caches that this transaction has started.
        int sz = table_backed_cache_list.size();
        for (int i = 0; i < sz; ++i) {
          TableBackedCache cache =
                         (TableBackedCache) table_backed_cache_list.get(i);
          cache.transactionStarted();
        }
      }
    }
    return transaction;
  }

  /**
   * Returns a freshly deserialized QueryPlanNode object for the given view
   * object.
*/
  QueryPlanNode createViewQueryPlanNode(TableName table_name) {
    return view_manager.createViewQueryPlanNode(table_name);
  }

  /**
   * Returns a java.sql.Connection object that can be used as a JDBC
   * interface to access the current transaction of this DatabaseConnection.
   * <p>
   * There are a few important considerations when using the JDBC connection;
   * <p>
   * NOTE(review): the list of considerations appears to have been lost from
   * this comment - restore it from version control if possible.
   */
  public java.sql.Connection getJDBCConnection() {
    // Lazily create the internal JDBC wrapper the first time it is requested.
    if (jdbc_connection == null) {
      jdbc_connection = InternalJDBCHelper.createJDBCConnection(getUser(), this);
    }
    return jdbc_connection;
  }

  /**
   * Creates an object that implements ProcedureConnection that provides access
   * to this connection.
   * <p>
   * Note that this connection is set to the user of the privs that the
   * procedure executes under when this method returns.
   * <p>
   * There must be a 100% guarantee that after this method is called, a call to
   * 'disposeProcedureConnection' is called which cleans up the state of this
   * object.
   */
  ProcedureConnection createProcedureConnection(User user) {
    // Create the ProcedureConnection object,
    DCProcedureConnection c = new DCProcedureConnection();
    // Record the current user
    c.previous_user = getUser();
    // Record the current 'close_transaction_disabled' flag
    c.transaction_disabled_flag = close_transaction_disabled;
    // Set the new user
    setUser(user);
    // Disable the ability to close a transaction
    close_transaction_disabled = true;
    // Return
    return c;
  }

  /**
   * Disposes of the ProcedureConnection that was previously created by the
   * 'createProcedure' method.
   */
  void disposeProcedureConnection(ProcedureConnection connection) {
    DCProcedureConnection c = (DCProcedureConnection) connection;
    // Revert back to the previous user.
    setUser(c.previous_user);
    // Revert back to the previous transaction disable status.
    close_transaction_disabled = c.transaction_disabled_flag;
    // Dispose of the connection
    c.dispose();
  }

  /**
   * Returns the DatabaseSystem object for this connection.
   */
  public DatabaseSystem getSystem() {
    return database.getSystem();
  }

  /**
   * Returns the Database object for this connection.
   */
  public Database getDatabase() {
    return database;
  }

  /**
   * Returns the conglomerate of this connection.
   */
  TableDataConglomerate getConglomerate() {
    return conglomerate;
  }

  /**
   * Sets the User object for this connection. This is necessary because we
   * may want to temporarily change the user on this connection to allow
   * top level queries in a different privilege space.
   */
  void setUser(User user) {
    this.user = user;
  }

  /**
   * Returns the User object for this connection.
   */
  public User getUser() {
    return user;
  }

  /**
   * Returns a DebugLogger object that we can use to log debug messages to.
   */
  public final DebugLogger Debug() {
    return logger;
  }

  /**
   * Returns the connection trigger manager for this connection.
*/
  public ConnectionTriggerManager getConnectionTriggerManager() {
    return connection_trigger_manager;
  }

  /**
   * Returns the GrantManager object that manages grants for tables in the
   * database for this connection/user.
   */
  public GrantManager getGrantManager() {
    return grant_manager;
  }

  /**
   * Returns the ProcedureManager object that manages database functions and
   * procedures in the database for this connection/user.
   */
  public ProcedureManager getProcedureManager() {
    return procedure_manager;
  }

  /**
   * Sets the auto-commit mode.
   */
  public void setAutoCommit(boolean status) {
    auto_commit = status;
  }

  /**
   * Sets the transaction isolation level from a string.
   * <p>
   * NOTE(review): only "serializable" is accepted; any other level throws a
   * raw java.lang.Error which callers are unlikely to catch - consider a
   * runtime exception instead.
   */
  public void setTransactionIsolation(String name) {
    if (name.equals("serializable")) {
      transaction_isolation = 4;
    }
    else {
      throw new Error("Can not set transaction isolation to " + name);
    }
  }

  /**
   * Assigns a variable to the expression for this connection. This is a
   * generic way of setting properties of the connection. Currently supported
   * variables are;
   * <p>
   * ERROR_ON_DIRTY_SELECT - set to Boolean.TRUE for turning this transaction
   * conflict off on this connection.
   * CASE_INSENSITIVE_IDENTIFIERS - Boolean.TRUE means the grammar becomes
   * case insensitive for identifiers resolved by the grammar.
   */
  public void setVar(String name, Expression exp) {
    if (name.toUpperCase().equals("ERROR_ON_DIRTY_SELECT")) {
      error_on_dirty_select = toBooleanValue(exp);
    }
    else if (name.toUpperCase().equals("CASE_INSENSITIVE_IDENTIFIERS")) {
      case_insensitive_identifiers = toBooleanValue(exp);
    }
  }

  /**
   * Evaluates the expression to a boolean value (true or false).
   */
  private static boolean toBooleanValue(Expression exp) {
    Boolean b = exp.evaluate(null, null, null).toBoolean();
    if (b == null) {
      throw new StatementException(
                "Expression does not evaluate to a boolean (true or false).");
    }
    return b.booleanValue();
  }

  /**
   * Returns the auto-commit status of this connection. If this is true then
   * the language layer must execute a COMMIT after every statement.
   */
  public boolean getAutoCommit() {
    return auto_commit;
  }

  /**
   * Returns the transaction isolation level of this connection.
   */
  public int getTransactionIsolation() {
    return transaction_isolation;
  }

  /**
   * Returns the transaction isolation level of this connection as a string.
   */
  public String getTransactionIsolationAsString() {
    int il = getTransactionIsolation();
    if (il == 1) {
      return "read uncommitted";
    }
    else if (il == 2) {
      return "read committed";
    }
    else if (il == 3) {
      return "repeatable read";
    }
    else if (il == 4) {
      return "serializable";
    }
    else {
      return "unknown isolation level";
    }
  }

  /**
   * Returns the name of the schema that this connection is within.
   */
  public String getCurrentSchema() {
    return current_schema;
  }

  /**
   * Returns true if the connection is in case insensitive mode. In case
   * insensitive mode the case of identifier strings is not important.
   */
  public boolean isInCaseInsensitiveMode() {
    return case_insensitive_identifiers;
  }

  /**
   * Sets the schema that this connection is within.
*/
  public void setCurrentSchema(String current_schema) {
    this.current_schema = current_schema;
  }

  /**
   * Returns the LockingMechanism object that is within the context of this
   * database connection. This manages read/write locking within this
   * connection.
   */
  public LockingMechanism getLockingMechanism() {
    return locking_mechanism;
  }

  /**
   * Attaches a TableBackedCache object to this DatabaseConnection which is
   * notified when a transaction is started and stopped, and when the table
   * being backed has changes made to it.
   */
  void attachTableBackedCache(TableBackedCache cache) {
    cache.attachTo(conglomerate);
    table_backed_cache_list.add(cache);
  }

  /**
   * Returns a TableName[] array that contains the list of database
   * tables that are visible by this transaction.
   * <p>
   * This returns the list of all objects that represent queriable tables in
   * the database.
   */
  public TableName[] getTableList() {
    return getTransaction().getTableList();
  }

  /**
   * Returns true if the table exists within this connection transaction.
   */
  public boolean tableExists(String table_name) {
    return tableExists(new TableName(current_schema, table_name));
  }

  /**
   * Returns true if the table exists within this connection transaction.
   */
  public boolean tableExists(TableName table_name) {
    table_name = substituteReservedTableName(table_name);
    return getTransaction().tableExists(table_name);
  }

  /**
   * Returns the type of the table object. Currently this is either "TABLE"
   * or "VIEW".
   */
  public String getTableType(TableName table_name) {
    table_name = substituteReservedTableName(table_name);
    return getTransaction().getTableType(table_name);
  }

  /**
   * Attempts to resolve the given table name to its correct case assuming
   * the table name represents a case insensitive version of the name. For
   * example, "aPP.CuSTOMer" may resolve to "APP.Customer". If the table
   * name can not resolve to a valid identifier it returns the input table
   * name, therefore the actual presence of the table should always be
   * checked by calling 'tableExists' after this method returns.
   */
  public TableName tryResolveCase(TableName table_name) {
    table_name = substituteReservedTableName(table_name);
    table_name = getTransaction().tryResolveCase(table_name);
    return table_name;
  }

  /**
   * Resolves a TableName string (eg. 'Customer' 'APP.Customer' ) to a
   * TableName object. If the schema part of the table name is not present
   * then it is set to the current schema of the database connection. If the
   * database is ignoring the case then this will correctly resolve the table
   * to the cased version of the table name.
*/ public TableName resolveTableName(String name) { TableName table_name = TableName.resolve(getCurrentSchema(), name); table_name = substituteReservedTableName(table_name); if (isInCaseInsensitiveMode()) { // Try and resolve the case of the table name, table_name = tryResolveCase(table_name); } return table_name; } /** * Resolves the given string to a table name, throwing an exception if * the reference is ambiguous. This also generates an exception if the * table object is not found. */ public TableName resolveToTableName(String name) { TableName table_name = TableName.resolve(getCurrentSchema(), name); if (table_name.getName().equalsIgnoreCase("OLD")) { return Database.OLD_TRIGGER_TABLE; } else if (table_name.getName().equalsIgnoreCase("NEW")) { return Database.NEW_TRIGGER_TABLE; } return getTransaction().resolveToTableName(getCurrentSchema(), name, isInCaseInsensitiveMode()); } /** * Returns the DataTableDef for the table with the given name. */ public DataTableDef getDataTableDef(TableName name) { name = substituteReservedTableName(name); return getTransaction().getDataTableDef(name); } /** * Returns a DataTable that represents the table from the given schema, * name in the database. 
*/ public DataTable getTable(TableName name) { name = substituteReservedTableName(name); try { // Special handling of NEW and OLD table, we cache the DataTable in the // OldNewTableState object, if (name.equals(Database.OLD_TRIGGER_TABLE)) { if (current_old_new_state.OLD_data_table == null) { current_old_new_state.OLD_data_table = new DataTable(this, getTransaction().getTable(name)); } return current_old_new_state.OLD_data_table; } else if (name.equals(Database.NEW_TRIGGER_TABLE)) { if (current_old_new_state.NEW_data_table == null) { current_old_new_state.NEW_data_table = new DataTable(this, getTransaction().getTable(name)); } return current_old_new_state.NEW_data_table; } // Ask the transaction for the table MutableTableDataSource table = getTransaction().getTable(name); // Is this table in the tables_cache? DataTable dtable = (DataTable) tables_cache.get(table); // No, so wrap it around a Datatable and put it in the cache if (dtable == null) { dtable = new DataTable(this, table); tables_cache.put(table, dtable); } // Return the DataTable return dtable; } catch (DatabaseException e) { Debug().writeException(e); throw new Error("Database Exception: " + e.getMessage()); } } /** * Returns a DataTable that represents the table with the given name in the * database from the current connection schema. */ public DataTable getTable(String table_name) { return getTable(new TableName(current_schema, table_name)); } /** * Create a new table within the context of the current connection * transaction. */ public void createTable(DataTableDef table_def) { checkAllowCreate(table_def.getTableName()); getTransaction().createTable(table_def); } /** * Create a new table with a starting initial sector size. This should * only be used as very fine grain optimization for creating tables. If * in the future the underlying table model is changed so that the given * 'sector_size' value is unapplicable, then the value will be ignored. 
*/ public void createTable(DataTableDef table_def, int data_sector_size, int index_sector_size) { checkAllowCreate(table_def.getTableName()); getTransaction().createTable(table_def, data_sector_size, index_sector_size); } /** * Creates a new view. This takes the information in the ViewDef object and * adds it to the system view table. *

* Note that this is a transactional operation. You need to commit for the * view to be visible to other transactions. */ public void createView(SQLQuery query, ViewDef view) { checkAllowCreate(view.getDataTableDef().getTableName()); try { view_manager.defineView(view, query, getUser()); } catch (DatabaseException e) { Debug().writeException(e); throw new RuntimeException("Database Exception: " + e.getMessage()); } } /** * Drops the view with the given name and returns true if the drop succeeded. * Returns false if the view was not found. *

* Note that this is a transactional operation. You need to commit for the * change to be visible to other transactions. */ public boolean dropView(TableName view_name) { try { return view_manager.deleteView(view_name); } catch (DatabaseException e) { Debug().writeException(e); throw new RuntimeException("Database Exception: " + e.getMessage()); } } /** * Updates a given table within the context of the current connection * transaction. */ public void updateTable(DataTableDef table_def) { checkAllowCreate(table_def.getTableName()); getTransaction().alterTable(table_def.getTableName(), table_def); } /** * Updates a given table within the context of the current connection * transaction. This should only be used as very fine grain optimization * for creating tables.If in the future the underlying table model is * changed so that the given 'sector_size' value is unapplicable, then the * value will be ignored. */ public void updateTable(DataTableDef table_def, int data_sector_size, int index_sector_size) { checkAllowCreate(table_def.getTableName()); getTransaction().alterTable(table_def.getTableName(), table_def, data_sector_size, index_sector_size); } /** * Given a DataTableDef, if the table exists then it is updated otherwise * if it doesn't exist then it is created. *

* This should only be used as very fine grain optimization for creating/ * altering tables. If in the future the underlying table model is changed * so that the given 'sector_size' value is unapplicable, then the value * will be ignored. */ public void alterCreateTable(DataTableDef table_def, int data_sector_size, int index_sector_size) { if (!tableExists(table_def.getTableName())) { createTable(table_def, data_sector_size, index_sector_size); } else { updateTable(table_def, data_sector_size, index_sector_size); } } /** * Given a DataTableDef, if the table exists then it is updated otherwise * if it doesn't exist then it is created. */ public void alterCreateTable(DataTableDef table_def) { if (!tableExists(table_def.getTableName())) { createTable(table_def); } else { updateTable(table_def); } } /** * Notifies this transaction that a database object with the given name has * successfully been created. */ void databaseObjectCreated(TableName table_name) { getTransaction().databaseObjectCreated(table_name); } /** * Notifies this transaction that a database object with the given name has * successfully been dropped. */ void databaseObjectDropped(TableName table_name) { getTransaction().databaseObjectDropped(table_name); } /** * Checks all the rows in the table for immediate constraint violations * and when the transaction is next committed check for all deferred * constraint violations. This method is used when the constraints on a * table changes and we need to determine if any constraint violations * occurred. To the constraint checking system, this is like adding all * the rows to the given table. */ public void checkAllConstraints(TableName table_name) { // Assert checkExclusive(); getTransaction().checkAllConstraints(table_name); } /** * Drops a table from within the context of the current connection * transaction. 
   */
  public void dropTable(String table_name) {
    dropTable(new TableName(current_schema, table_name));
  }

  /**
   * Drops a table from within the context of the current connection
   * transaction.
   */
  public void dropTable(TableName table_name) {
    getTransaction().dropTable(table_name);
  }

  /**
   * Compacts the table with the given name.  Throws an exception if the
   * table doesn't exist.
   */
  public void compactTable(String table_name) {
    compactTable(new TableName(current_schema, table_name));
  }

  /**
   * Compacts the table with the given name.  Throws an exception if the
   * table doesn't exist.
   */
  public void compactTable(TableName table_name) {
    getTransaction().compactTable(table_name);
  }

  /**
   * Adds the given table name to the list of tables that are selected from
   * within the transaction in this connection.
   */
  public void addSelectedFromTable(String table_name) {
    addSelectedFromTable(new TableName(current_schema, table_name));
  }

  /**
   * Adds the given table name to the list of tables that are selected from
   * within the transaction in this connection.
   */
  public void addSelectedFromTable(TableName name) {
    getTransaction().addSelectedFromTable(name);
  }

  /**
   * Requests of the sequence generator the next value from the sequence.
   * <p>
   * NOTE: This does NOT check that the user owning this connection has the
   * correct privs to perform this operation.
   */
  public long nextSequenceValue(String name) {
    // Resolve and ambiguity test
    TableName seq_name = resolveToTableName(name);
    return getTransaction().nextSequenceValue(seq_name);
  }

  /**
   * Returns the current sequence value for the given sequence generator that
   * was last returned by a call to 'nextSequenceValue'.  If a value was not
   * last returned by a call to 'nextSequenceValue' then a statement exception
   * is generated.
   * <p>
   * NOTE: This does NOT check that the user owning this connection has the
   * correct privs to perform this operation.
   */
  public long lastSequenceValue(String name) {
    // Resolve and ambiguity test
    TableName seq_name = resolveToTableName(name);
    return getTransaction().lastSequenceValue(seq_name);
  }

  /**
   * Sets the sequence value for the given sequence generator.  If the generator
   * does not exist or it is not possible to set the value for the generator
   * then an exception is generated.
   * <p>
   * NOTE: This does NOT check that the user owning this connection has the
   * correct privs to perform this operation.
   */
  public void setSequenceValue(String name, long value) {
    // Resolve and ambiguity test
    TableName seq_name = resolveToTableName(name);
    getTransaction().setSequenceValue(seq_name, value);
  }

  /**
   * Returns the next unique identifier for the given table from the schema.
   */
  public long nextUniqueID(TableName name) {
    return getTransaction().nextUniqueID(name);
  }

  /**
   * Returns the next unique identifier for the given table in the connection
   * schema.
   */
  public long nextUniqueID(String table_name) {
    TableName tname = TableName.resolve(current_schema, table_name);
    return nextUniqueID(tname);
  }

  /**
   * If the given table name is a reserved name, then we must substitute it
   * with its correct form.  For example, 'APP.NEW' becomes 'SYS_INFO.NEW',
   * etc.  Non-reserved names are returned unchanged.
   */
  static TableName substituteReservedTableName(TableName table_name) {
    // We do not allow tables to be created with a reserved name
    // (only the simple name is checked; the schema part is ignored).
    String name = table_name.getName();
    if (name.equalsIgnoreCase("OLD")) {
      return Database.OLD_TRIGGER_TABLE;
    }
    if (name.equalsIgnoreCase("NEW")) {
      return Database.NEW_TRIGGER_TABLE;
    }
    return table_name;
  }

  /**
   * Generates an exception if the name of the table is reserved and the
   * creation of the table should be prevented.  For example, the table
   * names 'OLD' and 'NEW' are reserved.
   */
  static void checkAllowCreate(TableName table_name) {
    // We do not allow tables to be created with a reserved name
    String name = table_name.getName();
    if (name.equalsIgnoreCase("OLD") || name.equalsIgnoreCase("NEW")) {
      throw new StatementException("Table name '" + table_name +
                                   "' is reserved.");
    }
  }

  /**
   * Creates a new sequence generator with the given TableName and
   * initializes it with the given details.  This does NOT check if the
   * given name clashes with an existing database object.
   */
  public void createSequenceGenerator(
             TableName name, long start_value, long increment_by,
             long min_value, long max_value, long cache, boolean cycle) {
    // Check the name of the database object isn't reserved (OLD/NEW)
    checkAllowCreate(name);
    getTransaction().createSequenceGenerator(name,
           start_value, increment_by, min_value, max_value, cache, cycle);
  }

  /**
   * Drops an existing sequence generator with the given name.
   */
  public void dropSequenceGenerator(TableName name) {
    getTransaction().dropSequenceGenerator(name);
  }

  /**
   * Adds a type of trigger for the given trigger source (usually the
   * name of the table).
   * <p>
   * Adds a type of trigger to the given Table.  When the event is fired, the
   * UserCallBack method is notified of the event.
   */
  public void createTrigger(String trigger_name,
                            String trigger_source, int type) {
    // This connection acts as both the owner and the listener of the trigger.
    database.getTriggerManager().addTriggerListener(
                          this, trigger_name, type, trigger_source, this);
  }

  /**
   * Removes a type of trigger for the given trigger source (usually the
   * name of the table).
   */
  public void deleteTrigger(String trigger_name) {
    database.getTriggerManager().removeTriggerListener(this, trigger_name);
  }

  /**
   * Informs the underlying transaction that a high level transaction event
   * has occurred and should be dispatched to any listeners occordingly.
   * The event is queued and flushed when the transaction commits.
   */
  public void notifyTriggerEvent(TriggerEvent evt) {
    trigger_event_list.add(evt);
  }

  /**
   * Allocates a new large object in the Blob store of this conglomerate of the
   * given type and size.  The blob data must be written through the
   * Ref after the large object is created.  Once the data has been written
   * the 'complete' method in Ref is called.
   * <p>
   * Once a large object is created and written to, it may be allocated in one
   * or more tables in the conglomerate.
   */
  public Ref createNewLargeObject(byte type, long object_size) {
    // Enable compression for string types (but not binary types).
    // NOTE(review): 3 and 4 are presumably the string large-object type
    // codes and 0x010 the compression flag bit -- confirm against the
    // blob store constants.
    if (type == 3 || type == 4) {
      type = (byte) (type | 0x010);
    }
    return conglomerate.createNewLargeObject(type, object_size);
  }

  /**
   * Tells the conglomerate to flush the blob store.  This should be called
   * after one or more blobs have been created and the data for the blob(s) are
   * set.  It is an important step to perform AFTER blobs have been written.
   * <p>
   * If this is not called and the database closes (or crashes) before a flush
   * occurs then the blob may not be recoverable.
   */
  public void flushBlobStore() {
    conglomerate.flushBlobStore();
  }

  /**
   * Returns a TableQueryDef object that describes the characteristics of a
   * table including the name (TableName), the columns (DataTableDef) and the
   * query plan to produce the table (QueryPlanNode).  This object can be used
   * to resolve information about a particular table, and to evaluate the
   * query plan to produce the table itself.
   * <p>
   * This produces TableQueryDef objects for all table objects in the database
   * including data tables and views.
   * <p>
   * The 'aliased_as' parameter is used to overwrite the default name of the
   * table object.
   */
  public TableQueryDef getTableQueryDef(final TableName table_name,
                                        final TableName aliased_as) {

    // Produce the data table def for this database object.
    DataTableDef dtf = getDataTableDef(table_name);
    // If the table is aliased, set a new DataTableDef with the given name
    if (aliased_as != null) {
      dtf = new DataTableDef(dtf);
      dtf.setTableName(aliased_as);
      dtf.setImmutable();
    }
    final DataTableDef data_table_def = dtf;

//    final String aliased_name =
//                    aliased_as == null ? null : aliased_as.getName();

    // The returned object defers query plan creation until it is asked for,
    // so producing a TableQueryDef is cheap.
    return new TableQueryDef() {
      public DataTableDef getDataTableDef() {
        return data_table_def;
      }
      public QueryPlanNode getQueryPlanNode() {
        return createObjectFetchQueryPlan(table_name, aliased_as);
      }
    };

  }

  /**
   * Creates a QueryPlanNode to fetch the given table object from this
   * connection.  Views and data tables produce different fetch nodes.
   */
  public QueryPlanNode createObjectFetchQueryPlan(TableName table_name,
                                                  TableName aliased_name) {
    String table_type = getTableType(table_name);
    if (table_type.equals("VIEW")) {
      return new QueryPlan.FetchViewNode(table_name, aliased_name);
    }
    else {
      return new QueryPlan.FetchTableNode(table_name, aliased_name);
    }
  }

  // ---------- Schema management and constraint methods ----------
  // Methods that handle getting/setting schema information such as;
  // * Creating/dropping/querying schema
  // * Creating/dropping/querying constraint information including;
  //     check constraints, unique constraints, primary key constraints,
  //     foreign key constraints, etc.

  /**
   * Changes the default schema to the given schema.
   */
  public void setDefaultSchema(String schema_name) {
    boolean ignore_case = isInCaseInsensitiveMode();
    SchemaDef schema = resolveSchemaCase(schema_name, ignore_case);
    if (schema == null) {
      // NOTE: historical behavior throws java.lang.Error here rather than a
      // RuntimeException subclass.
      throw new Error("Schema '" + schema_name + "' does not exist.");
    } else {
      // Set the default schema for this connection
      setCurrentSchema(schema.getName());
    }
  }

  // NOTE: These methods are copied because they simply call through to the
  //   Transaction implementation of the method with the same signature.

  /**
   * Asserts that the connection's locking mechanism is in exclusive mode;
   * used to guard schema/constraint mutations.
   */
  private void checkExclusive() {
    if (!getLockingMechanism().isInExclusiveMode()) {
      throw new Error("Assertion failed: Expected to be in exclusive mode.");
    }
  }

  /**
   * Same as the Transaction.createSchema method.
   */
  public void createSchema(String name, String type) {
    // Assert
    checkExclusive();
    getTransaction().createSchema(name, type);
  }

  /**
   * Same as the Transaction.dropSchema method.
   */
  public void dropSchema(String name) {
    // Assert
    checkExclusive();
    getTransaction().dropSchema(name);
  }

  /**
   * Same as the Transaction.schemaExists method.
   */
  public boolean schemaExists(String name) {
    return getTransaction().schemaExists(name);
  }

  /**
   * Same as the Transaction.resolveSchemaCase method.
   */
  public SchemaDef resolveSchemaCase(String name, boolean ignore_case) {
    return getTransaction().resolveSchemaCase(name, ignore_case);
  }

  /**
   * Convenience - returns the SchemaDef object given the name of the schema.
   * If identifiers are case insensitive, we resolve the case of the schema
   * name also.
   */
  public SchemaDef resolveSchemaName(String name) {
    boolean ignore_case = isInCaseInsensitiveMode();
    return resolveSchemaCase(name, ignore_case);
  }

  /**
   * Same as the Transaction.getSchemaList method.
   */
  public SchemaDef[] getSchemaList() {
    return getTransaction().getSchemaList();
  }

  /**
   * Same as the Transaction.setPersistentVar method.
   */
  public void setPersistentVar(String variable, String value) {
    // Assert
    checkExclusive();
    getTransaction().setPersistentVar(variable, value);
  }

  /**
   * Same as the Transaction.getPersistentVar method.
   * (The misspelled 'getPersistantVar' is the name of the Transaction API.)
   */
  public String getPersistentVar(String variable) {
    return getTransaction().getPersistantVar(variable);
  }

  /**
   * Same as the Transaction.addUniqueConstraint method.
   */
  public void addUniqueConstraint(TableName table_name, String[] cols,
                                  short deferred, String constraint_name) {
    // Assert
    checkExclusive();
    getTransaction().addUniqueConstraint(table_name, cols,
                                         deferred, constraint_name);
  }

  /**
   * Same as the Transaction.addForeignKeyConstraint method.
   */
  public void addForeignKeyConstraint(TableName table, String[] cols,
                                      TableName ref_table, String[] ref_cols,
                                      String delete_rule, String update_rule,
                                      short deferred, String constraint_name) {
    // Assert
    checkExclusive();
    getTransaction().addForeignKeyConstraint(table, cols, ref_table, ref_cols,
                                             delete_rule, update_rule,
                                             deferred, constraint_name);
  }

  /**
   * Same as the Transaction.addPrimaryKeyConstraint method.
   */
  public void addPrimaryKeyConstraint(TableName table_name, String[] cols,
                                      short deferred, String constraint_name) {
    // Assert
    checkExclusive();
    getTransaction().addPrimaryKeyConstraint(table_name, cols,
                                             deferred, constraint_name);
  }

  /**
   * Same as the Transaction.addCheckConstraint method.
   */
  public void addCheckConstraint(TableName table_name,
             Expression expression, short deferred, String constraint_name) {
    // Assert
    checkExclusive();
    getTransaction().addCheckConstraint(table_name, expression,
                                        deferred, constraint_name);
  }

  /**
   * Same as the Transaction.dropAllConstraintsForTable method.
   */
  public void dropAllConstraintsForTable(TableName table_name) {
    // Assert
    checkExclusive();
    getTransaction().dropAllConstraintsForTable(table_name);
  }

  /**
   * Same as the Transaction.dropNamedConstraint method.
   */
  public int dropNamedConstraint(TableName table_name,
                                 String constraint_name) {
    // Assert
    checkExclusive();
    return getTransaction().dropNamedConstraint(table_name, constraint_name);
  }

  /**
   * Same as the Transaction.dropPrimaryKeyConstraintForTable method.
   */
  public boolean dropPrimaryKeyConstraintForTable(
                          TableName table_name, String constraint_name) {
    // Assert
    checkExclusive();
    return getTransaction().dropPrimaryKeyConstraintForTable(table_name,
                                                             constraint_name);
  }

  /**
   * Same as the Transaction.queryTablesRelationallyLinkedTo method.
   */
  public TableName[] queryTablesRelationallyLinkedTo(TableName table) {
    return Transaction.queryTablesRelationallyLinkedTo(getTransaction(), table);
  }

  /**
   * Same as the Transaction.queryTableUniqueGroups method.
   */
  public Transaction.ColumnGroup[] queryTableUniqueGroups(
                                                    TableName table_name) {
    return Transaction.queryTableUniqueGroups(getTransaction(), table_name);
  }

  /**
   * Same as the Transaction.queryTablePrimaryKeyGroup method.
   */
  public Transaction.ColumnGroup queryTablePrimaryKeyGroup(
                                                    TableName table_name) {
    return Transaction.queryTablePrimaryKeyGroup(getTransaction(), table_name);
  }

  /**
   * Same as the Transaction.queryTableCheckExpression method.
   */
  public Transaction.CheckExpression[] queryTableCheckExpressions(
                                                    TableName table_name) {
    return Transaction.queryTableCheckExpressions(getTransaction(),
                                                  table_name);
  }

  /**
   * Same as the Transaction.queryTableForeignKeyReferences method.
   */
  public Transaction.ColumnGroupReference[] queryTableForeignKeyReferences(
                                                    TableName table_name) {
    return Transaction.queryTableForeignKeyReferences(getTransaction(),
                                                      table_name);
  }

  /**
   * Same as the Transaction.queryTableImportedForeignKeyReferences method.
   */
  public Transaction.ColumnGroupReference[]
                 queryTableImportedForeignKeyReferences(TableName table_name) {
    return Transaction.queryTableImportedForeignKeyReferences(getTransaction(),
                                                              table_name);
  }

  // ---------- Triggered OLD/NEW table handling ----------
  // These methods are used by the ConnectionTriggerManager object to
  // temporarily create OLD and NEW tables in this connection from inside a
  // triggered action.  In some cases (before the operation) the OLD table
  // is mutable.

  /**
   * Returns the current state of the old/new tables.
   */
  OldNewTableState getOldNewTableState() {
    return current_old_new_state;
  }

  /**
   * Sets the current state of the old/new tables.  When nesting OLD/NEW
   * tables for nested stored procedures, the current state should be first
   * recorded and reverted back when the nested procedure finishes.
   */
  void setOldNewTableState(OldNewTableState state) {
    current_old_new_state = state;
  }

  // ---------- Trigger methods ----------

  /**
   * Notifies this connection that an insert/delete or update operation has
   * occurred on some table of this DatabaseConnection.  This should notify
   * the trigger connection manager of this event so that it may perform any
   * action that may have been set up to occur on this event.
   */
  void fireTableEvent(TableModificationEvent evt) {
    connection_trigger_manager.performTriggerAction(evt);
  }

  // ---------- Implemented from TriggerListener ----------

  /**
   * Notifies when a trigger has fired for this user.  If there are no open
   * transactions on this connection then we do a straight call back trigger
   * notify.  If there is a transaction open then trigger events are added
   * to the 'trigger_event_buffer' which fires when the connection transaction
   * is committed or rolled back.
   */
  public void fireTrigger(DatabaseConnection database, String trigger_name,
                          TriggerEvent evt) {

    if (this != database) {
      throw new Error("User object mismatch.");
    }

    try {
      // Did we pass in a call back interface?
      if (call_back != null) {
        synchronized (trigger_event_buffer) {
          // If there is no active transaction then fire trigger immediately.
          if (transaction == null) {
            call_back.triggerNotify(trigger_name, evt.getType(),
                                    evt.getSource(), evt.getCount());
          }
          // Otherwise add to buffer
          // (stored as alternating name/event pairs - see
          // firePendingTriggerEvents which consumes the buffer in steps of 2)
          else {
            trigger_event_buffer.add(trigger_name);
            trigger_event_buffer.add(evt);
          }
        }
      }
    }
    catch (Throwable e) {
      Debug().write(Lvl.ERROR, this, "TRIGGER Exception: " + e.getMessage());
    }
  }

  /**
   * Fires any triggers that are pending in the trigger buffer.  The actual
   * notification is posted as a deferred event on the database dispatcher
   * rather than run inline.
   */
  private void firePendingTriggerEvents() {
    int sz;
    synchronized (trigger_event_buffer) {
      sz = trigger_event_buffer.size();
    }
    if (sz > 0) {
      // Post an event that fires the triggers for each listener.
      Runnable runner = new Runnable() {
        public void run() {
          synchronized (trigger_event_buffer) {
            // Fire all pending trigger events in buffer
            for (int i = 0; i < trigger_event_buffer.size(); i += 2) {
              String trigger_name = (String) trigger_event_buffer.get(i);
              TriggerEvent evt =
                             (TriggerEvent) trigger_event_buffer.get(i + 1);
              call_back.triggerNotify(trigger_name, evt.getType(),
                                      evt.getSource(), evt.getCount());
            }
            // Clear the buffer
            trigger_event_buffer.clear();
          }
        }
      };

      // Post the event to go off approx 3ms from now.
      database.postEvent(3, database.createEvent(runner));
    }

  }

  /**
   * Private method that disposes the current transaction.  Flushes pending
   * trigger notifications and informs table backed caches that the
   * transaction is over.
   */
  private void disposeTransaction() {
    // Set the transaction to null
    transaction = null;
    // Fire any pending trigger events in the trigger buffer.
    firePendingTriggerEvents();
    // Clear the trigger events in this object
    trigger_event_list.clear();

    // Notify any table backed caches that this transaction has finished.
    int sz = table_backed_cache_list.size();
    for (int i = 0; i < sz; ++i) {
      TableBackedCache cache =
                         (TableBackedCache) table_backed_cache_list.get(i);
      cache.transactionFinished();
    }
  }

  /**
   * Tries to commit the current transaction.
   If the transaction can not be
   * committed because there were concurrent changes that interfered with
   * each other then a TransactionError is thrown and the transaction is
   * rolled back.
   * <p>
   * NOTE: It's guarenteed that the transaction will be closed even if a
   *   transaction exception occurs.
   * <p>
   * Synchronization is implied on this method, because the locking mechanism
   *   should be exclusive when this is called.
   */
  public void commit() throws TransactionException {
    // Are we currently allowed to commit/rollback?
    if (close_transaction_disabled) {
      throw new RuntimeException("Commit is not allowed.");
    }

    if (user != null) {
      user.refreshLastCommandTime();
    }

    // NOTE, always connection exclusive op.
    getLockingMechanism().reset();
    tables_cache.clear();

    if (transaction != null) {
      try {
        // Close and commit the transaction
        transaction.closeAndCommit();

        // Fire all SQL action level triggers that were generated on actions.
        database.getTriggerManager().flushTriggerEvents(trigger_event_list);
      }
      finally {
        // Dispose the current transaction
        disposeTransaction();
      }
    }
  }

  /**
   * Rolls back the current transaction operating within this connection.
   * <p>
   * NOTE: It's guarenteed that the transaction will be closed even if an
   *   exception occurs.
   * <p>
   * Synchronization is implied on this method, because the locking mechanism
   *   should be exclusive when this is called.
   */
  public void rollback() {
    // Are we currently allowed to commit/rollback?
    if (close_transaction_disabled) {
      throw new RuntimeException("Rollback is not allowed.");
    }

    if (user != null) {
      user.refreshLastCommandTime();
    }

    // NOTE, always connection exclusive op.
    tables_cache.clear();

    if (transaction != null) {
      getLockingMechanism().reset();
      try {
        transaction.closeAndRollback();
      }
      finally {
        // Dispose the current transaction
        disposeTransaction();
        // Dispose the jdbc connection
        if (jdbc_connection != null) {
          try {
            InternalJDBCHelper.disposeJDBCConnection(jdbc_connection);
          }
          catch (Throwable e) {
            Debug().write(Lvl.ERROR, this,
                          "Error disposing internal JDBC connection.");
            Debug().writeException(Lvl.ERROR, e);
            // We don't wrap this exception
          }
          jdbc_connection = null;
        }
      }
    }
  }

  /**
   * Closes this database connection.  Rolls back any open transaction,
   * detaches table backed caches and clears trigger listeners; errors are
   * reported to stderr but never propagated.
   */
  public void close() {
    try {
      rollback();
    }
    catch (Throwable e) {
      e.printStackTrace(System.err);
    }
    finally {
      if (table_backed_cache_list != null) {
        try {
          int sz = table_backed_cache_list.size();
          for (int i = 0; i < sz; ++i) {
            TableBackedCache cache =
                           (TableBackedCache) table_backed_cache_list.get(i);
            cache.detatchFrom(conglomerate);
          }
          table_backed_cache_list = null;
        }
        catch (Throwable e) {
          e.printStackTrace(System.err);
        }
      }
      // Remove any trigger listeners set for this connection,
      database.getTriggerManager().clearAllDatabaseConnectionTriggers(this);
    }
  }

  // Last-resort cleanup if the connection is garbage collected while open.
  // (finalize() is a legacy mechanism; close() should always be called
  // explicitly.)
  public void finalize() throws Throwable {
    super.finalize();
    close();
  }

  // ---------- Inner classes ----------

  /**
   * An implementation of ProcedureConnection generated from this object.
   */
  private class DCProcedureConnection implements ProcedureConnection {

    /**
     * The User of this connection before this procedure was started.
     */
    private User previous_user;

    /**
     * The 'close_transaction_disabled' flag when this connection was created.
     */
    // NOTE(review): this flag does not appear to be read or written in the
    //   visible code - confirm it is still needed.
    private boolean transaction_disabled_flag;

    /**
     * The JDBCConnection created by this object.
     */
    private java.sql.Connection jdbc_connection;


    // Lazily creates (and caches) an internal JDBC connection bound to this
    // DatabaseConnection for use by the stored procedure.
    public java.sql.Connection getJDBCConnection() {
      if (jdbc_connection == null) {
        jdbc_connection = InternalJDBCHelper.createJDBCConnection(getUser(),
                                                    DatabaseConnection.this);
      }
      return jdbc_connection;
    }

    public Database getDatabase() {
      return DatabaseConnection.this.getDatabase();
    }

    // Releases the procedure connection; any JDBC connection created through
    // it is disposed (dispose errors are logged, not propagated).
    void dispose() {
      previous_user = null;
      if (jdbc_connection != null) {
        try {
          InternalJDBCHelper.disposeJDBCConnection(jdbc_connection);
        }
        catch (Throwable e) {
          Debug().write(Lvl.ERROR, this,
                        "Error disposing internal JDBC connection.");
          Debug().writeException(Lvl.ERROR, e);
          // We don't wrap this exception
        }
      }
    }

  }

  /**
   * An internal table info object that handles OLD and NEW tables for
   * triggered actions.  OLD (if present) is always table index 0 and NEW
   * takes the next free index.
   */
  private class OldAndNewInternalTableInfo implements InternalTableInfo {

    // An OLD table exists when a source row index has been recorded.
    private boolean hasOLDTable() {
      return current_old_new_state.OLD_row_index != -1;
    }

    // A NEW table exists when new row data has been recorded.
    private boolean hasNEWTable() {
      return current_old_new_state.NEW_row_data != null;
    }

    public int getTableCount() {
      int count = 0;
      if (hasOLDTable()) {
        ++count;
      }
      if (hasNEWTable()) {
        ++count;
      }
      return count;
    }

    public int findTableName(TableName name) {
      if (hasOLDTable() && name.equals(Database.OLD_TRIGGER_TABLE)) {
        return 0;
      }
      if (hasNEWTable() && name.equals(Database.NEW_TRIGGER_TABLE)) {
        // NEW is index 1 when OLD is also present, otherwise index 0.
        if (hasOLDTable()) {
          return 1;
        }
        else {
          return 0;
        }
      }
      return -1;
    }

    public TableName getTableName(int i) {
      // Index 0 is OLD when an OLD table exists; any other index (or no OLD
      // table) resolves to NEW.
      if (hasOLDTable()) {
        if (i == 0) {
          return Database.OLD_TRIGGER_TABLE;
        }
      }
      return Database.NEW_TRIGGER_TABLE;
    }

    public boolean containsTableName(TableName name) {
      return findTableName(name) != -1;
    }

    public String getTableType(int i) {
      return "SYSTEM TABLE";
    }

    public DataTableDef getDataTableDef(int i) {
      // The OLD/NEW tables share the column layout of the trigger source
      // table, renamed to the OLD/NEW trigger table name.
      DataTableDef table_def = DatabaseConnection.this.getDataTableDef(
                                     current_old_new_state.trigger_source);
      DataTableDef new_table_def = new DataTableDef(table_def);
      new_table_def.setTableName(getTableName(i));
      return new_table_def;
    }

    public MutableTableDataSource createInternalTable(int index) {
      DataTableDef t_def = getDataTableDef(index);

      TriggeredOldNewDataSource table =
                          new TriggeredOldNewDataSource(getSystem(), t_def);

      if (hasOLDTable()) {
        if (index == 0) {

          // Copy data from the table to the new table
          DataTable dtable = DatabaseConnection.this.getTable(
                                     current_old_new_state.trigger_source);
          RowData old_row_data = new RowData(table);
          int row_index = current_old_new_state.OLD_row_index;
          for (int i = 0; i < t_def.columnCount(); ++i) {
            old_row_data.setColumnDataFromTObject(i,
                                  dtable.getCellContents(i, row_index));
          }
          // All OLD tables are immutable
          table.setImmutable(true);
          table.setRowData(old_row_data);

          return table;
        }
      }

      // The NEW table is only mutable for BEFORE triggers.
      table.setImmutable(!current_old_new_state.mutable_NEW);
      table.setRowData(current_old_new_state.NEW_row_data);

      return table;
    }

  }

  /**
   * A MutableTableDataSource implementation that is used for trigger actions
   * to represent the data in the OLD and NEW tables.  It always contains
   * exactly one row.
   */
  private static class TriggeredOldNewDataSource extends GTDataSource {

    private DataTableDef table_def;
    private RowData content;
    private boolean immutable;

    /**
     * Constructor.
     */
    public TriggeredOldNewDataSource(TransactionSystem system,
                                     DataTableDef table_def) {
      super(system);
      this.table_def = table_def;
    }

    void setImmutable(boolean im) {
      this.immutable = im;
    }

    void setRowData(RowData row_data) {
      this.content = row_data;
    }

    public DataTableDef getDataTableDef() {
      return table_def;
    }

    // OLD/NEW tables always contain exactly one row.
    public int getRowCount() {
      return 1;
    }

    public TObject getCellContents(final int column, final int row) {
      if (row < 0 || row > 0) {
        throw new RuntimeException("Row index out of bounds.");
      }
      return content.getCellData(column);
    }

    public int addRow(RowData row_data) {
      throw new RuntimeException("Inserting into table '" +
                 getDataTableDef().getTableName() + "' is not permitted.");
    }

    public void removeRow(int row_index) {
      throw new RuntimeException("Deleting from table '" +
                 getDataTableDef().getTableName() + "' is not permitted.");
    }

    public int updateRow(int row_index, RowData row_data) {
      // Only permitted on a mutable NEW table (BEFORE triggers).
      if (immutable) {
        throw new RuntimeException("Updating table '" +
                 getDataTableDef().getTableName() + "' is not permitted.");
      }
      if (row_index < 0 || row_index > 0) {
        throw new RuntimeException("Row index out of bounds.");
      }
      int sz = getDataTableDef().columnCount();
      for (int i = 0; i < sz; ++i) {
        content.setColumnDataFromTObject(i, row_data.getCellData(i));
      }
      return 0;
    }

    public MasterTableJournal getJournal() {
      // Shouldn't be used...
      throw new RuntimeException("Invalid method used.");
    }

    public void flushIndexChanges() {
      // Shouldn't be used...
      throw new RuntimeException("Invalid method used.");
    }

    public void constraintIntegrityCheck() {
      // Should always pass (no integrity check needed for OLD/NEW table).
    }

  }

  /**
   * A list of DataTableDef system table definitions for tables internal to
   * the database connection.
   */
  private final static DataTableDef[] INTERNAL_DEF_LIST;

  static {
    // NOTE: index order here must match the index dispatch in
    //   ConnectionInternalTableInfo.createInternalTable below.
    INTERNAL_DEF_LIST = new DataTableDef[5];
    INTERNAL_DEF_LIST[0] = GTStatisticsDataSource.DEF_DATA_TABLE_DEF;
    INTERNAL_DEF_LIST[1] = GTConnectionInfoDataSource.DEF_DATA_TABLE_DEF;
    INTERNAL_DEF_LIST[2] = GTCurrentConnectionsDataSource.DEF_DATA_TABLE_DEF;
    INTERNAL_DEF_LIST[3] = GTSQLTypeInfoDataSource.DEF_DATA_TABLE_DEF;
    INTERNAL_DEF_LIST[4] = GTPrivMapDataSource.DEF_DATA_TABLE_DEF;
  }

  /**
   * An internal table info object that handles tables internal to a
   * DatabaseConnection object.
   */
  private class ConnectionInternalTableInfo extends AbstractInternalTableInfo {

    /**
     * Constructor.
     */
    public ConnectionInternalTableInfo() {
      super("SYSTEM TABLE", INTERNAL_DEF_LIST);
    }

    // ---------- Implemented ----------

    public MutableTableDataSource createInternalTable(int index) {
      if (index == 0) {
        return new GTStatisticsDataSource(DatabaseConnection.this).init();
      }
      else if (index == 1) {
        return new GTConnectionInfoDataSource(DatabaseConnection.this).init();
      }
      else if (index == 2) {
        return new GTCurrentConnectionsDataSource(
                                             DatabaseConnection.this).init();
      }
      else if (index == 3) {
        return new GTSQLTypeInfoDataSource(DatabaseConnection.this).init();
      }
      else if (index == 4) {
        // NOTE(review): unlike the other data sources this one is not
        //   init()'ed - confirm GTPrivMapDataSource requires no init.
        return new GTPrivMapDataSource(DatabaseConnection.this);
      }
      else {
        throw new RuntimeException();
      }
    }

  }

  /**
   * Call back interface for events that occur within the connection instance.
   */
  public static interface CallBack {

    /**
     * Notifies the callee that a trigger event was fired that this user
     * is listening for.
     */
    void triggerNotify(String trigger_name, int trigger_event,
                       String trigger_source, int fire_count);

  }

  /**
   * An object that stores state about the trigger table OLD and NEW when
   * the connection is set up to execute a stored procedure.
   */
  static class OldNewTableState {

    /**
     * The name of the table that is the trigger source.
*/ TableName trigger_source; /** * The row index of the OLD data that is being updated or deleted in the * trigger source table. */ int OLD_row_index = -1; /** * The RowData of the new data that is being inserted/updated in the trigger * source table. */ RowData NEW_row_data; /** * If true then the 'new_data' information is mutable which would be true for * a BEFORE trigger. For example, we would want to change the data in the * row that caused the trigger to fire. */ boolean mutable_NEW; /** * The DataTable object that represents the OLD table, if set. */ DataTable OLD_data_table; /** * The DataTable object that represents the NEW table, if set. */ DataTable NEW_data_table; /** * Constructor. */ OldNewTableState(TableName table_source, int old_d, RowData new_d, boolean is_mutable) { this.trigger_source = table_source; this.OLD_row_index = old_d; this.NEW_row_data = new_d; this.mutable_NEW = is_mutable; } /** * Default constructor. */ OldNewTableState() { } } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/DatabaseConstants.java000066400000000000000000000040401330501023400261570ustar00rootroot00000000000000/** * com.mckoi.database.DatabaseConstants 04 May 1998 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; /** * Contant static values that determine several parameters of the database * operation. 
It is important that a database data generated from a * compilation from one set of constants is not used with the same database * with different constants. *

* @author Tobias Downer */ public interface DatabaseConstants { /** * The maximum length in characters of the string that represents the name * of the database. */ public static final int MAX_DATABASE_NAME_LENGTH = 50; /** * The maximum length in characters of the string that represents the name * of a privaledge group. */ public static final int MAX_PRIVGROUP_NAME_LENGTH = 50; /** * The maximum length in characters of the string that holds the table * name. The table name is used to reference a Table object in a Database. */ public static final int MAX_TABLE_NAME_LENGTH = 50; /** * The maximum length in characters of the string that holds the user * name. The user name is used in many security and priviledge operations. */ public static final int MAX_USER_NAME_LENGTH = 50; /** * The maximum length in character of the string that holds a users * password. The password is used when logging into the database. */ public static final int MAX_PASSWORD_LENGTH = 80; } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/DatabaseConstraintViolationException.java000066400000000000000000000045711330501023400321040ustar00rootroot00000000000000/** * com.mckoi.database.DatabaseConstraintViolationException 02 Sep 2001 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; /** * A database exception that represents a constraint violation. 
* * @author Tobias Downer */ public class DatabaseConstraintViolationException extends RuntimeException { // ---------- Statics ---------- /** * A Primary Key constraint violation error code. */ public static final int PRIMARY_KEY_VIOLATION = 20; /** * A Unique constraint violation error code. */ public static final int UNIQUE_VIOLATION = 21; /** * A Check constraint violation error code. */ public static final int CHECK_VIOLATION = 22; /** * A Foreign Key constraint violation error code. */ public static final int FOREIGN_KEY_VIOLATION = 23; /** * A Nullable constraint violation error code (data added to not null * columns that was null). */ public static final int NULLABLE_VIOLATION = 24; /** * Java type constraint violation error code (tried to insert a Java object * that wasn't derived from the java object type defined for the column). */ public static final int JAVA_TYPE_VIOLATION = 25; /** * Tried to drop a table that is referenced by another source. */ public static final int DROP_TABLE_VIOLATION = 26; /** * Column can't be dropped before of an reference to it. */ public static final int DROP_COLUMN_VIOLATION = 27; /** * The error code. */ private int error_code; /** * Constructor. */ public DatabaseConstraintViolationException(int err_code, String msg) { super(msg); this.error_code = err_code; } /** * Returns the violation error code. */ public int getErrorCode() { return error_code; } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/DatabaseDispatcher.java000066400000000000000000000112551330501023400262770ustar00rootroot00000000000000/** * com.mckoi.database.DatabaseDispatcher 19 Aug 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import java.util.*; import com.mckoi.debug.*; /** * This is the database system dispatcher thread. This is a thread that * runs in the background servicing delayed events. This thread serves a * number of purposes. It can be used to perform optimizations/clean ups in * the background (similar to hotspot). It could be used to pause until * sufficient information has been collected or there is a lull in * work before it does a query in the background. For example, if a VIEW * is invalidated because the underlying data changes, then we can wait * until the data has finished updating, then perform the view query to * update it correctly. * * @author Tobias Downer */ class DatabaseDispatcher extends Thread { private ArrayList event_queue = new ArrayList(); private TransactionSystem system; private boolean finished; /** * NOTE: Constructing this object will start the thread. */ DatabaseDispatcher(TransactionSystem system) { this.system = system; setDaemon(true); setName("Mckoi - Database Dispatcher"); finished = false; start(); } /** * Creates an event object that is passed into 'addEventToDispatch' method * to run the given Runnable method after the time has passed. *

* The event created here can be safely posted on the event queue as many * times as you like. It's useful to create an event as a persistant object * to service some event. Just post it on the dispatcher when you want * it run! */ Object createEvent(Runnable runnable) { return new DatabaseEvent(runnable); } /** * Adds a new event to be dispatched on the queue after 'time_to_wait' * milliseconds has passed. */ synchronized void postEvent(int time_to_wait, Object event) { DatabaseEvent evt = (DatabaseEvent) event; // Remove this event from the queue, event_queue.remove(event); // Set the correct time for the event. evt.time_to_run_event = System.currentTimeMillis() + time_to_wait; // Add to the queue in correct order int index = Collections.binarySearch(event_queue, event); if (index < 0) { index = -(index + 1); } event_queue.add(index, event); notifyAll(); } /** * Ends this dispatcher thread. */ synchronized void finish() { finished = true; notifyAll(); } public void run() { while (true) { try { DatabaseEvent evt = null; synchronized (this) { while (evt == null) { // Return if finished if (finished) { return; } if (event_queue.size() > 0) { // Get the top entry, do we execute it yet? evt = (DatabaseEvent) event_queue.get(0); long diff = evt.time_to_run_event - System.currentTimeMillis(); // If we got to wait for the event then do so now... if (diff >= 0) { evt = null; wait((int) diff); } } else { // Event queue empty so wait for someone to put an event on it. 
wait(); } } // Remove the top event from the list, event_queue.remove(0); } // 'evt' is our event to run, evt.runnable.run(); } catch (Throwable e) { system.Debug().write(Lvl.ERROR, this, "SystemDispatchThread error"); system.Debug().writeException(e); } } } // ---------- Inner classes ---------- class DatabaseEvent implements Comparable { private long time_to_run_event; private Runnable runnable; DatabaseEvent(Runnable runnable) { this.runnable = runnable; } public int compareTo(Object ob) { DatabaseEvent evt2 = (DatabaseEvent) ob; long dif = time_to_run_event - evt2.time_to_run_event; if (dif > 0) { return 1; } else if (dif < 0) { return -1; } return 0; } } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/DatabaseException.java000066400000000000000000000024531330501023400261470ustar00rootroot00000000000000/** * com.mckoi.database.DatabaseException 02 Mar 1998 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; /** * Exception thrown where various problems occur within the database. *

* @author Tobias Downer */ public class DatabaseException extends Exception { private int error_code; // ---------- Members ---------- public DatabaseException(int error_code, String message) { super(message); this.error_code = error_code; } public DatabaseException(String message) { this(-1, message); } /** * Returns the error code. -1 means no error code was given. */ public int getErrorCode() { return error_code; } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/DatabaseProcedure.java000066400000000000000000000042421330501023400261370ustar00rootroot00000000000000/** * com.mckoi.database.DatabaseProcedure 10 Aug 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; /** * This interface represents a database procedure that is executed on the * server side. It is used to perform database specific functions that can * only be performed on the server. *

* A procedure must manage its own table locking. * * @author Tobias Downer */ public interface DatabaseProcedure { /** * Executes the procudure and returns the resultant table. Note, the * args have to be serializable. There may be only 0 to 16 arguments. * The method may throw a 'DatabaseException' if the procedure failed. */ Table execute(User user, Object[] args) throws DatabaseException; /** * This returns a DataTable[] array that lists the DataTables that are read * during this procedure. */ DataTable[] getReadTables(DatabaseConnection db) throws DatabaseException; /** * Returns a DataTable[] array that lists the DataTables that are written * to during this procedure. */ DataTable[] getWriteTables(DatabaseConnection db) throws DatabaseException; /** * Returns the locking mode in which the database operates. This is either * LockingMechanism.SHARED_MODE or LockingMechanism.EXCLUSIVE_MODE. In most * cases this will be SHARED_MODE. */ int getLockingMode(); /** * Sets the LockHandle object for this procedure. This should be called * after the tables that this procedure uses have been locked. */ void setLockHandle(LockHandle lock_handle); } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/DatabaseQueryContext.java000066400000000000000000000064041330501023400266630ustar00rootroot00000000000000/** * com.mckoi.database.DatabaseQueryContext 25 Mar 2002 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; /** * An implementation of a QueryContext based on a DatabaseConnection object. * * @author Tobias Downer */ public class DatabaseQueryContext extends AbstractQueryContext { /** * The DatabaseConnection. */ private DatabaseConnection database; /** * Constructs the QueryContext. */ public DatabaseQueryContext(DatabaseConnection database) { this.database = database; } /** * Returns the Database object that this context is a child of. */ public Database getDatabase() { return database.getDatabase(); } /** * Returns a TransactionSystem object that is used to determine information * about the transactional system. */ public TransactionSystem getSystem() { return getDatabase().getSystem(); } /** * Returns the system FunctionLookup object. */ public FunctionLookup getFunctionLookup() { return getSystem().getFunctionLookup(); } /** * Returns the GrantManager object that is used to determine grant information * for the database. */ public GrantManager getGrantManager() { return database.getGrantManager(); } /** * Returns a DataTable from the database with the given table name. */ public DataTable getTable(TableName name) { database.addSelectedFromTable(name); return database.getTable(name); } /** * Returns a DataTableDef for the given table name. */ public DataTableDef getDataTableDef(TableName name) { return database.getDataTableDef(name); } /** * Creates a QueryPlanNode for the view with the given name. */ public QueryPlanNode createViewQueryPlanNode(TableName name) { return database.createViewQueryPlanNode(name); } /** * Increments the sequence generator and returns the next unique key. */ public long nextSequenceValue(String name) { return database.nextSequenceValue(name); } /** * Returns the current sequence value returned for the given sequence * generator within the connection defined by this context. 
If a value was * not returned for this connection then a statement exception is generated. */ public long currentSequenceValue(String name) { return database.lastSequenceValue(name); } /** * Sets the current sequence value for the given sequence generator. */ public void setSequenceValue(String name, long value) { database.setSequenceValue(name, value); } /** * Returns the user name of the connection. */ public String getUserName() { return database.getUser().getUserName(); } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/DatabaseSystem.java000066400000000000000000000227521330501023400255010ustar00rootroot00000000000000/** * com.mckoi.database.DatabaseSystem 12 Aug 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import com.mckoi.util.Stats; import com.mckoi.debug.*; import com.mckoi.database.control.DBConfig; //import java.io.File; import java.util.List; import java.util.ArrayList; //import java.util.ResourceBundle; //import java.util.MissingResourceException; import java.util.Properties; /** * This class provides information about shared resources available for the * entire database system running in this VM. Shared information includes * configuration details, DataCellCache, plug-ins, user management, etc. 
* * @author Tobias Downer */ public final class DatabaseSystem extends TransactionSystem { /** * The StatementCache that maintains a cache of parsed queries. */ private StatementCache statement_cache = null; /** * True if all queries on the database should be logged in the 'commands.log' * file in the log directory. */ private boolean query_logging; /** * The WorkerPool object that manages access to the database(s) in the * system. */ private WorkerPool worker_pool; /** * The list of Database objects that this system is being managed by this * VM. */ private ArrayList database_list; /** * Set to true when the database is shut down. */ private boolean shutdown = false; /** * The UserManager object that handles users connected to the database * engine. */ private UserManager user_manager; /** * The thread to run to shut down the database system. */ private ShutdownThread shutdown_thread; /** * Constructor. */ public DatabaseSystem() { super(); } /** * Inits the DatabaseSystem with the configuration properties of the system. * This can only be called once, and should be called at database boot time. */ public void init(DBConfig config) { super.init(config); database_list = new ArrayList(); // Create the user manager. user_manager = new UserManager(); if (config != null) { boolean status; // Set up the statement cache. status = getConfigBoolean("statement_cache", true); if (status) { statement_cache = new StatementCache(this, 127, 140, 20); } Debug().write(Lvl.MESSAGE, DatabaseSystem.class, "statement_cache = " + status); // The maximum number of worker threads. int max_worker_threads = getConfigInt("maximum_worker_threads", 4); if (max_worker_threads <= 0) { max_worker_threads = 1; } Debug().write(Lvl.MESSAGE, DatabaseSystem.class, "Max worker threads set to: " + max_worker_threads); worker_pool = new WorkerPool(this, max_worker_threads); // Should we be logging commands? 
query_logging = getConfigBoolean("query_logging", false); } else { throw new Error("Config bundle already set."); } shutdown = false; } // ---------- Queries ---------- /** * If query logging is enabled (all queries are output to 'commands.log' in * the log directory), this returns true. Otherwise it returns false. */ public boolean logQueries() { return query_logging; } // ---------- Clean up ---------- /** * Disposes all the resources associated with this DatabaseSystem and * invalidates this object. */ public void dispose() { super.dispose(); worker_pool = null; database_list = null; user_manager = null; } // ---------- Cache Methods ---------- /** * Returns the StatementCache that is used to cache StatementTree objects * that are being queried by the database. This is used to reduce the * SQL command parsing overhead. *

* If this method returns 'null' then statement caching is disabled. */ public StatementCache getStatementCache() { return statement_cache; } // ---------- System preparers ---------- /** * Given a Transaction.CheckExpression, this will prepare the expression and * return a new prepared CheckExpression. *

* A DatabaseSystem resolves the variables (ignoring case if necessary) and * the functions of the expression. */ public Transaction.CheckExpression prepareTransactionCheckConstraint( DataTableDef table_def, Transaction.CheckExpression check) { return super.prepareTransactionCheckConstraint(table_def, check); } // ---------- User management ---------- /** * Returns the UserManager object that handles users that are connected * to the database. The aim of this class is to unify the way users are * handled by the engine. It allows us to perform queries to see who's * connected, and any inter-user communication (triggers). */ UserManager getUserManager() { return user_manager; } // ---------- Worker Pool Methods ---------- /** * Waits until all executing commands have stopped. This is best called * right after a call to 'setIsExecutingCommands(false)'. If these two * commands are run, the database is in a known state where no commands * can be executed. *

* NOTE: This can't be called from the WorkerThread. Deadlock will * result if we were allowed to do this. */ void waitUntilAllWorkersQuiet() { worker_pool.waitUntilAllWorkersQuiet(); } /** * Controls whether the database system is allowed to execute commands or * not. If this is set to true, then calls to 'execute' will be executed * as soon as there is a free worker thread available. Otherwise no * commands are executed until this is enabled. */ void setIsExecutingCommands(boolean status) { worker_pool.setIsExecutingCommands(status); } /** * Executes database functions from the 'run' method of the given runnable * instance on the first available worker thread. All database functions * must go through a worker thread. If we ensure this, we can easily stop * all database functions from executing if need be. Also, we only need to * have a certain number of threads active at any one time rather than a * unique thread for each connection. */ void execute(User user, DatabaseConnection database, Runnable runner) { worker_pool.execute(user, database, runner); } // ---------- Shut down methods ---------- private final ArrayList shut_down_delegates = new ArrayList(); /** * Registers the delegate that is executed when the shutdown thread * is activated. Only one delegate may be registered with the database * system. This is only called once and shuts down the relevant * database services. */ void registerShutDownDelegate(Runnable delegate) { shut_down_delegates.add(delegate); } /** * The shut down thread. Started when 'shutDown' is called. */ private class ShutdownThread extends Thread { private boolean finished = false; synchronized void waitTillFinished() { while (finished == false) { try { wait(); } catch (InterruptedException e) {} } } public void run() { synchronized (this) { if (finished) { return; } } // We need this pause so that the command that executed this shutdown // has time to exit and retrieve the single row result. 
try { Thread.sleep(1500); } catch (InterruptedException e) {} // Stops commands from being executed by the system... setIsExecutingCommands(false); // Wait until the worker threads are all quiet... waitUntilAllWorkersQuiet(); // Close the worker pool worker_pool.shutdown(); int sz = shut_down_delegates.size(); if (sz == 0) { Debug().write(Lvl.WARNING, this, "No shut down delegates registered!"); } else { for (int i = 0; i < sz; ++i) { Runnable shut_down_delegate = (Runnable) shut_down_delegates.get(i); // Run the shut down delegates shut_down_delegate.run(); } shut_down_delegates.clear(); } synchronized (this) { // Wipe all variables from this object dispose(); finished = true; notifyAll(); } } }; /** * This starts the ShutDown thread that is used to shut down the database * server. Since the actual shutdown method is dependent on the type of * database we are running (server or stand-alone) we delegate the * shutdown method to the registered shutdown delegate. */ void startShutDownThread() { if (!shutdown) { shutdown = true; shutdown_thread = new ShutdownThread(); shutdown_thread.start(); } } /** * Returns true if 'shutDown' method has been called. */ boolean hasShutDown() { return shutdown; } /** * Wait until the shutdown thread has completed. (Shutdown process * has finished). */ void waitUntilShutdown() { shutdown_thread.waitTillFinished(); } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/DefaultDataTable.java000066400000000000000000000264611330501023400257170ustar00rootroot00000000000000/** * com.mckoi.database.DefaultDataTable 11 Apr 1998 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import com.mckoi.util.IntegerVector; import java.io.IOException; import java.io.OutputStream; import java.io.InputStream; /** * This represents a default implementation of a DataTable. It encapsulates * information that is core to all DataTable objects. That is, *

* The table name, * The description of the table fields, * A set of SelectableScheme objects to describe row relations, * A counter for the number of rows in the table. *

* There are two classes that extend this object. DataTable which is a * DataTable that is a direct mapping to an internal table stored in the * Database files. And TemporaryTable that contains information generated * on the fly by the DBMS. *

 * @author Tobias Downer
 */

public abstract class DefaultDataTable extends AbstractDataTable {

  /**
   * The Database object that this table is a child of.
   */
  private Database database;

  /**
   * The number of rows in the table.  Initialized to 0 by the constructor;
   * nothing in this class mutates it afterwards, so it is presumably
   * maintained by derived classes as rows are added and removed -- TODO
   * confirm against subclasses.
   */
  protected int row_count;

  /**
   * A list of schemes for managing the data relations of each column.
   * Entry i is the index scheme for column i.  An entry may be null after
   * 'clearColumnScheme' has been called for that column.
   */
  private SelectableScheme[] column_scheme;

  /**
   * The Constructor.  The table starts with a row count of zero.
   */
  DefaultDataTable(Database database) {
    super();
    this.database = database;
    this.row_count = 0;
  }

  /**
   * Returns the Database object this table is part of.
   */
  public Database getDatabase() {
    return database;
  }

  /**
   * Returns the SelectableScheme for the given column.  This is different from
   * 'getColumnScheme(int column)' because this is designed to be overridden
   * so derived classes can manage their own SelectableScheme sources.
   */
  protected SelectableScheme getRootColumnScheme(int column) {
    return column_scheme[column];
  }

  /**
   * Clears the SelectableScheme information for the given column.  After this
   * call, 'getRootColumnScheme' returns null for the column until the schemes
   * are blanked again.
   */
  protected void clearColumnScheme(int column) {
    column_scheme[column] = null;
  }

  /**
   * Blanks all the column schemes in the table to an initial state.  This
   * will make all schemes of type InsertSearch.
   * <p>
   * NOTE:
   * The current default SelectableScheme type is InsertSearch.  We may want
   * to make this variable.
   */
  protected void blankSelectableSchemes() {
    blankSelectableSchemes(0);
  }

  /**
   * Blanks all the column schemes in this table to a specific type of
   * scheme.  If Type = 0 then InsertSearch (fast but takes up memory -
   * requires each insert and delete from the table to be logged).  If type
   * = 1 then BlindSearch (slower but uses no memory and doesn't require
   * insert and delete to be logged).
   * <p>
   * NOTE(review): any other 'type' value silently leaves every entry in the
   * scheme array null.
   */
  protected void blankSelectableSchemes(int type) {
    column_scheme = new SelectableScheme[getColumnCount()];
    for (int i = 0; i < column_scheme.length; ++i) {
      if (type == 0) {
        column_scheme[i] = new InsertSearch(this, i);
      }
      else if (type == 1) {
        column_scheme[i] = new BlindSearch(this, i);
      }
    }
  }

  /**
   * Returns the number of columns in the table.  Delegates to the table's
   * DataTableDef.
   */
  public int getColumnCount() {
    return getDataTableDef().columnCount();
  }

  /**
   * Returns the number of rows stored in the table.
   */
  public int getRowCount() {
    return row_count;
  }

//  /**
//   * Returns a list of all the fields within the table.  The list is ordered
//   * the same way the fields were added in to the table.  NOTE: if you use the
//   * TableField.getName() method, it will not be fully resolved.  There will
//   * be no information about the table the field came from in the object.
//   */
//  public TableField[] getFields() {
//    System.out.println("NOTE: Calls to DefaultDataTable.getFields() need to be deprecated.");
//    return getDataTableDef().toTableFieldArray();
//  }

//  /**
//   * Returns the field at the given column.  Note the the name of the field
//   * will not be fully resolved.  It contains no information about the table
//   * the field came from.
//   */
//  public TableField getFieldAt(int column) {
//    System.out.println("NOTE: Calls to DefaultDataTable.getFieldAt() need to be deprecated.");
//    return getDataTableDef().columnAt(column).tableFieldValue();
//  }

//  /**
//   * Returns the fully resolved name of the given column in this table.
//   *
//   * @deprecated
//   */
//  public String getResolvedColumnName(int column) {
//    StringBuffer out = new StringBuffer();
//    out.append(getTableName().getName());
//    out.append('.');
//    out.append(getFieldAt(column).getName());
//    return new String(out);
//  }

  /**
   * Returns a fully qualified Variable object that represents the name of
   * the column at the given index.  For example,
   * new Variable(new TableName("APP", "CUSTOMER"), "ID")
   */
  public Variable getResolvedVariable(int column) {
    String col_name = getDataTableDef().columnAt(column).getName();
    return new Variable(getTableName(), col_name);
  }

//  /**
//   * Given a field name, ie. 'CUSTOMER.CUSTOMERID' this will return the
//   * column number the field is at.  Note that this method requires that the
//   * type of the column (ie. the Table) be appended to the start.  Returns
//   * -1 if the field could not be found in the table.
//   *
//   * @deprecated
//   */
//  public int findFieldName(String name) {
//    int point_index = name.indexOf('.');
//    if (point_index == -1) {
//      throw new Error("Can't find '.' deliminator in name: " + name);
//    }
//    String type = name.substring(0, point_index);
//    String col_name = name.substring(point_index + 1);
//
//    if (type.equals(getName())) {
//      int size = getColumnCount();
//      for (int i = 0; i < size; ++i) {
//        TableField field = getFieldAt(i);
//        if (field.getName().equals(col_name)) {
//          return i;
//        }
//      }
//    }
//
//    return -1;
//  }

  /**
   * Given a fully qualified variable field name, ie. 'APP.CUSTOMER.CUSTOMERID'
   * this will return the column number the field is at.  Returns -1 if the
   * field does not exist in the table (or if the variable's table name does
   * not match this table).
   */
  public int findFieldName(Variable v) {
    // Check this is the correct table first...
    TableName table_name = v.getTableName();
    DataTableDef table_def = getDataTableDef();
    if (table_name != null && table_name.equals(getTableName())) {
      // Look for the column name
      String col_name = v.getName();
      int size = getColumnCount();
      for (int i = 0; i < size; ++i) {
        DataTableColumnDef col = table_def.columnAt(i);
        if (col.getName().equals(col_name)) {
          return i;
        }
      }
    }
    return -1;
  }

  /**
   * Returns a SelectableScheme object for the given column of the
   * VirtualTable.  The Table parameter specifies the domain in which the
   * scheme should be given.  If table != this, we can safely assume it is a
   * VirtualTable and return a subset scheme mapped into that table's domain.
   */
  SelectableScheme getSelectableSchemeFor(int column, int original_column,
                                          Table table) {
    SelectableScheme scheme = getRootColumnScheme(column);
//    System.out.println("DefaultDataTable.getSelectableSchemaFor(" +
//                       column + ", " + original_column + ", " + table);
//    System.out.println(this);
    // If we are getting a scheme for this table, simple return the information
    // from the column_trees Vector.
    if (table == this) {
      return scheme;
    }
    // Otherwise, get the scheme to calculate a subset of the given scheme.
    else {
      return scheme.getSubsetScheme(table, original_column);
    }
  }

  /**
   * Given a set, this trickles down through the Table hierarchy resolving
   * the given row_set to a form that the given ancestor understands.
   * Say you give the set { 0, 1, 2, 3, 4, 5, 6 }, this function may check
   * down three levels and return a new 7 element set with the rows fully
   * resolved to the given ancestors domain.
   * <p>
   * For this root table the set is already in the correct domain, so this
   * implementation only validates that the ancestor really is this table.
   */
  void setToRowTableDomain(int column, IntegerVector row_set,
                           TableDataSource ancestor) {
    if (ancestor != this) {
      throw new RuntimeException("Method routed to incorrect table ancestor.");
    }
  }

  /**
   * Return the list of DataTable and row sets that make up the raw information
   * in this table.  For a DataTable itself, this is trivial.
   * NOTE: Using this method is extremely inefficient, and should never be
   * used.  It is included only to complete feature set.
   * IDEA: Put a warning to check if this method is ever used.
   */
  RawTableInformation resolveToRawTable(RawTableInformation info) {
    System.err.println("Efficiency Warning in DataTable.resolveToRawTable.");
    IntegerVector row_set = new IntegerVector();
    RowEnumeration e = rowEnumeration();
    while (e.hasMoreRows()) {
      row_set.addInt(e.nextRowIndex());
    }
    info.add(this, row_set);
    return info;
  }

//  /**
//   * Returns a bit vector indicating the columns that are valid.
//   */
//  boolean[] validColumns() {
//    int len = getColumnCount();
//    boolean[] bit_vector = new boolean[len];
//    for (int i = 0; i < len; ++i) {
//      bit_vector[i] = true;
//    }
//    return bit_vector;
//  }

  /* ===== Convenience methods for updating internal information =====
     =============== regarding the SelectableSchemes ================= */

  /**
   * Adds a single column of a row to the selectable scheme indexing.
   * Non-indexable column types are silently skipped.
   */
  void addCellToColumnSchemes(int row_number, int column_number) {
    boolean indexable_type =
               getDataTableDef().columnAt(column_number).isIndexableType();
    if (indexable_type) {
      SelectableScheme ss = getRootColumnScheme(column_number);
      ss.insert(row_number);
    }
  }

  /**
   * This is called when a row is in the table, and the SelectableScheme
   * objects for each column need to be notified of the rows existence,
   * therefore build up the relational model for the columns.  Only
   * indexable columns are notified.
   */
  void addRowToColumnSchemes(int row_number) {
    int col_count = getColumnCount();
    DataTableDef table_def = getDataTableDef();
    for (int i = 0; i < col_count; ++i) {
      if (table_def.columnAt(i).isIndexableType()) {
        SelectableScheme ss = getRootColumnScheme(i);
        ss.insert(row_number);
      }
    }
  }

  /**
   * This is called when an index to a row needs to be removed from the
   * SelectableScheme objects.  This occurs when we have a modification log
   * of row removals that haven't actually happened to old backed up scheme.
 */
  void removeRowToColumnSchemes(int row_number) {
    int col_count = getColumnCount();
    DataTableDef table_def = getDataTableDef();
    for (int i = 0; i < col_count; ++i) {
      if (table_def.columnAt(i).isIndexableType()) {
        SelectableScheme ss = getRootColumnScheme(i);
        ss.remove(row_number);
      }
    }
  }

}
mckoisqldb-1.0.6/src/main/java/com/mckoi/database/DumpHelper.java000066400000000000000000000053261330501023400246330ustar00rootroot00000000000000/**
 * com.mckoi.database.DumpHelper  18 Aug 1999
 *
 * Mckoi SQL Database ( http://www.mckoi.com/database )
 * Copyright (C) 2000-2018 Diehl and Associates, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package com.mckoi.database;

import java.io.IOException;
import java.io.PrintStream;

/**
 * A helper class for the 'Table.dumpTo' method.  This provides various
 * static methods for formatting the contents of a table and outputting it to
 * an output stream.
 *
 * @author Tobias Downer
 */

class DumpHelper {

  /**
   * Dumps the contents of a table to the given output stream.  It uses a
   * very simple method to format the text.  Output stops after 250 rows.
   */
  static void dump(Table table, PrintStream out) {

    int col_count = table.getColumnCount();

//    if (table instanceof DataTable) {
//      DataTable data_tab = (DataTable) table;
//      out.println("Total Hits: " + data_tab.getTotalHits());
//      out.println("File Hits:  " + data_tab.getFileHits());
//      out.println("Cache Hits: " + data_tab.getCacheHits());
//      out.println();
//    }

    out.println("Table row count: " + table.getRowCount());
    // Padding before the column header.  The original comment says 6 spaces;
    // the literal in this archived copy appears collapsed -- TODO verify
    // against the pristine source.
    out.print(" ");      // 6 spaces

    // First output the column header.
    for (int i = 0; i < col_count; ++i) {
      out.print(table.getResolvedVariable(i).toString());
      if (i < col_count - 1) {
        out.print(", ");
      }
    }
    out.println();

    // Print out the contents of each row, right-aligning the row number in
    // a 4 character field, up to a maximum of 250 rows.
    int row_num = 0;
    RowEnumeration r_enum = table.rowEnumeration();
    while (r_enum.hasMoreRows() && row_num < 250) {
      // Print the row number
      String num = Integer.toString(row_num);
      int space_gap = 4 - num.length();
      for (int i = 0; i < space_gap; ++i) {
        out.print(' ');
      }
      out.print(num);
      out.print(": ");

      // Print each cell in the row
      int row_index = r_enum.nextRowIndex();
      for (int col_index = 0; col_index < col_count; ++col_index) {
        TObject cell = table.getCellContents(col_index, row_index);
        out.print(cell.toString());
        if (col_index < col_count - 1) {
          out.print(", ");
        }
      }
      out.println();

      ++row_num;
    }
    out.println("Finished: " + row_num + "/" + table.getRowCount());

  }

}
mckoisqldb-1.0.6/src/main/java/com/mckoi/database/Expression.java000066400000000000000000000700351330501023400247240ustar00rootroot00000000000000/**
 * com.mckoi.database.Expression  11 Jul 2000
 *
 * Mckoi SQL Database ( http://www.mckoi.com/database )
 * Copyright (C) 2000-2018 Diehl and Associates, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import java.util.Date; import java.util.List; import java.util.ArrayList; import java.io.StringReader; import java.io.IOException; import java.io.ObjectInputStream; import java.io.ObjectOutputStream; import com.mckoi.database.sql.SQL; import com.mckoi.util.BigNumber; /** * An expression that can be evaluated in a statement. This is used as a more * complete and flexible version of 'Condition' as well as representing column * and aggregate functions. *

* This class can represent constant expressions (expressions that have no * variable input), as well as variable expressions. Optimizations may be * possible when evaluating constant expressions. *

* Some examples of constant expressions;

 *   ( 9 + 3 ) * 90
 *   ( ? * 9 ) / 1
 *   lower("CaPS MUMma")
 *   40 & 0x0FF != 39
 * 
* Some examples of variable expressions;

 *   upper(Part.description)
 *   Part.id >= 50
 *   VendorMakesPart.part_id == Part.id
 *   Part.value_of <= Part.cost_of / 4
 * 
*

* NOTE: the expression is stored in postfix orientation. eg. * "8 + 9 * 3" becomes "8,9,3,*,+" *

 * NOTE: This class is NOT thread safe.  Do not use an
 * expression instance between threads.
 *
 * @author Tobias Downer
 */

public final class Expression implements java.io.Serializable, Cloneable {

  /**
   * Serialization UID.
   */
  static final long serialVersionUID = 6981261114471924028L;

  /**
   * The list of elements followed by operators in our expression.  The
   * expression elements may be of any type represented by the database
   * (see 'addElement' method for the accepted objects).  The expression
   * operators may be '+', '-', '*', '/', '=', '>=', '<>', etc (as an
   * Operator object (see the Operator class for details)).
   * <p>
   * This list is stored in postfix order.
   */
  private ArrayList elements = new ArrayList();

  /**
   * The evaluation stack for when the expression is evaluated.  Created
   * lazily by 'evaluate' and reused between calls, which is why this class
   * is not thread safe.
   */
  private transient ArrayList eval_stack;

  /**
   * The expression as a plain human readable string.  This is in a form that
   * can be readily parsed to an Expression object.
   */
  private StringBuffer text;

  /**
   * Constructs a new (empty) Expression.
   */
  public Expression() {
    text = new StringBuffer();
  }

  /**
   * Constructs a new Expression with a single object element.
   */
  public Expression(Object ob) {
    this();
    addElement(ob);
  }

  /**
   * Constructs a copy of the given Expression (elements and text).
   */
  public Expression(Expression exp) {
    concat(exp);
    text = new StringBuffer(new String(exp.text));
  }

  /**
   * Constructs a new Expression from the concatenation of expression1 and
   * expression2 and the operator for them.
   * <p>
   * NOTE(review): unlike the other constructors this leaves 'text' null, so
   * 'text()' returns null for expressions built this way (see the null check
   * in 'toString').
   */
  public Expression(Expression exp1, Operator op, Expression exp2) {
    // Remember, this is in postfix notation.
    elements.addAll(exp1.elements);
    elements.addAll(exp2.elements);
    elements.add(op);
  }

  /**
   * Returns the StringBuffer that we can use to append plain text
   * representation as we are parsing the expression.
   */
  public StringBuffer text() {
    return text;
  }

  /**
   * Copies the text from the given expression.
   */
  public void copyTextFrom(Expression e) {
    this.text = new StringBuffer(new String(e.text()));
  }

  /**
   * Static method that parses the given string which contains an expression
   * into an Expression object.  For example, this will parse strings such
   * as '(a + 9) * 2 = b' or 'upper(concat('12', '56', id))'.
   * <p>
   * Care should be taken to not use this method inside an inner loop because
   * it creates a lot of objects.
   */
  public static Expression parse(String expression) {
    synchronized (expression_parser) {
      try {
        expression_parser.ReInit(new StringReader(expression));
        expression_parser.reset();
        Expression exp = expression_parser.parseExpression();

        exp.text().setLength(0);
        exp.text().append(expression);
        return exp;
      }
      catch (com.mckoi.database.sql.ParseException e) {
        // NOTE(review): the ParseException cause is discarded here - only
        // its message survives.  Consider chaining the cause.
        throw new RuntimeException(e.getMessage());
      }
    }
  }

  /**
   * A static expression parser.  To use this we must first synchronize over
   * the object.
   */
  private final static SQL expression_parser = new SQL(new StringReader(""));

  /**
   * Generates a simple expression from two objects and an operator.
   */
  public static Expression simple(Object ob1, Operator op, Object ob2) {
    Expression exp = new Expression(ob1);
    exp.addElement(ob2);
    exp.addElement(op);
    return exp;
  }

  /**
   * Adds a new element into the expression.  String, BigNumber, Boolean and
   * Variable are the types of elements allowed.  A null argument is stored
   * as TObject.nullVal().
   * <p>
   * Must be added in postfix order.
   */
  public void addElement(Object ob) {
    if (ob == null) {
      elements.add(TObject.nullVal());
    }
    else if (ob instanceof TObject ||
             ob instanceof ParameterSubstitution ||
             ob instanceof CorrelatedVariable ||
             ob instanceof Variable ||
             ob instanceof FunctionDef ||
             ob instanceof Operator ||
             ob instanceof StatementTreeObject) {
      elements.add(ob);
    }
    else {
      throw new Error("Unknown element type added to expression: " +
                      ob.getClass());
    }
  }

  /**
   * Merges an expression with this expression.  For example, given the
   * expression 'ab', if the expression 'abc+-' was added the expression would
   * become 'ababc+-'.
   * <p>
   * This method is useful when copying parts of other expressions when forming
   * an expression.
   * <p>
   * This always returns this expression.  This does not change 'text()'.
   */
  public Expression concat(Expression expr) {
    elements.addAll(expr.elements);
    return this;
  }

  /**
   * Adds a new operator into the expression.  Operators are represented as
   * an Operator (eg. ">", "+", "<<", "!=" )
   * <p>
   * Must be added in postfix order.
   */
  public void addOperator(Operator op) {
    elements.add(op);
  }

  /**
   * Returns the number of elements and operators that are in this postfix
   * list.
   */
  public int size() {
    return elements.size();
  }

  /**
   * Returns the element at the given position in the postfix list.  Note, this
   * can return Operator's.
   */
  public Object elementAt(int n) {
    return elements.get(n);
  }

  /**
   * Returns the element at the end of the postfix list (the last element).
   */
  public Object last() {
    return elements.get(size() - 1);
  }

  /**
   * Sets the element at the given position in the postfix list.  This should
   * be called after the expression has been setup to alter variable alias
   * names, etc.
   */
  public void setElementAt(int n, Object ob) {
    elements.set(n, ob);
  }

  /**
   * Pushes an element onto the evaluation stack.
   */
  private final void push(Object ob) {
    eval_stack.add(ob);
  }

  /**
   * Pops an element from the evaluation stack.
   */
  private final Object pop() {
    return eval_stack.remove(eval_stack.size() - 1);
  }

  /**
   * Returns a complete List of Variable objects in this expression not
   * including correlated variables.  Recurses into FunctionDef parameters
   * and array (TArrayType) sub-expressions.
   */
  public List allVariables() {
    ArrayList vars = new ArrayList();
    for (int i = 0; i < elements.size(); ++i) {
      Object ob = elements.get(i);
      if (ob instanceof Variable) {
        vars.add(ob);
      }
      else if (ob instanceof FunctionDef) {
        Expression[] params = ((FunctionDef) ob).getParameters();
        for (int n = 0; n < params.length; ++n) {
          vars.addAll(params[n].allVariables());
        }
      }
      else if (ob instanceof TObject) {
        TObject tob = (TObject) ob;
        if (tob.getTType() instanceof TArrayType) {
          Expression[] exp_list = (Expression[]) tob.getObject();
          for (int n = 0; n < exp_list.length; ++n) {
            vars.addAll(exp_list[n].allVariables());
          }
        }
      }
    }
    return vars;
  }

  /**
   * Returns a complete list of all element objects that are in this expression
   * and in the parameters of the functions of this expression.  Operators are
   * not included.
   */
  public List allElements() {
    ArrayList elems = new ArrayList();
    for (int i = 0; i < elements.size(); ++i) {
      Object ob = elements.get(i);
      if (ob instanceof Operator) {
        // Operators are skipped.
      }
      else if (ob instanceof FunctionDef) {
        Expression[] params = ((FunctionDef) ob).getParameters();
        for (int n = 0; n < params.length; ++n) {
          elems.addAll(params[n].allElements());
        }
      }
      else if (ob instanceof TObject) {
        TObject tob = (TObject) ob;
        if (tob.getTType() instanceof TArrayType) {
          Expression[] exp_list = (Expression[]) tob.getObject();
          for (int n = 0; n < exp_list.length; ++n) {
            elems.addAll(exp_list[n].allElements());
          }
        }
        else {
          elems.add(ob);
        }
      }
      else {
        elems.add(ob);
      }
    }
    return elems;
  }

  /**
   * A general prepare that cascades through the expression and its parents and
   * substitutes any elements that the preparer wants to substitute.
   * <p>
   * NOTE: This will not cascade through to the parameters of Function objects
   * however it will cascade through FunctionDef parameters.  For this
   * reason you MUST call 'prepareFunctions' after this method.
   */
  public void prepare(ExpressionPreparer preparer) throws DatabaseException {
    for (int n = 0; n < elements.size(); ++n) {
      Object ob = elements.get(n);

      // If the preparer will prepare this type of object then set the
      // entry with the prepared object.
      if (preparer.canPrepare(ob)) {
        elements.set(n, preparer.prepare(ob));
      }

      Expression[] exp_list = null;
      if (ob instanceof FunctionDef) {
        FunctionDef func = (FunctionDef) ob;
        exp_list = func.getParameters();
      }
      else if (ob instanceof TObject) {
        TObject tob = (TObject) ob;
        if (tob.getTType() instanceof TArrayType) {
          exp_list = (Expression[]) tob.getObject();
        }
      }
      else if (ob instanceof StatementTreeObject) {
        StatementTreeObject stree = (StatementTreeObject) ob;
        stree.prepareExpressions(preparer);
      }

      if (exp_list != null) {
        for (int p = 0; p < exp_list.length; ++p) {
          exp_list[p].prepare(preparer);
        }
      }
    }
  }

  /**
   * Returns true if the expression doesn't include any variables or non
   * constant functions (is constant).  Note that a correlated variable is
   * considered a constant.  Query plans (sub-queries) are not constant.
   */
  public boolean isConstant() {
    for (int n = 0; n < elements.size(); ++n) {
      Object ob = elements.get(n);
      if (ob instanceof TObject) {
        TObject tob = (TObject) ob;
        TType ttype = tob.getTType();
        // If this is a query plan, return false
        if (ttype instanceof TQueryPlanType) {
          return false;
        }
        // If this is an array, check the array for constants
        else if (ttype instanceof TArrayType) {
          Expression[] exp_list = (Expression[]) tob.getObject();
          for (int p = 0; p < exp_list.length; ++p) {
            if (!exp_list[p].isConstant()) {
              return false;
            }
          }
        }
      }
      else if (ob instanceof Variable) {
        return false;
      }
      else if (ob instanceof FunctionDef) {
        Expression[] params = ((FunctionDef) ob).getParameters();
        for (int p = 0; p < params.length; ++p) {
          if (!params[p].isConstant()) {
            return false;
          }
        }
      }
    }
    return true;
  }

  /**
   * Returns true if the expression has a subquery (eg 'in ( select ... )')
   * somewhere in it (this cascades through function parameters also).
   */
  public boolean hasSubQuery() {
    List list = allElements();
    int len = list.size();
    for (int n = 0; n < len; ++n) {
      Object ob = list.get(n);
      if (ob instanceof TObject) {
        TObject tob = (TObject) ob;
        if (tob.getTType() instanceof TQueryPlanType) {
          return true;
        }
      }
    }
    return false;
  }

  /**
   * Returns true if the expression contains a NOT operator somewhere in it.
   * This only scans the top-level element list (no function parameters).
   */
  public boolean containsNotOperator() {
    for (int n = 0; n < elements.size(); ++n) {
      Object ob = elements.get(n);
      if (ob instanceof Operator) {
        if (((Operator) ob).isNot()) {
          return true;
        }
      }
    }
    return false;
  }

  /**
   * Discovers all the correlated variables in this expression.  If this
   * expression contains a sub-query plan, we ask the plan to find the list of
   * correlated variables.  The discovery process increments the 'level'
   * variable for each sub-plan.
   */
  public ArrayList discoverCorrelatedVariables(int level, ArrayList list) {
    List elems = allElements();
    int sz = elems.size();
    // For each element
    for (int i = 0; i < sz; ++i) {
      Object ob = elems.get(i);
      if (ob instanceof CorrelatedVariable) {
        CorrelatedVariable v = (CorrelatedVariable) ob;
        if (v.getQueryLevelOffset() == level) {
          list.add(v);
        }
      }
      else if (ob instanceof TObject) {
        TObject tob = (TObject) ob;
        if (tob.getTType() instanceof TQueryPlanType) {
          QueryPlanNode node = (QueryPlanNode) tob.getObject();
          list = node.discoverCorrelatedVariables(level + 1, list);
        }
      }
    }
    return list;
  }

  /**
   * Discovers all the tables in the sub-queries of this expression.  This is
   * used for determining all the tables that a query plan touches.
   */
  public ArrayList discoverTableNames(ArrayList list) {
    List elems = allElements();
    int sz = elems.size();
    // For each element
    for (int i = 0; i < sz; ++i) {
      Object ob = elems.get(i);
      if (ob instanceof TObject) {
        TObject tob = (TObject) ob;
        if (tob.getTType() instanceof TQueryPlanType) {
          QueryPlanNode node = (QueryPlanNode) tob.getObject();
          list = node.discoverTableNames(list);
        }
      }
    }
    return list;
  }

  /**
   * Returns the QueryPlanNode object in this expression if it evaluates to a
   * single QueryPlanNode, otherwise returns null.
   * <p>
   * NOTE(review): 'elementAt(0)' is read before the size check, so an empty
   * expression would throw from the backing list - presumably expressions
   * are never empty here; TODO confirm.
   */
  public QueryPlanNode getQueryPlanNode() {
    Object ob = elementAt(0);
    if (size() == 1 && ob instanceof TObject) {
      TObject tob = (TObject) ob;
      if (tob.getTType() instanceof TQueryPlanType) {
        return (QueryPlanNode) tob.getObject();
      }
    }
    return null;
  }

  /**
   * Returns the Variable if this expression evaluates to a single variable,
   * otherwise returns null.  A correlated variable will not be returned.
   */
  public Variable getVariable() {
    Object ob = elementAt(0);
    if (size() == 1 && ob instanceof Variable) {
      return (Variable) ob;
    }
    return null;
  }

  /**
   * Returns an array of two Expression objects that represent the left hand
   * and right hand side of the last operator in the post fix notation.
   * For example, (a + b) - (c + d) will return { (a + b), (c + d) }.  Or
   * a more useful example is;
   * <pre>
   *   id + 3 > part_id - 2  will return  { id + 3, part_id - 2 }
   * </pre>
   */
  public Expression[] split() {
    if (size() <= 1) {
      throw new Error("Can only split expressions with more than 1 element.");
    }

    // Walk the postfix list tracking the stack depth; the last index where
    // the depth is exactly 1 is the end of the left hand operand.
    int midpoint = -1;
    int stack_size = 0;
    for (int n = 0; n < size() - 1; ++n) {
      Object ob = elementAt(n);
      if (ob instanceof Operator) {
        --stack_size;
      }
      else {
        ++stack_size;
      }
      if (stack_size == 1) {
        midpoint = n;
      }
    }

    if (midpoint == -1) {
      throw new Error("postfix format error: Midpoint not found.");
    }

    Expression lhs = new Expression();
    for (int n = 0; n <= midpoint; ++n) {
      lhs.addElement(elementAt(n));
    }

    Expression rhs = new Expression();
    for (int n = midpoint + 1; n < size() - 1; ++n) {
      rhs.addElement(elementAt(n));
    }

    return new Expression[] { lhs, rhs };
  }

  /**
   * Returns the end Expression of this expression.  For example, an expression
   * of 'ab' has an end expression of 'b'.  The expression 'abc+=' has an end
   * expression of 'abc+='.
   * <p>
   * This is a useful method to call in the middle of an Expression object
   * being formed.  It allows for the last complete expression to be
   * returned.
   * <p>
   * If this is called when an expression is completely formed it will always
   * return the complete expression.
   */
  public Expression getEndExpression() {

    int stack_size = 1;
    int end = size() - 1;
    for (int n = end; n > 0; --n) {
      Object ob = elementAt(n);
      if (ob instanceof Operator) {
        ++stack_size;
      }
      else {
        --stack_size;
      }

      if (stack_size == 0) {
        // Now, n .. end represents the new expression.
        Expression new_exp = new Expression();
        for (int i = n; i <= end; ++i) {
          new_exp.addElement(elementAt(i));
        }
        return new_exp;
      }
    }

    return new Expression(this);
  }

  /**
   * Breaks this expression into a list of sub-expressions that are split
   * by the given operator.  For example, given the expression;
   * <pre>
   *   (a = b AND b = c AND (a = 2 OR c = 1))
   * </pre>
   * Calling this method with logical_op = "and" will return a list of the
   * three expressions.
   * <p>
   * This is a common function used to split up an expressions into logical
   * components for processing.
   */
  public ArrayList breakByOperator(ArrayList list, final String logical_op) {
    // The last operator must be 'and'
    Object ob = last();
    if (ob instanceof Operator) {
      Operator op = (Operator) ob;
      if (op.is(logical_op)) {
        // Last operator is 'and' so split and recurse.
        Expression[] exps = split();
        list = exps[0].breakByOperator(list, logical_op);
        list = exps[1].breakByOperator(list, logical_op);
        return list;
      }
    }
    // If no last expression that matches then add this expression to the
    // list.
    list.add(this);
    return list;
  }

  /**
   * Evaluates this expression and returns an Object that represents the
   * result of the evaluation.  The passed VariableResolver object is used
   * to resolve the variable name to a value.  The GroupResolver object is
   * used if there are any aggregate functions in the evaluation - this can be
   * null if evaluating an expression without grouping aggregates.  The query
   * context object contains contextual information about the environment of
   * the query.
   * <p>
   * NOTE: This method is going to be called a lot, so we need it to be
   * optimal.
   * <p>
   * NOTE: This method is not thread safe!  The reason it's not safe
   * is because of the evaluation stack.
   */
  public TObject evaluate(GroupResolver group, VariableResolver resolver,
                          QueryContext context) {
    // Optimization - trivial case of 'a' or 'ab*' postfix are tested for
    // here.
    int element_count = elements.size();
    if (element_count == 1) {
      return (TObject) elementToObject(0, group, resolver, context);
    }
    else if (element_count == 3) {
      TObject o1 = (TObject) elementToObject(0, group, resolver, context);
      TObject o2 = (TObject) elementToObject(1, group, resolver, context);
      Operator op = (Operator) elements.get(2);
      return op.eval(o1, o2, group, resolver, context);
    }

    // General case - lazily create the (reused) evaluation stack.
    if (eval_stack == null) {
      eval_stack = new ArrayList();
    }

    for (int n = 0; n < element_count; ++n) {
      Object val = elementToObject(n, group, resolver, context);
      if (val instanceof Operator) {
        // Pop the last two values off the stack, evaluate them and push
        // the new value back on.
        Operator op = (Operator) val;

        TObject v2 = (TObject) pop();
        TObject v1 = (TObject) pop();

        push(op.eval(v1, v2, group, resolver, context));
      }
      else {
        push(val);
      }
    }
    // We should end with a single value on the stack.
    return (TObject) pop();
  }

  /**
   * Evaluation without a grouping table.
   */
  public TObject evaluate(VariableResolver resolver, QueryContext context) {
    return evaluate(null, resolver, context);
  }

  /**
   * Returns the element at the given position in the expression list.  If
   * the element is a variable then it is resolved on the VariableResolver.
   * If the element is a function then it is evaluated and the result is
   * returned.  TObject and Operator elements are returned as-is.
   */
  private Object elementToObject(int n, GroupResolver group,
                                 VariableResolver resolver,
                                 QueryContext context) {
    Object ob = elements.get(n);
    if (ob instanceof TObject ||
        ob instanceof Operator) {
      return ob;
    }
    else if (ob instanceof Variable) {
      return resolver.resolve((Variable) ob);
    }
    else if (ob instanceof CorrelatedVariable) {
      return ((CorrelatedVariable) ob).getEvalResult();
    }
    else if (ob instanceof FunctionDef) {
      Function fun = ((FunctionDef) ob).getFunction(context);
      return fun.evaluate(group, resolver, context);
    }
    else {
      if (ob == null) {
        throw new NullPointerException("Null element in expression");
      }
      throw new Error("Unknown element type: " + ob.getClass());
    }
  }

  /**
   * Cascades through the expression and if any aggregate functions are found
   * returns true, otherwise returns false.  Also descends into array
   * (TArrayType) sub-expressions.
   */
  public boolean hasAggregateFunction(QueryContext context) {
    for (int n = 0; n < elements.size(); ++n) {
      Object ob = elements.get(n);
      if (ob instanceof FunctionDef) {
        if (((FunctionDef) ob).isAggregate(context)) {
          return true;
        }
      }
      else if (ob instanceof TObject) {
        TObject tob = (TObject) ob;
        if (tob.getTType() instanceof TArrayType) {
          Expression[] list = (Expression[]) tob.getObject();
          for (int i = 0; i < list.length; ++i) {
            if (list[i].hasAggregateFunction(context)) {
              return true;
            }
          }
        }
      }
    }
    return false;
  }

  /**
   * Determines the type of object this expression evaluates to.  We determine
   * this by looking at the last element of the expression.  If the last
   * element is a TType object, it returns the type.  If the last element is a
   * Function, Operator or Variable then it returns the type that these
   * objects have set as their result type.
   */
  public TType returnTType(VariableResolver resolver, QueryContext context) {
    Object ob = elements.get(elements.size() - 1);
    if (ob instanceof FunctionDef) {
      Function fun = ((FunctionDef) ob).getFunction(context);
      return fun.returnTType(resolver, context);
    }
    else if (ob instanceof TObject) {
      return ((TObject) ob).getTType();
    }
    else if (ob instanceof Operator) {
      Operator op = (Operator) ob;
      return op.returnTType();
    }
    else if (ob instanceof Variable) {
      Variable variable = (Variable) ob;
      return resolver.returnTType(variable);
    }
    else if (ob instanceof CorrelatedVariable) {
      CorrelatedVariable variable = (CorrelatedVariable) ob;
      return variable.returnTType();
    }
    else {
      throw new Error("Unable to determine type for expression.");
    }
  }

  /**
   * Performs a deep clone of this object, calling 'clone' on any elements
   * that are mutable or shallow copying immutable members.  The clone's
   * evaluation stack is reset to null.
   */
  public Object clone() throws CloneNotSupportedException {
    // Shallow clone
    Expression v = (Expression) super.clone();
    v.eval_stack = null;
//    v.text = new StringBuffer(new String(text));
    int size = elements.size();
    ArrayList cloned_elements = new ArrayList(size);
    v.elements = cloned_elements;

    // Clone items in the elements list
    for (int i = 0; i < size; ++i) {
      Object element = elements.get(i);

      if (element instanceof TObject) {
        // TObject is immutable except for TArrayType and TQueryPlanType
        TObject tob = (TObject) element;
        TType ttype = tob.getTType();
        // For a query plan
        if (ttype instanceof TQueryPlanType) {
          QueryPlanNode node = (QueryPlanNode) tob.getObject();
          node = (QueryPlanNode) node.clone();
          element = new TObject(ttype, node);
        }
        // For an array
        else if (ttype instanceof TArrayType) {
          Expression[] arr = (Expression[]) tob.getObject();
          arr = (Expression[]) arr.clone();
          for (int n = 0; n < arr.length; ++n) {
            arr[n] = (Expression) arr[n].clone();
          }
          element = new TObject(ttype, arr);
        }
      }
      else if (element instanceof Operator ||
               element instanceof ParameterSubstitution) {
        // immutable so we do not need to clone these
      }
      else if (element instanceof CorrelatedVariable) {
        element = ((CorrelatedVariable) element).clone();
      }
      else if (element instanceof Variable) {
        element = ((Variable) element).clone();
      }
      else if (element instanceof FunctionDef) {
        element = ((FunctionDef) element).clone();
      }
      else if (element instanceof StatementTreeObject) {
        element = ((StatementTreeObject) element).clone();
      }
      else {
        throw new CloneNotSupportedException(element.getClass().toString());
      }
      cloned_elements.add(element);
    }

    return v;
  }

  /**
   * Returns a string representation of this object for diagnostic
   * purposes.
   */
  public String toString() {
    StringBuffer buf = new StringBuffer();
    buf.append("[ Expression ");
    if (text() != null) {
      buf.append("[");
      buf.append(text().toString());
      buf.append("]");
    }
    buf.append(": ");
    for (int n = 0; n < elements.size(); ++n) {
      buf.append(elements.get(n));
      if (n < elements.size() - 1) {
        buf.append(",");
      }
    }
    buf.append(" ]");
    return new String(buf);
  }

  // ---------- Serialization methods ----------

  /**
   * Writes the state of this object to the object stream.  This method is
   * implemented because GCJ doesn't like it if you implement readObject
   * without writeObject.
   */
  private void writeObject(ObjectOutputStream out) throws IOException {
    out.defaultWriteObject();
  }

  /**
   * Reads the state of this object from the object stream.
   */
  private void readObject(ObjectInputStream in) throws IOException,
                                                       ClassNotFoundException {
    in.defaultReadObject();

    // This converts old types to the new TObject type system introduced
    // in version 0.94.
int sz = elements.size(); for (int i = 0; i < sz; ++i) { Object ob = elements.get(i); TObject conv_object = null; if (ob == null || ob instanceof com.mckoi.database.global.NullObject) { conv_object = TObject.nullVal(); } else if (ob instanceof String) { conv_object = TObject.stringVal((String) ob); } else if (ob instanceof java.math.BigDecimal) { conv_object = TObject.bigNumberVal( BigNumber.fromBigDecimal((java.math.BigDecimal) ob)); } else if (ob instanceof java.util.Date) { conv_object = TObject.dateVal((java.util.Date) ob); } else if (ob instanceof Boolean) { conv_object = TObject.booleanVal(((Boolean) ob).booleanValue()); } if (conv_object != null) { elements.set(i, conv_object); } } } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/ExpressionPreparer.java000066400000000000000000000025361330501023400264260ustar00rootroot00000000000000/** * com.mckoi.database.ExpressionPreparer 09 Sep 2001 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; /** * An interface used to prepare an Expression object. This interface is used * to mutate an element of an Expression from one form to another. For * example, we may use this to translate a StatementTree object to a * Statement object. * * @author Tobias Downer */ public interface ExpressionPreparer { /** * Returns true if this preparer will prepare the given object in an * expression. 
*/ boolean canPrepare(Object element); /** * Returns the new translated object to be mutated from the given element. */ Object prepare(Object element) throws DatabaseException; } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/FilterTable.java000066400000000000000000000177571330501023400247760ustar00rootroot00000000000000/** * com.mckoi.database.FilterTable 13 Jul 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import com.mckoi.util.IntegerVector; /** * A table that is a filter for another table. By default, all Table methods * are implemented to call the parent. This class should be used when we * want to implement a Table filter of some kind. For example, a filter * for specific columns, or even rows, etc. *

* NOTE: For efficiency reasons, this will store SelectableScheme objects * generated by the parent like VirtualTable. * * @author Tobias Downer */ public class FilterTable extends Table { /** * The Table we are filtering the columns from. */ protected Table parent; /** * The schemes to describe the entity relation in the given column. */ private SelectableScheme[] column_scheme; /** * The Constructor. */ public FilterTable(Table parent) { this.parent = parent; } /** * Returns the parent table. */ protected Table getParent() { return parent; } /** * Returns the parent Database object. */ public Database getDatabase() { return parent.getDatabase(); } /** * Returns the number of columns in the table. */ public int getColumnCount() { return parent.getColumnCount(); } /** * Returns the number of rows stored in the table. */ public int getRowCount() { return parent.getRowCount(); } /** * Given a fully qualified variable field name, ie. 'APP.CUSTOMER.CUSTOMERID' * this will return the column number the field is at. Returns -1 if the * field does not exist in the table. */ public int findFieldName(Variable v) { return parent.findFieldName(v); } /** * Returns a fully qualified Variable object that represents the name of * the column at the given index. For example, * new Variable(new TableName("APP", "CUSTOMER"), "ID") */ public Variable getResolvedVariable(int column) { return parent.getResolvedVariable(column); } /** * Returns a SelectableScheme for the given column in the given VirtualTable * row domain. */ SelectableScheme getSelectableSchemeFor(int column, int original_column, Table table) { if (column_scheme == null) { column_scheme = new SelectableScheme[parent.getColumnCount()]; } // Is there a local scheme available? SelectableScheme scheme = column_scheme[column]; if (scheme == null) { // If we are asking for the selectable schema of this table we must // tell the parent we are looking for its selectable scheme. 
Table t = table; if (table == this) { t = parent; } // Scheme is not cached in this table so ask the parent. scheme = parent.getSelectableSchemeFor(column, original_column, t); if (table == this) { column_scheme[column] = scheme; } } else { // If this has a cached scheme and we are in the correct domain then // return it. if (table == this) { return scheme; } else { // Otherwise we must calculate the subset of the scheme return scheme.getSubsetScheme(table, original_column); } } return scheme; } /** * Given a set, this trickles down through the Table hierarchy resolving * the given row_set to a form that the given ancestor understands. * Say you give the set { 0, 1, 2, 3, 4, 5, 6 }, this function may check * down three levels and return a new 7 element set with the rows fully * resolved to the given ancestors domain. */ void setToRowTableDomain(int column, IntegerVector row_set, TableDataSource ancestor) { if (ancestor == this || ancestor == parent) { return; } else { parent.setToRowTableDomain(column, row_set, ancestor); } } /** * Return the list of DataTable and row sets that make up the raw information * in this table. */ RawTableInformation resolveToRawTable(RawTableInformation info) { return parent.resolveToRawTable(info); } /** * Returns an object that represents the information in the given cell * in the table. This will generally be an expensive algorithm, so calls * to it should be kept to a minimum. Note that the offset between two * rows is not necessarily 1. */ public TObject getCellContents(int column, int row) { return parent.getCellContents(column, row); } /** * Returns an Enumeration of the rows in this table. * The Enumeration is a fast way of retrieving consequtive rows in the table. */ public RowEnumeration rowEnumeration() { return parent.rowEnumeration(); } /** * Returns a DataTableDef object that defines the name of the table and the * layout of the columns of the table. 
Note that for tables that are joined * with other tables, the table name and schema for this object become * mangled. For example, a table called 'PERSON' joined with a table called * 'MUSIC' becomes a table called 'PERSON#MUSIC' in a null schema. */ public DataTableDef getDataTableDef() { return parent.getDataTableDef(); } /** * Adds a DataTableListener to the DataTable objects at the root of this * table tree hierarchy. If this table represents the join of a number of * tables then the DataTableListener is added to all the DataTable objects * at the root. *

* A DataTableListener is notified of all modifications to the raw entries * of the table. This listener can be used for detecting changes in VIEWs, * for triggers or for caching of common queries. */ void addDataTableListener(DataTableListener listener) { parent.addDataTableListener(listener); } /** * Removes a DataTableListener from the DataTable objects at the root of * this table tree hierarchy. If this table represents the join of a * number of tables, then the DataTableListener is removed from all the * DataTable objects at the root. */ void removeDataTableListener(DataTableListener listener) { parent.removeDataTableListener(listener); } /** * Locks the root table(s) of this table so that it is impossible to * overwrite the underlying rows that may appear in this table. * This is used when cells in the table need to be accessed 'outside' the * lock. So we may have late access to cells in the table. * 'lock_key' is a given key that will also unlock the root table(s). * NOTE: This is nothing to do with the 'LockingMechanism' object. */ public void lockRoot(int lock_key) { parent.lockRoot(lock_key); } /** * Unlocks the root tables so that the underlying rows may * once again be used if they are not locked and have been removed. This * should be called some time after the rows have been locked. */ public void unlockRoot(int lock_key) { parent.unlockRoot(lock_key); } /** * Returns true if the table has its row roots locked (via the lockRoot(int) * method. */ public boolean hasRootsLocked() { return parent.hasRootsLocked(); } /** * Prints a graph of the table hierarchy to the stream. 
*/ public void printGraph(java.io.PrintStream out, int indent) { for (int i = 0; i < indent; ++i) { out.print(' '); } out.println("F[" + getClass()); parent.printGraph(out, indent + 2); for (int i = 0; i < indent; ++i) { out.print(' '); } out.println("]"); } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/FixedRecordList.java000066400000000000000000000225671330501023400256260ustar00rootroot00000000000000/** * com.mckoi.database.FixedRecordList 11 Sep 2002 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import java.util.ArrayList; import java.io.IOException; import com.mckoi.store.Store; import com.mckoi.store.AreaWriter; import com.mckoi.store.MutableArea; import com.mckoi.store.Area; /** * A structure that provides a fast way to read and write fixed sized nodes in * a Store object. Each node in the list is of fixed size. *

* This structure can locate a node in the list very quickly. However, the * structure can not be mutated. For example, deleting node '4' will make the * node available for recycling but will not shift any nodes after 4 in the * list up by one. *

* Once a node is allocated from the list its position will not change. *

* This structure does not provide versioning features. *

* The structure is composed of two element types - the header and the list * block elements. The header is resembled by the following diagram; *

* LIST BLOCK HEADER * +-------------------------------+ * | 4 MAGIC | * | 4 list block count | * | 8 (reserved for delete chain) | * | 8 pointer to list block 0 | * | 8 pointer to list block 1 | * . ... etc ... . * | 8 pointer to list block 63 | * +-------------------------------+ * *

* The first list block element is 32 entries in size, the second list block is * 64 entries in size, etc. Each entry of the list block element is of fixed * size. *

* This class is NOT thread safe. * * @author Tobias Downer */ public class FixedRecordList { /** * The magic value for fixed record list structures. */ private final static int MAGIC = 0x087131AA; /** * The backing Store object that persistantly stores the structure. */ private final Store store; /** * The fixed size of the elements in the list. */ private final int element_size; /** * A pointer to the list header area. */ private long list_header_p; /** * The header for the list blocks. */ private MutableArea list_header_area; /** * The number of blocks in the list block. */ private int list_block_count; /** * Pointers to the blocks in the list block. */ private long[] list_block_element; private MutableArea[] list_block_area; /** * Constructs the structure. */ public FixedRecordList(Store store, int element_size) { this.store = store; this.element_size = element_size; list_block_element = new long[64]; list_block_area = new MutableArea[64]; } /** * Creates the structure in the store, and returns a pointer to the structure. */ public long create() throws IOException { // Allocate space for the list header (8 + 8 + (64 * 8)) AreaWriter writer = store.createArea(528); list_header_p = writer.getID(); writer.putInt(MAGIC); writer.finish(); list_header_area = store.getMutableArea(list_header_p); list_block_count = 0; updateListHeaderArea(); return list_header_p; } /** * Initializes the structure from the store. */ public void init(long list_pointer) throws IOException { list_header_p = list_pointer; list_header_area = store.getMutableArea(list_header_p); int magic = list_header_area.getInt(); // MAGIC if (magic != MAGIC) { throw new IOException("Incorrect magic for list block. 
[magic=" + magic + "]"); } this.list_block_count = list_header_area.getInt(); list_header_area.getLong(); for (int i = 0; i < list_block_count; ++i) { long block_pointer = list_header_area.getLong(); list_block_element[i] = block_pointer; list_block_area[i] = store.getMutableArea(block_pointer); } } /** * Adds to the given ArrayList all the areas in the store that are used by * this structure (as Long). */ public void addAllAreasUsed(ArrayList list) throws IOException { list.add(new Long(list_header_p)); for (int i = 0; i < list_block_count; ++i) { list.add(new Long(list_block_element[i])); } } /** * Returns the 8 byte long that is reserved for storing the delete chain * (if there is one). */ public long getReservedLong() throws IOException { list_header_area.position(8); return list_header_area.getLong(); } /** * Sets the 8 byte long that is reserved for storing the delete chain * (if there is one). */ public void setReservedLong(long v) throws IOException { list_header_area.position(8); list_header_area.putLong(v); list_header_area.checkOut(); } /** * Updates the list header area from the information store within the * state of this object. This should only be called when a new block is * added to the list block, or the store is created. */ private void updateListHeaderArea() throws IOException { list_header_area.position(4); list_header_area.putInt(list_block_count); list_header_area.position(16); for (int i = 0; i < list_block_count; ++i) { list_header_area.putLong(list_block_element[i]); } list_header_area.checkOut(); } /** * Returns an Area object from the list block area with the position over * the record entry requested. Note that the Area object can only be safely * used if there is a guarentee that no other access to this object while the * area object is accessed. */ public MutableArea positionOnNode(final long record_number) throws IOException { // What block is this record in? 
int bit = 0; long work = record_number + 32; while (work != 0) { work = work >> 1; ++bit; } long start_val = (1 << (bit - 1)) - 32; int block_offset = bit - 6; long record_offset = record_number - start_val; // Get the pointer to the block that contains this record status MutableArea block_area = list_block_area[block_offset]; // long tempv = (record_offset * element_size); // int position_to = (int) tempv; // if (tempv == 1) { // ++tempv; // } // block_area.position(position_to); block_area.position((int) (record_offset * element_size)); return block_area; } /** * Returns the number of block elements in this list structure. This will * return a number between 0 and 63 (inclusive). */ public int listBlockCount() { return list_block_count; } /** * Returns the total number of nodes that are currently addressable by this * list structure. For example, if the list contains 0 blocks then there are * no addressable nodes. If it contains 1 block, there are 32 addressable * nodes. If it contains 2 blocks, there are 64 + 32 = 96 nodes. 3 blocks = * 128 + 64 + 32 = 224 nodes. */ public long addressableNodeCount() { return listBlockFirstPosition(list_block_count); } /** * Returns the number of nodes that can be stored in the given block, where * block 0 is the first block (32 addressable nodes). */ public long listBlockNodeCount(int block_number) { return 32L << block_number; } /** * Returns the index of the first node in the given block number. For * example, this first node of block 0 is 0, the first node of block 1 is * 32, the first node of block 2 is 96, etc. */ public long listBlockFirstPosition(int block_number) { long start_index = 0; int i = block_number; long diff = 32; while (i > 0) { start_index = start_index + diff; diff = diff << 1; --i; } return start_index; } /** * Increases the size of the list structure so it may accomodate more record * entries. This simply adds a new block for more nodes. 
*/ public void increaseSize() throws IOException { // The size of the block long size_of_block = 32L << list_block_count; // Allocate the new block in the store AreaWriter writer = store.createArea(size_of_block * element_size); long nblock_p = writer.getID(); writer.finish(); MutableArea nblock_area = store.getMutableArea(nblock_p); // Update the block list list_block_element[list_block_count] = nblock_p; list_block_area[list_block_count] = nblock_area; ++list_block_count; // Update the list header, updateListHeaderArea(); } /** * Decreases the size of the list structure. This should be used with care * because it deletes all nodes in the last block. */ public void decreaseSize() throws IOException { --list_block_count; // Free the top block store.deleteArea(list_block_element[list_block_count]); // Help the GC list_block_area[list_block_count] = null; list_block_element[list_block_count] = 0; // Update the list header. updateListHeaderArea(); } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/FixedSizeDataStore.java000066400000000000000000001473751330501023400263020ustar00rootroot00000000000000/** * com.mckoi.database.FixedSizeDataStore 25 Jun 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package com.mckoi.database; import java.io.*; import java.util.Arrays; import com.mckoi.util.ByteArrayUtil; import com.mckoi.util.IntegerVector; import com.mckoi.util.UserTerminal; import com.mckoi.util.Cache; import com.mckoi.debug.*; /** * A file format that allows for the very quick retreival of data that is * stored within it. To allow for such quick reference of information in the * file, we must make stipulations about how the data is stored. *

    *
  1. Each data element in the table must be a fixed length. This could be * thought of like a 'sector' of a disk drive. Or in a table, each row may * be of the fixed size. *
  2. We keep track of deleted rows via a linked list of sectors. *
* The header of the data store is as follows: *

 *   0 4 (int)  : MAGIC        - used to identify file type.
 *   4 4 (int)  : version      - version of the file store.
 *   8 4 (int)  : sector_size  - the size of each sector in the store.
 *  12 8 (long) : delete_head  - the head sector of the delete list.
 *  20 8 (long) : sectors_used - number of sectors being used (not deleted).
 *  28 1 (byte) : open         - set to 1 when file opened, 0 when closed.
 *  29 4 (int)  : sector_start - offset where sector information starts.
 *  33 ...  63                 - reserved
 *  64 ... 191                 - reserved buffer for misc. state data.
 * 192 ... sector_start        - reserved
 * 

* Each sector contains a 5 byte header. This header includes a byte that * contains either USED or DELETED, and a int pointer to the next chained * sector. The int pointer is used to either represent a pointer to the next * sector in the chain of USED sectors, with -1 indicating the end. Or, if * the sector is DELETED, it points to the next deleted sector in the chain. * * @author Tobias Downer */ public final class FixedSizeDataStore { /** * The Magic number used to help identify that the file we are reading is * formatted as a fixed size data store. */ private final static int MAGIC = 0x0badbead; /** * The offset in the file where the sector data starts. */ private final static int SECTOR_DATA_OFFSET = 512; /** * The number of bytes that are stored in a sector in addition to the * user data in a sector. */ private final static int EXTRA_SECTOR_SIZE = 5; /** * The mark that indicates whether a sector is deleted (available) or being * used. */ private final static byte USED = 0, DELETED = (byte) 0x080; /** * If true then sectors are cached in the 'sector_cache'. */ private final static boolean SECTORS_CACHED = true; /** * A DebugLogger object we can use to write debug messages to. */ private DebugLogger debug; /** * The size of each 'sector' */ private int sector_size; /** * The File that keeps the data. */ private File data_file; /** * The RandomAccessFile object for the data file. */ private RandomAccessFile data_store; /** * Set to true if we opened the store in read only mode, otherwise false. */ private boolean read_only; /** * The file size of the data store. */ private long data_store_size; /** * The offset where the header information finishes and the sector data * starts. */ private int sector_offset; /** * The sector buffer. This is filled with the information in some given * sector. */ private byte[] sector_buffer; /** * The sector that we currently have loaded into the buffer. */ private int buffered_sector; /** * The head of the deleted sectors. 
*/ private int delete_head; /** * The number of used sectors in the store. */ private int used_sector_count; /** * The number of locks that have been put on this store. If this number is * > 0 then we may not reclaim deleted sector because another thread may be * reading the data. */ private int lock_count; /** * A cache of sectors read from the store. */ private Cache sector_cache; /** * Constructs the data store. If 'sector_size' <= 0 then we determine * sector size when the file is opened. If cached_access is true then * all access to the store is through a cache which will greatly improve * the performance of read dominated access. */ public FixedSizeDataStore(File data_file, int sector_size, boolean cache_access, DebugLogger logger) { this.debug = logger; // Enable the cache? if (cache_access) { sector_cache = new Cache(64); } else { sector_cache = null; } if (sector_size > 0) { this.sector_size = sector_size + EXTRA_SECTOR_SIZE; } else { this.sector_size = -1; } this.data_file = data_file; } public FixedSizeDataStore(File data_file, int sector_size, DebugLogger logger) { this(data_file, sector_size, SECTORS_CACHED, logger); } // ---------- Internal methods ---------- /** * Returns true if the store is locked from reclaiming deleted rows. */ boolean locked() { return (lock_count > 0); } /** * Returns the total number of sectors in the file. */ private int sectorCount() throws IOException { // PROFILE: data_store.length() is expensive so we keep the length as an // instance variable // return (int) ((data_store.length() - sector_offset) / sector_size); return (int) ((data_store_size - sector_offset) / sector_size); } /** * Seeks to the 'nth' sector in the store. */ private long seekSector(int sector) throws IOException { long ra_index = (sector * sector_size); long seek_to = ra_index + sector_offset; data_store.seek(seek_to); // Skip past the header. 
return seek_to; } /** * Read the 'nth' sector from the store and fills the internal * 'sector_buffer' with the contents. */ private void readSector(int sector) throws IOException { // If the buffered sector is already loaded then don't re-read. if (buffered_sector != sector) { if (sector_cache != null) { // If this sector is in the cache then use the cached entry instead. Integer cacheKey = new Integer(sector); byte[] sbuf = (byte[]) sector_cache.get(cacheKey); if (sbuf == null) { // If not in the cache then read from the file. seekSector(sector); data_store.readFully(sector_buffer, 0, sector_size); sbuf = new byte[sector_size]; System.arraycopy(sector_buffer, 0, sbuf, 0, sector_size); sector_cache.put(cacheKey, sbuf); } else { // Otherwise, read the cached entry. System.arraycopy(sbuf, 0, sector_buffer, 0, sector_size); } } else { // If no caching then read the sector seekSector(sector); data_store.readFully(sector_buffer, 0, sector_size); } buffered_sector = sector; } } /** * Sets the length of the data store to the given size. This has a side- * effect of setting the file pointer to the end of the file. */ private void setDataStoreSize(long new_size) throws IOException { long p = new_size - 1; if (p > 0) { data_store.seek(p); data_store.write(0); data_store_size = new_size; } } /** * Writes the sector data in 'sector_buffer' to the given sector offset in * the store. */ private void writeSector(int sector, int length) throws IOException { long seek_to = seekSector(sector); // If writing to end of file, extend the file size. if (seek_to == data_store_size) { // This will extend the file size by 16 sector lengths and add the // additional sectors to the deleted chain. setDataStoreSize(seek_to + sector_size); seekSector(sector); } // Check just to make sure, if (length <= sector_size) { data_store.write(sector_buffer, 0, length); if (sector_cache != null) { // Copy this into the cache. 
byte[] sbuf = new byte[sector_size]; System.arraycopy(sector_buffer, 0, sbuf, 0, length); sector_cache.put(new Integer(sector), sbuf); } } else { throw new IOException("length > sector_size"); } } /** * Writes the sector data in 'sector_buffer' to the given sector offset in * the store. */ private void writeSector(int sector) throws IOException { writeSector(sector, sector_size); } /** * Sets up the sector header information in 'sector_buffer'. */ private void setSectorHeader(byte status, int next_sector) throws IOException { sector_buffer[0] = status; sector_buffer[1] = (byte) ((next_sector >>> 24) & 0xFF); sector_buffer[2] = (byte) ((next_sector >>> 16) & 0xFF); sector_buffer[3] = (byte) ((next_sector >>> 8) & 0xFF); sector_buffer[4] = (byte) ((next_sector >>> 0) & 0xFF); } /** * Writes the contents of the byte[] array to the sector, setting the * USED flag to true, and the 'next' int in the sector header. *

* NOTE: Assumes length is less than user space size of sector. */ private int writeBufToSector(int sector, int next_sector, byte[] buf, int offset, int length) throws IOException { // Write a new sector buffer entry, setSectorHeader(USED, next_sector); System.arraycopy(buf, offset, sector_buffer, 5, length); // NOTE: Notice the order here. We update header state first, then // write the sector. If a crash happens between 'synch' and 'writeSector' // all that will happen is we'll be left with a hanging chain. There // should be no corruption. // Add 1 to the used sector count. ++used_sector_count; // Sync the file synch(); // Write the sector in the buffer writeSector(sector, length + 5); // We now have this sector in the buffer buffered_sector = sector; // Return the sector we wrote this to, return sector; } /** * Reclaims the first sector from the free sector list. */ private int reclaimTopFree() throws IOException { // There's a sector we can use so use it! int free_sector = delete_head; // Take this sector out of the chain of deleted records. readSector(free_sector); int c1 = (((int) sector_buffer[1]) & 0x0FF); int c2 = (((int) sector_buffer[2]) & 0x0FF); int c3 = (((int) sector_buffer[3]) & 0x0FF); int c4 = (((int) sector_buffer[4]) & 0x0FF); delete_head = (c1 << 24) + (c2 << 16) + (c3 << 8) + (c4); return free_sector; } /** * Finds the first free available sector that we can use. If we are * reclaiming from the deleted list, the deleted row is taken from the * linked list immediately. *

* NOTE: This method may alter 'delete_head' changing the list of deleted * sectors. */ private int findFreeSector() throws IOException { // Are we locked and can we reclaim a deleted sector? if (!locked() && delete_head != -1) { return reclaimTopFree(); } // Either we are locked or there are no deleted sectors in the chain. // The new sector is at the end of the store. return sectorCount(); } /** * Finds the first free available sector past the next one. This means, * if locked or delete_head == -1 then we return the sectorCount() + 1, * otherwise we reclaim the next available of the delete queue. */ private int findFreeSectorPastNext() throws IOException { // Are we locked and can we reclaim a deleted sector? if (!locked() && delete_head != -1) { return reclaimTopFree(); } // Either we are locked or there are no deleted sectors in the chain. // The new sector is at the end of the store. return sectorCount() + 1; } /** * Finds the first 'n' available sectors that we can use. If we are * reclaiming from the deleted list, the deleted row(s) are taken from the * linked list immediately. *

* NOTE: This method may alter 'delete_head' changing the list of deleted * sectors. */ private int[] findFreeSectors(int count) throws IOException { int fs_index = 0; int[] free_sectors = new int[count]; // Are we locked, and can we reclaim a deleted sector? if (!locked()) { while (fs_index < count && delete_head != -1) { free_sectors[fs_index] = reclaimTopFree(); ++fs_index; } } int sec_count = sectorCount(); // Free are on end of file now, while (fs_index < count) { free_sectors[fs_index] = sec_count; ++sec_count; ++fs_index; } // Return the top list of free sectors. return free_sectors; } // ---------- Public methods ---------- /** * Returns the size of the data store file. This is the total number of * bytes stored in the data store. */ public long totalSize() { return data_file.length(); } /** * Every data store has a 128 byte buffer that can be used to store state * information. The buffer starts at offset 64 of the file until offset 192. * This method writes data to that offset. */ public void writeReservedBuffer(byte[] info, int offset, int length, int res_offset) throws IOException { if ((length + res_offset) > 128) { throw new Error("Attempted to write > 128 bytes in reserve buffer."); } data_store.seek(res_offset + 64); data_store.write(info, offset, length); } public void writeReservedBuffer(byte[] info, int offset, int length) throws IOException { writeReservedBuffer(info, offset, length, 0); } /** * Reads from the buffer reserve into the given byte array. */ public void readReservedBuffer(byte[] info, int offset, int length) throws IOException { if (length > 128) { throw new Error("Attempted to read > 128 bytes from reserve buffer."); } data_store.seek(64); data_store.readFully(info, offset, length); } // Byte array used to synchronize data in store. // Enough room for two longs. private byte[] sync_buffer = new byte[16]; /** * Synchronizes the memory store with the file header. This writes * information into the header. 
This should be called periodically. * Synch does nothing for a read only store. */ public void synch() throws IOException { if (!read_only) { // Write the head deleted sector. ByteArrayUtil.setLong(delete_head, sync_buffer, 0); // Write the number of sectors that are used (not deleted). ByteArrayUtil.setLong(used_sector_count, sync_buffer, 8); // Write the update // Skip past magic int and sector size int data_store.seek(12); data_store.write(sync_buffer, 0, 16); } } /** * Performs a hard synchronization of this store. This will force the OS * to synchronize the contents of the data store. hardSynch does nothing * for a read only store. */ public void hardSynch() throws IOException { if (!read_only) { synch(); // Make sure we are also synchronized in the file system. try { data_store.getFD().sync(); } catch (SyncFailedException e) { /* ignore */ } } } /** * Returns true if the store has been opened in read only mode. */ public boolean isReadOnly() { return read_only; } /** * Opens the data store. The data store can be opened in 'read only' mode. * Returns 'true' if the open procedure should repair itself (dirty open) or * false if the file was cleanly closed down. *

* It is not possible to open a damaged store in read only mode. * * @param read_only if true, then the database is opened in read only mode, * otherwise it is opened in read/write mode. */ public boolean open(boolean read_only) throws IOException { this.read_only = read_only; // If the file doesn't exist, check we have a valid sector size. if (!data_file.exists()) { if (sector_size <= 0) { throw new IOException("Sector size not set for new file."); } } // Open the file String mode = read_only ? "r" : "rw"; data_store = new RandomAccessFile(data_file, mode); data_store.seek(0); // Does the header exist? if (data_store.length() < SECTOR_DATA_OFFSET) { if (read_only) { throw new IOException( "Unable to open FixedSizeDataStore. No header found."); } ByteArrayOutputStream bout = new ByteArrayOutputStream(SECTOR_DATA_OFFSET); DataOutputStream dout = new DataOutputStream(bout); // No, so create the header // Write the magic int dout.writeInt(MAGIC); // The version of the file type. dout.writeInt(0x0100); // Write the sector size dout.writeInt(sector_size); // Write the delete_head dout.writeLong(-1); // Write the number of sectors that are being used. dout.writeLong(0); // Write whether file open or closed dout.writeByte(0); // Write the offset where the sector information starts. dout.writeInt(SECTOR_DATA_OFFSET); // Transfer to a new buffer and write entirely to file. byte[] buf = bout.toByteArray(); dout.close(); byte[] buf2 = new byte[SECTOR_DATA_OFFSET]; System.arraycopy(buf, 0, buf2, 0, buf.length); for (int i = buf.length; i < SECTOR_DATA_OFFSET; ++i) { buf2[i] = (byte) 255; } data_store.write(buf2); } data_store.seek(0); // Set the size of the file. 
data_store_size = data_store.length(); // Read the header, if (data_store.readInt() == MAGIC) { // Read the version number, int version = data_store.readInt(); if (version != 0x0100) { throw new IOException("Unknown version."); } // Check the sector size is right, int ss_check = data_store.readInt(); // If sector_size not set yet, then set it from value in file. if (sector_size <= 0) { sector_size = ss_check; } if (ss_check == sector_size) { boolean need_repair = false; // Find the head of the deleted sectors linked list. delete_head = (int) data_store.readLong(); // Find the number of sectors that are being used. used_sector_count = (int) data_store.readLong(); // Did we close down cleanly? need_repair = data_store.readByte() == 0 ? false : true; // The offset where the sector data starts. sector_offset = data_store.readInt(); sector_buffer = new byte[sector_size]; buffered_sector = -2; // Write the 'open' flag to indicate store is open // ( Only if we opening in read/write mode ) if (!read_only) { data_store.seek(28); data_store.writeByte(1); } // Check sector count * sector_size + sector_offset is the same size // as the data store. int pred_size = (sectorCount() * sector_size) + sector_offset; // System.out.println("Sector Count: " + sectorCount()); // System.out.println("Sector Size: " + sector_size); // System.out.println("Sector Offset: " + sector_offset); // System.out.println("Data Store Size: " + data_store_size); // System.out.println("Pred Size: " + pred_size); if (pred_size != data_store_size) { debug.write(Lvl.ERROR, this, "The FixedSizeDataStore file size is incorrect."); debug.write(Lvl.ERROR, this, "File size should be: " + pred_size + "\n" + "But it's really: " + data_store_size); need_repair = true; } // Move seek to sector start offset. data_store.seek(sector_offset); // Do we need to repair? if (need_repair) { debug.write(Lvl.ALERT, this, "Store not closed cleanly."); } // Return true if we repaired. 
return need_repair; } else { throw new IOException( "Sector size for this data store does not match."); } } else { throw new IOException("Format invalid; MAGIC number didn't match."); } } /** * Closes the data store. */ public void close() throws IOException { // Sync internal information synch(); // Write a '0' in the 'open' header section to indicate the store // closed cleanly. // ( Only if we opening in read/write mode ) if (!read_only) { data_store.seek(28); data_store.writeByte(0); } // Check the size long close_size = data_store.length(); if (close_size != data_store_size) { debug.write(Lvl.ERROR, this, "On closing file, data_store_size != close_size (" + data_store_size + " != " + close_size + ")"); } // Sync the file with the hardware, try { data_store.getFD().sync(); } catch (SyncFailedException e) { /* ignore */ } // Close the file data_store.close(); // Help the GC data_store = null; sector_buffer = null; buffered_sector = -2; } /** * Returns true if the store is closed. */ public boolean isClosed() { return data_store == null; } /** * Deletes the data store from the file system. */ public void delete() { if (data_store == null) { data_file.delete(); } else { throw new Error("Must close before FixedSizeDataStore is deleted."); } } /** * Returns true if the file for this store exists. */ public boolean exists() throws IOException { return data_file.exists(); } /** * Returns the number of bytes that the user may store in a sector. The * actual sector space in the file may be slightly larger. */ public int getSectorSize() { return sector_size - EXTRA_SECTOR_SIZE; } /** * Returns the number of sectors in the store that are being used (as * opposed to being deleted). */ public int getSectorUseCount() { return used_sector_count; } /** * Returns the total number of sectors that are currently available * (includes used and deleted sectors). 
*/ public int rawSectorCount() throws IOException { return sectorCount(); } // ------- Locking ------- /** * Locks the store by some process so that we may not reclaim deleted * sectors. The purpose of this is in the situation where we have a slow * thread accessing information from the store, and a seperate thread * is still able to modifying (delete and add) to the store. */ public void lock() { ++lock_count; } /** * Unlocks the store. */ public void unlock() { --lock_count; if (lock_count < 0) { throw new Error("Unlocked more times than we locked."); } } // ------- Sector queries -------- /** * Returns true if the sector number is flagged as deleted. If returns false * then the sector is being used. */ public boolean isSectorDeleted(int sector) throws IOException { readSector(sector); return ((sector_buffer[0] & DELETED) != 0); } // ------- Get a sector from the store ------- /** * Gets the contents of the sector at the given index. */ public byte[] getSector(int sector, byte[] buf, int offset, int length) throws IOException { if (sector >= sectorCount()) { throw new IOException("Can't get sector, out of range."); } int ssize = getSectorSize(); if (length > ssize) { throw new IOException("length > sector size"); } readSector(sector); System.arraycopy(sector_buffer, EXTRA_SECTOR_SIZE, buf, offset, length); return buf; } /** * Gets the contents of the sector at the given index. */ public byte[] getSector(int sector, byte[] buf) throws IOException { return getSector(sector, buf, 0, Math.min(buf.length, getSectorSize())); } /** * Gets the contents of the sector at the given index. */ public byte[] getSector(int sector) throws IOException { if (sector >= sectorCount()) { throw new IOException("Can't get sector, out of range."); } int ssize = getSectorSize(); byte[] buf = new byte[ssize]; readSector(sector); System.arraycopy(sector_buffer, EXTRA_SECTOR_SIZE, buf, 0, ssize); return buf; } /** * Gets the contents of the sector at the given index as an int[] array. 
* The array size is /4 of the sector size. If the sector size is not * divisible by 4 then the last 1-3 bytes are truncated. */ public int[] getSectorAsIntArray(int sector, int[] buf) throws IOException { if (sector >= sectorCount()) { throw new IOException("Can't get sector, out of range."); } int length = buf.length * 4; int ssize = getSectorSize(); if (length > ssize) { throw new IOException("length > sector size"); } readSector(sector); // Convert the sector (as a byte array) to an int array. int p = EXTRA_SECTOR_SIZE; int i = 0; while (i < buf.length) { int c1 = (((int) sector_buffer[p++]) & 0x0FF); int c2 = (((int) sector_buffer[p++]) & 0x0FF); int c3 = (((int) sector_buffer[p++]) & 0x0FF); int c4 = (((int) sector_buffer[p++]) & 0x0FF); int v = (c1 << 24) + (c2 << 16) + (c3 << 8) + (c4); buf[i++] = v; } return buf; } /** * Reads information across a chain of sectors and fills the byte[] array * buffer. Returns the number of bytes that were read (should always be * equal to 'length'). */ public int readAcross(int sector_head, byte[] buf, int offset, int length) throws IOException { if (sector_head >= sectorCount()) { throw new IOException("Can't get sector, out of range."); } int to_read = length; int ssize = getSectorSize(); int walk = sector_head; while (walk != -1 && to_read > 0) { // Read in the sector readSector(walk); // Is the sector deleted? if ((sector_buffer[0] & DELETED) != 0) { throw new IOException("Can not read across a deleted chain."); } // The next sector in the chain... int next_walk = ByteArrayUtil.getInt(sector_buffer, 1); // Fill the byte[] array buffer with what's in the sector. int amount_read = Math.min(to_read, ssize); System.arraycopy(sector_buffer, EXTRA_SECTOR_SIZE, buf, offset, amount_read); offset += amount_read; to_read -= amount_read; // Walk to next in chain walk = next_walk; } return offset; } /** * Traverses a sector chain and returns an array of all sectors that are * part of the chain. 
* Useful for diagnostic, repair and statistical operations. */ public int[] getSectorChain(int sector_head, int length) throws IOException { if (sector_head >= sectorCount()) { throw new IOException("Can't get sector, out of range."); } // The number of sectors to traverse. int span_count = calculateSectorSpan(length); int[] spans = new int[span_count]; int ssize = getSectorSize(); int walk = sector_head; int chain_count = 0; while (chain_count < span_count) { spans[chain_count] = walk; // Read in the sector readSector(walk); // The next sector in the chain... walk = ByteArrayUtil.getInt(sector_buffer, 1); // Increment the chain walk counter. ++chain_count; } return spans; } /** * Traverses a sector chain and returns an array of all sectors that are * part of the chain. * Useful for diagnostic, repair and statistical operations. */ public int[] getSectorChain(int sector_head) throws IOException { if (sector_head >= sectorCount()) { throw new IOException("Can't get sector, out of range."); } IntegerVector spans = new IntegerVector(); int ssize = getSectorSize(); int walk = sector_head; while (walk > -1) { spans.addInt(walk); // Read in the sector readSector(walk); // The next sector in the chain... walk = ByteArrayUtil.getInt(sector_buffer, 1); } return spans.toIntArray(); } // ------- Delete a sector from the store ------- /** * Deletes a sector from the store. The sector is only marked as deleted, * however, and the contents may still be accessed via the 'getSector' * methods. If the store is add locked, then it is guarenteed that no * deleted sectors will be overwritten until the add lock is taken from the * table. *

* Throws an IO error if the sector is marked as deleted. */ public void deleteSector(int sector) throws IOException { deleteAcross(sector); } /** * Deletes a set of sectors that have been chained together. This should * be used to delete data added via the 'write' method. However, it * can be used to delete data added via the 'addSector' */ public void deleteAcross(final int sector_head) throws IOException { if (sector_head < 0) { throw new IOException("Sector is out of range."); } if (sector_head >= sectorCount()) { throw new IOException("Can't get sector, out of range."); } // How algorithm works: // delete_head is set to sector_head // We then walk through the chain until we hit a -1 and then we set // that to the old delete_head. // NOTE: This algorithm doesn't change any chained sectors, so the // 'readAcross' method will still work even on deleted chains (provided // there's a lock on the store). int walk = sector_head; while (walk != -1) { // Read in the sector readSector(walk); if ((sector_buffer[0] & DELETED) != 0) { // Already been deleted, so throw an IOException throw new IOException("Sector has already been deleted."); } // The next sector in the chain... int next_walk = ByteArrayUtil.getInt(sector_buffer, 1); // Mark as deleted sector_buffer[0] = DELETED; // If the next chain is -1 (end of chain) then set it to delete_head if (next_walk == -1) { ByteArrayUtil.setInt(delete_head, sector_buffer, 1); } // Write the new header for the sector. seekSector(walk); data_store.write(sector_buffer, 0, 5); if (sector_cache != null) { // Remove this from the cache. sector_cache.remove(new Integer(walk)); } // Delete 1 from the used sector count. --used_sector_count; // Walk to next in chain walk = next_walk; } // Add this chain to the deleted chain. delete_head = sector_head; // Synchronize with the file system. synch(); } /** * Deletes all sectors in the entire store. Use with care. 
*/ public void deleteAllSectors() throws IOException { int sector_count = sectorCount(); for (int i = 0; i < sector_count; ++i) { readSector(i); sector_buffer[0] = DELETED; int next = i + 1; if (i == sector_count - 1) { next = -1; } ByteArrayUtil.setInt(next, sector_buffer, 1); writeSector(i); } // Set the head of the delete chain delete_head = sector_count == 0 ? -1 : 0; // set 'used_sector_count' used_sector_count = 0; // Sync the information with the file synch(); } // ------- Adds a new sector into the store ------- /** * Writes the contents of a sector into the store overwritting any * other information that may be stored there. This is used as a rough * data editting command. */ public int overwriteSector(int sector, byte[] buf, int offset, int length) throws IOException { int ssize = getSectorSize(); if (length > ssize) { throw new IOException("Sector too large to add to store."); } // Write the sector entry, return writeBufToSector(sector, -1, buf, offset, length); } /** * Writes the contents of a sector into the store overwritting any * other information that may be stored there. This is used as a rough * data editting command. */ public int overwriteSector(int sector, byte[] buf) throws IOException { return overwriteSector(sector, buf, 0, buf.length); } /** * Adds a new sector into the store. It finds a suitable sector to store * the information and returns the sector number. If lock_count > 0 then * we do not reclaim deleted sectors, otherwise we do. */ public int addSector(byte[] buf, int offset, int length) throws IOException { int ssize = getSectorSize(); if (length > ssize) { throw new IOException("Sector too large to add to store."); } // Find a suitable sector to add the data into. int sector = findFreeSector(); // Write a new sector buffer entry, return writeBufToSector(sector, -1, buf, offset, length); } /** * Adds a new sector into the store. It finds a suitable sector to store * the information and returns the sector number. 
If lock_count > 0 then * we do not reclaim deleted sectors, otherwise we do. */ public int addSector(byte[] buf) throws IOException { return addSector(buf, 0, buf.length); } /** * Calculates the number of sectors the given length of bytes will span. */ public int calculateSectorSpan(int length) { int sector_size = getSectorSize(); int span_count = length / sector_size; // Special case, if length is zero then still use at least 1 sector, if (length == 0 || (length % sector_size) != 0) { ++span_count; } return span_count; } /** * Writes a byte[] array of data across as many sectors as it takes to store * the data. Returns the index to the first sector that contains the * start of the data. */ public int writeAcross(byte[] buf, int offset, int length) throws IOException { int sector_size = getSectorSize(); // How many sectors does this data span? int span_count = calculateSectorSpan(length); // Get free sectors to write this buffer information to. int[] free_sectors = findFreeSectors(span_count); // Sort the list so we are writing forward in the file. Arrays.sort(free_sectors, 0, span_count); // Write the information to the sectors. int to_write = length; int to_offset = 0; for (int i = 0; i < span_count; ++i) { int sector = free_sectors[i]; int next_sector; if (i < span_count - 1) { next_sector = free_sectors[i + 1]; } else { next_sector = -1; } // Write the sector part to the store. writeBufToSector(sector, next_sector, buf, to_offset, Math.min(to_write, sector_size)); to_write -= sector_size; to_offset += sector_size; } // Return the first free sector... return free_sectors[0]; } /** * The last sector output stream that was created. */ private SectorOutputStream sector_output_stream; /** * Returns an OutputStream implementation that is used to write a stream * of information into this data store. As data is written into the stream, * the data is flushed into this store at the next available sector. 
When * the stream is closed, the entire contents of the stream will be contained * within the store. A call to 'getSectorOfLastOutputStream' can be used to * return an index that is used to reference this stream of information in * the store. *

* NOTE: While an output stream returned by this method is not closed, * it is unsafe to use any methods in the FixedSizeDataStore object. */ public OutputStream getSectorOutputStream() throws IOException { sector_output_stream = new SectorOutputStream(); return sector_output_stream; } /** * Returns the first sector the OutputStream returned by * 'getSectorOutputStream' wrote to. This is the start of the chain. */ public int getSectorOfLastOutputStream() { return sector_output_stream.first_sector; } /** * Returns the number of bytes that were written out by the last closed * output stream returned by 'getSectorOutputStream'. */ public int getLengthOfLastOutputStream() { return sector_output_stream.count; } /** * Wipes the SectorOutputStream from this object. This should be closed * after the stream is closed. */ public void wipeLastOutputStream() { sector_output_stream = null; } /** * Returns an InputStream implementation that is used to read a stream of * information from the store. This input stream will iterate through the * sector chain given. *

* NOTE: Using this InputStream, an end of stream identifier is never * produced. When the last sector in the chain is reached, the input * stream will first read padding whitespace, then it will either loop to * the start of the last sector, or move to another undefined sector. * You must not rely on this stream reaching an EOF. */ public InputStream getSectorInputStream(int sector_head) throws IOException { return new SectorInputStream(sector_head); } // ------- Utility methods ------- /** * Copies the entire contents of this store to a destination directory. * This can only be called when the data store is open. It makes an * exact copy of the file. *

* The purpose of this method is so we can make a copy of the data * in this store while the store is open and 'live'. *

* We assume synchronization on this object. *

* @param path the directory to copy this file to. */ public void copyTo(File path) throws IOException { String fname = data_file.getName(); FileOutputStream fout = new FileOutputStream(new File(path, fname)); int BUF_SIZE = 65536; // 64k copy buffer. byte[] buf = new byte[BUF_SIZE]; data_store.seek(0); int read = data_store.read(buf, 0, BUF_SIZE); while (read >= 0) { fout.write(buf, 0, read); read = data_store.read(buf, 0, BUF_SIZE); } fout.close(); } /** * Attempts to repair this data store to a correct state. The UserTerminal * object can be used to ask the user questions and to output information * on the progress of the repair. *

* The store must have been opened before this method is called. */ public void fix(UserTerminal terminal) throws IOException { terminal.println("- File: " + data_file); // First synch with the disk synch(); // Check the length is correct if ((data_store_size - (long) sector_offset) % (long) sector_size != 0) { terminal.println("+ Altering length of file so it is correct " + "for sector size"); int row_count = sectorCount() + 1; long new_size = (row_count * sector_size) + sector_offset; setDataStoreSize(new_size); } IntegerVector sector_info = new IntegerVector(); IntegerVector scc = new IntegerVector(); int null_count = 0; // The total number of physical sectors in the file, int sector_count = sectorCount(); terminal.println("- Sector Count: " + sectorCount()); // Go through every sector and mark each one appropriately. for (int i = 0; i < sector_count; ++i) { readSector(i); // Deleted sector int next_chain = ByteArrayUtil.getInt(sector_buffer, 1); sector_info.addInt((int) sector_buffer[0]); sector_info.addInt(next_chain); if (next_chain == -1) { ++null_count; } else { int old_val = 0; if (next_chain < scc.size()) { old_val = scc.intAt(next_chain); } scc.placeIntAt(old_val + 1, next_chain); } } // The number of unchanged sectors... terminal.println("- unchained sectors = " + null_count); // Any sectors that are referenced more than once are erroneous. // These sectors are marked as bad IntegerVector bad_sectors = new IntegerVector(); for (int i = 0; i < scc.size(); ++i) { int ref_count = scc.intAt(i); if (ref_count > 1) { terminal.println("- [" + i + "] reference count = " + ref_count); terminal.println("+ Marking all references as bad (except first)."); boolean found_first = false; for (int n = 0; n < sector_info.size(); n += 2) { if (sector_info.intAt(n + 1) == i) { if (found_first) { bad_sectors.addInt(n / 2); } found_first = true; } } } } // Any marked as bad? 
if (bad_sectors.size() > 0) { terminal.println("+ Marked " + bad_sectors.size() + " sectors bad."); } // Mark the sectors as deleted for (int i = 0; i < bad_sectors.size(); ++i) { int sector = bad_sectors.intAt(i); readSector(sector); sector_buffer[0] = DELETED; writeSector(sector); } // PENDING: Are there are chains from active to deleted sectors, or // deleted to active. // Then go ahead and repair the file, repair(); } /** * Cleans up so all deleted sectors are completely removed from the store. * This has the effect of reducing the size of the file by the size of every * deleted sector. *

* It is extremely important that nothing can be read/written from the file * while this is happening. And certainly, we can not have any locks on * this store. *

* Returns true if the layout of the sectors changed (so we can fix * indices that point to sectors). */ public boolean clearDeletedSectors() throws IOException { if (locked()) { throw new IOException( "Store is locked, can not reclaim deleted sectors."); } // Are there any deleted rows to reclaim? if (delete_head != -1) { // Yes, so run through the table and move all data over the top of // deleted rows. int scount = sectorCount(); int move_to = 0; int row_count = 0; for (int i = 0; i < scount; ++i) { // Read the sector readSector(i); // Is it used? (DELETED flag not set) if ((sector_buffer[0] & DELETED) == 0) { ++row_count; // Not deleted, therefore we may have to move. Is move_to < i? if (move_to < i) { // Move this sector to 'move_to' writeSector(move_to); buffered_sector = move_to; } move_to = move_to + 1; } } // Resize the file. long new_size = (row_count * sector_size) + sector_offset; setDataStoreSize(new_size); // Set the delete_head to -1 delete_head = -1; // The number of sectors that are being used. used_sector_count = row_count; // Synchronize the header. synch(); // Sectors moved around so return true. return true; } else { // No rows to remove so return false. return false; } } // [ It's a bad idea to use this when there are sector chains because it // reorganizes the chain of deleted sectors. The order of deleted sectors is // important when dirty reading deleted information from the store (when a // table is updated for example). // In addition, it's debatable whether a 'repair' method is worth it. It // would probably be better to use 'clearDeletedSectors' to ensure the // store is in a good state. ] /** * Repairs the consistancy of the store. This is an expensive operation * that runs through every sector and determines if it's deleted or used. * If it's deleted it is added into the deleted linked list. *

* Repair assumes we can at least get past the 'open' method. This method * does not change the order of the sectors in the store. However it may * change the order in which deleted sectors are reclaimed. *

* In a perfect world, this should never need to be called. However, it's * a good idea to call this every so often because we are assured that * the delete linked list and 'used_sector_count' variables will be * correct when the method returns. *

* It is not possible to repair a store that's been opened in read only * mode. */ public void repair() throws IOException { // Init to known states. delete_head = -1; int scount = sectorCount(); int row_count = 0; int delete_count = 0; byte[] mark_buffer = new byte[5]; for (int i = 0; i < scount; ++i) { // Read the sector readSector(i); // Is it deleted? if ((sector_buffer[0] & DELETED) != 0) { // Add this row into the list of deleted rows. int v = delete_head; mark_buffer[0] = DELETED; mark_buffer[1] = (byte) ((v >>> 24) & 0xFF); mark_buffer[2] = (byte) ((v >>> 16) & 0xFF); mark_buffer[3] = (byte) ((v >>> 8) & 0xFF); mark_buffer[4] = (byte) ((v >>> 0) & 0xFF); seekSector(i); data_store.write(mark_buffer, 0, 5); if (sector_cache != null) { // Remove from cache sector_cache.remove(new Integer(i)); } delete_head = i; ++delete_count; } else { // Add to the used sector count ++row_count; } } // 'delete_head' should be set correctly now, // set 'used_sector_count' used_sector_count = row_count; // Sync the information with the file synch(); debug.write(Lvl.MESSAGE, this, "Repair found (" + delete_count + ") deleted, (" + row_count + ") used sectors."); } // ------- Diagnostics for the store ------- /** * Returns a string that contains diagnostic information. */ public String statusString() throws IOException { int sc = sectorCount(); StringBuffer str = new StringBuffer(); str.append("Sector Count: "); str.append(sc); str.append("\nSectors Used: "); str.append(getSectorUseCount()); str.append("\nLocks: "); str.append(lock_count); str.append("\nFree Sectors: "); str.append(sc - getSectorUseCount()); str.append("\n"); return new String(str); } // ---------- Inner classes ---------- /** * A buffered OutputStream object that writes all data written to the stream * out to the store. */ private class SectorOutputStream extends OutputStream { /** * The sector buffers. */ private final byte[] buf; /** * The first sector we wrote to. 
*/ private int first_sector = -1; /** * The cur sector to use. */ private int cur_sector = -1; /** * The last sector we wrote to. */ private int last_sector = -1; /** * Current index in the buffer */ private int index; /** * Total bytes written. */ private int count; SectorOutputStream() throws IOException { buf = new byte[getSectorSize()]; index = 0; count = 0; first_sector = findFreeSector(); cur_sector = first_sector; } // ---------- Implemented from OutputStream ---------- public void write(int b) throws IOException { if (index >= buf.length) { // Flush to the next sector. int next_sector = findFreeSector(); if (next_sector == cur_sector) { // Nasty hack - if next_sector == cur_sector then we reclaiming // space from end of store, so increment by 1. next_sector = next_sector + 1; } // Write the buffer. writeBufToSector(cur_sector, next_sector, buf, 0, index); cur_sector = next_sector; index = 0; } buf[index] = (byte) b; ++index; ++count; } public void write(byte[] b, int offset, int len) throws IOException { while (index + len > buf.length) { // Copy int to_copy = buf.length - index; System.arraycopy(b, offset, buf, index, to_copy); offset += to_copy; len -= to_copy; index += to_copy; // Not really necessary - just gets set to 0 count += to_copy; int next_sector = findFreeSector(); if (next_sector == cur_sector) { // Nasty hack - if next_sector == cur_sector then we reclaiming // space from end of store, so increment by 1. next_sector = next_sector + 1; } writeBufToSector(cur_sector, next_sector, buf, 0, index); cur_sector = next_sector; index = 0; } if (len > 0) { System.arraycopy(b, offset, buf, index, len); index += len; count += len; } } public void flush() throws IOException { // Flush does nothing... } public void close() throws IOException { writeBufToSector(cur_sector, -1, buf, 0, index); } } /** * An input stream that reads information across a sector chain starting at * the given head sector. 
*/ private final class SectorInputStream extends InputStream { /** * The current sector we are traversing. */ private int sector; /** * Current index in buf. */ private int index; /** * The number of bytes we have read. */ private int count; /** * A reference to the sector buffer. */ private byte[] sector_buffer; /** * Constructor. */ SectorInputStream(int sector_head) throws IOException { this.sector = sector_head; this.sector_buffer = FixedSizeDataStore.this.sector_buffer; // Load the first sector. loadNextSector(); count = 0; } /** * Loads the next sector in the chain into sector_buffer and sets index * to the start of the buffer. */ private void loadNextSector() throws IOException { if (sector != -1) { // Read contents into 'sector_buffer' readSector(sector); } index = EXTRA_SECTOR_SIZE; // The next sector sector = ByteArrayUtil.getInt(sector_buffer, 1); } // ---------- Implemented from InputStream ---------- public final int read() throws IOException { int b = ((int) sector_buffer[index]) & 0x0FF; ++index; ++count; if (index >= sector_size) { loadNextSector(); } return b; } public int read(byte[] b, int offset, int len) throws IOException { int original_len = len; while (index + len > sector_size) { // Copy int to_copy = sector_size - index; System.arraycopy(sector_buffer, index, b, offset, to_copy); offset += to_copy; len -= to_copy; index += to_copy; // Not really necessary - just gets set to 0 count += to_copy; // Load the next sector. loadNextSector(); } if (len > 0) { System.arraycopy(sector_buffer, index, b, offset, len); index += len; count += len; if (index >= sector_size) { loadNextSector(); } } return original_len; } public long skip(long len) throws IOException { long original_len = len; while (index + len > sector_size) { int to_copy = sector_size - index; len -= to_copy; index += to_copy; // Not really necessary - just gets set to 0 count += to_copy; // Load the next sector. 
loadNextSector(); } if (len > 0) { index += len; count += len; if (index >= sector_size) { loadNextSector(); } } return original_len; } } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/Function.java000066400000000000000000000070121330501023400243450ustar00rootroot00000000000000/** * com.mckoi.database.Function 11 Jul 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import java.util.List; /** * Represents a function that is part of an expression to be evaluated. A * function evaluates to a resultant Object. If the parameters of a function * are not constant values, then the evaluation will require a lookup via a * VariableResolver or GroupResolver. The GroupResolver helps evaluate an * aggregate function. * * @author Tobias Downer */ public interface Function { /** * Returns the name of the function. The name is a unique identifier that * can be used to recreate this function. This identifier can be used to * easily serialize the function when grouped with its parameters. */ public String getName(); /** * Returns the list of Variable objects that this function uses as its * parameters. If this returns an empty list, then the function must * only have constant parameters. This information can be used to optimize * evaluation because if all the parameters of a function are constant then * we only need to evaluate the function once. 
*/
  public List allVariables();

  /**
   * Returns the list of all element objects that this function uses as its
   * parameters.  If this returns an empty list, then the function has no
   * input elements at all. ( something like: upper(user()) )
   */
  public List allElements();

  /**
   * Returns true if this function is an aggregate function.  An aggregate
   * function requires that the GroupResolver is not null when the evaluate
   * method is called.
   */
  public boolean isAggregate(QueryContext context);

  /**
   * Prepares the expressions that are the parameters of this function.  This
   * is intended to be used if we need to resolve aspects such as Variable
   * references.  For example, a variable reference to 'number' may become
   * 'APP.Table.NUMBER'.
   */
  public void prepareParameters(ExpressionPreparer preparer)
                                                   throws DatabaseException;

  /**
   * Evaluates the function and returns a TObject that represents the result
   * of the function.  The VariableResolver object should be used to look
   * up variables in the parameter of the function.  The 'FunctionTable'
   * object should only be used when the function is a grouping function.  For
   * example, 'avg(value_of)'.
   */
  public TObject evaluate(GroupResolver group, VariableResolver resolver,
                          QueryContext context);

  /**
   * The type of object this function returns.  eg. TStringType,
   * TBooleanType, etc.  The VariableResolver points to a dummy row that can
   * be used to dynamically determine the return type.  For example, an
   * implementation of SQL 'GREATEST' would return the same type as the
   * list elements.
   */
  public TType returnTType(VariableResolver resolver, QueryContext context);

}
mckoisqldb-1.0.6/src/main/java/com/mckoi/database/FunctionDef.java000066400000000000000000000105441330501023400247700ustar00rootroot00000000000000/**
 * com.mckoi.database.FunctionDef  07 Sep 2001
 *
 * Mckoi SQL Database ( http://www.mckoi.com/database )
 * Copyright (C) 2000-2018 Diehl and Associates, Inc.
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; /** * A definition of a function including its name and parameters. A FunctionDef * can easily be transformed into a Function object via a set of * FunctionFactory instances. *

* NOTE: This object is NOT immutable or thread-safe.  A FunctionDef should not
 * be shared among different threads.
 *
 * @author Tobias Downer
 */
public final class FunctionDef implements java.io.Serializable, Cloneable {

  static final long serialVersionUID = 3339781003247956829L;

  /**
   * The name of the function.
   */
  private String name;

  /**
   * The list of parameters for the function.
   */
  private Expression[] params;

  /**
   * A cached Function object that was generated when this FunctionDef was
   * looked up.  Note that the Function object is transient.
   */
  private transient Function cached_function;


  /**
   * Constructs the FunctionDef.
   */
  public FunctionDef(String name, Expression[] params) {
    this.name = name;
    this.params = params;
  }

  /**
   * The name of the function.  For example, 'MIN' or 'CONCAT'.
   */
  public String getName() {
    return name;
  }

  /**
   * The list of parameters that are passed to the function.  For example,
   * a concat function may have 7 parameters ('There', ' ', 'are', ' ', 10,
   * ' ', 'bottles.')
   */
  public Expression[] getParameters() {
    return params;
  }

  /**
   * Returns true if this function is an aggregate, or the parameters are
   * aggregates.  It requires a QueryContext object to lookup the function in
   * the function factory database.
   */
  public boolean isAggregate(QueryContext context) {
    FunctionLookup fun_lookup = context.getFunctionLookup();
    boolean is_aggregate = fun_lookup.isAggregate(this);
    if (is_aggregate) {
      return true;
    }
    // Look at params
    Expression[] params = getParameters();
    for (int i = 0; i < params.length; ++i) {
      is_aggregate = params[i].hasAggregateFunction(context);
      if (is_aggregate) {
        return true;
      }
    }
    // No
    return false;
  }

  /**
   * Returns a Function object from this FunctionDef.  Note that two calls to
   * this method will produce the same Function object, however the same
   * Function object will not be produced over multiple instances of
   * FunctionDef even when they represent the same thing.
   */
  public Function getFunction(QueryContext context) {
    if (cached_function != null) {
      return cached_function;
    }
    else {
      FunctionLookup lookup = context.getFunctionLookup();
      // NOTE(review): the lookup result is cached without synchronization;
      // this is consistent with the class javadoc -- a FunctionDef must not
      // be shared between threads.
      cached_function = lookup.generateFunction(this);
      if (cached_function == null) {
        throw new StatementException("Function '" + getName() +
                                     "' doesn't exist.");
      }
      return cached_function;
    }
  }

  /**
   * Performs a deep clone of this object.
   */
  public Object clone() throws CloneNotSupportedException {
    FunctionDef v = (FunctionDef) super.clone();
    // Deep clone the parameters
    Expression[] exps = (Expression[]) ((Expression[]) v.params).clone();
    // Clone each element of the array
    for (int n = 0; n < exps.length; ++n) {
      exps[n] = (Expression) exps[n].clone();
    }
    v.params = exps;
    // The clone must not share the cached Function with the original.
    v.cached_function = null;
    return v;
  }

  /**
   * Human understandable string, used for the column title.
   */
  public String toString() {
    StringBuffer buf = new StringBuffer();
    buf.append(name);
    buf.append('(');
    for (int i = 0; i < params.length; ++i) {
      buf.append(params[i].text().toString());
      if (i < params.length - 1) {
        buf.append(',');
      }
    }
    buf.append(')');
    return new String(buf);
  }

}
mckoisqldb-1.0.6/src/main/java/com/mckoi/database/FunctionFactory.java000066400000000000000000000177561330501023400257110ustar00rootroot00000000000000/**
 * com.mckoi.database.FunctionFactory  12 Jul 2000
 *
 * Mckoi SQL Database ( http://www.mckoi.com/database )
 * Copyright (C) 2000-2018 Diehl and Associates, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
*
 */
package com.mckoi.database;

import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import java.util.Iterator;
import java.util.HashMap;
import java.util.Set;
import java.text.*;

/**
 * A factory that generates Function objects given a function name and a
 * set of expressions that represent parameters.  A developer may create
 * their own instance of this class and register the factory with the
 * DatabaseSystem.  When the SQL grammar comes across a function, it will
 * try and resolve the function name against the registered function
 * factories.
 *
 * @author Tobias Downer
 */
public abstract class FunctionFactory implements FunctionLookup {

  private static final Expression GLOB_EXPRESSION;

  static {
    GLOB_EXPRESSION = new Expression();
    GLOB_EXPRESSION.addElement(TObject.stringVal("*"));
    GLOB_EXPRESSION.text().append("*");
  }

  /**
   * Represents a function argument
   * for glob's such as 'count(*)'
   */
  public static final Expression[] GLOB_LIST =
                                   new Expression[] { GLOB_EXPRESSION };

  /**
   * The mapping of 'fun_name' to 'fun_class' for each function that's
   * registered with this factory.
   */
  private HashMap fun_class_mapping;

  /**
   * Constructor arguments for the function.
   */
  private Class[] construct_proto;


  /**
   * Constructs the FunctionFactory.
   */
  public FunctionFactory() {
    fun_class_mapping = new HashMap();
    // This is the prototype for the constructor when creating a new function.
    // (Every registered Function class must expose a constructor that takes
    //  a single Expression[] argument.)
    construct_proto = new Class[1];
    Object exp_arr_ob =
        java.lang.reflect.Array.newInstance(new Expression().getClass(), 0);
    construct_proto[0] = exp_arr_ob.getClass();
  }

  /**
   * Adds a new function to this factory.  Takes a function name and a
   * class that is the Function implementation.  When the 'generateFunction'
   * method is called, it looks up the class with the function name and
   * returns a new instance of the function.
   *

* @param fun_name the name of the function (eg. 'sum', 'concat').
   * @param fun_class the Function class that we instantiate for this function.
   * @param fun_type the type of function (either FunctionInfo.STATIC,
   *   FunctionInfo.AGGREGATE, FunctionInfo.STATE_BASED).
   */
  protected void addFunction(String fun_name, Class fun_class, int fun_type) {
    try {
      // Function names are case-insensitive; the map is keyed by lower-case.
      String lf_name = fun_name.toLowerCase();
      if (fun_class_mapping.get(lf_name) == null) {
        FF_FunctionInfo ff_info = new FF_FunctionInfo(fun_name, fun_type,
                                   fun_class.getConstructor(construct_proto));
        fun_class_mapping.put(lf_name, ff_info);
      }
      else {
        throw new Error("Function '" + fun_name +
                        "' already defined in factory.");
      }
    }
    catch (NoSuchMethodException e) {
      // NOTE(review): the original exception is discarded here; only its
      // message is preserved as the cause description.
      throw new RuntimeException(e.getMessage());
    }
  }

  /**
   * Adds a new static function to this factory.
   */
  protected void addFunction(String fun_name, Class fun_class) {
    addFunction(fun_name, fun_class, FunctionInfo.STATIC);
  }

  /**
   * Removes a static function from this factory.
   */
  protected void removeFunction(String fun_name) {
    String lf_name = fun_name.toLowerCase();
    if (fun_class_mapping.get(lf_name) != null) {
      fun_class_mapping.remove(fun_name.toLowerCase());
    }
    else {
      throw new Error("Function '" + lf_name +
                      "' is not defined in this factory.");
    }
  }

  /**
   * Returns true if the function name is defined in this factory.
   */
  protected boolean functionDefined(String fun_name) {
    String lf_name = fun_name.toLowerCase();
    return fun_class_mapping.get(lf_name) != null;
  }

  /**
   * Initializes this FunctionFactory.  This is an abstract method that
   * needs to be implemented.  (It doesn't need to do anything if a developer
   * implements their own version of 'generateFunction').
   */
  public abstract void init();

  /**
   * Creates a Function object for the function with the given name with the
   * given arguments.  If this factory does not handle a function with the
   * given name then it returns null.
   */
  public Function generateFunction(FunctionDef function_def) {
                              //String func_name, Expression[] params) {

    String func_name = function_def.getName();
    Expression[] params = function_def.getParameters();

    // This will lookup the function name (case insensitive) and if a
    // function class was registered, instantiates and returns it.

    FF_FunctionInfo ff_info = (FF_FunctionInfo)
                              fun_class_mapping.get(func_name.toLowerCase());
    if (ff_info == null) {
      // Function not handled by this factory so return null.
      return null;
    }
    else {
      Constructor fun_constructor = (Constructor) ff_info.getConstructor();
      Object[] args = new Object[] { params };
      try {
        return (Function) fun_constructor.newInstance(args);
      }
      catch (InvocationTargetException e) {
        // Unwrap the exception thrown inside the Function constructor.
        // NOTE(review): the stack trace of the target exception is lost;
        // only its message survives.
        throw new RuntimeException(e.getTargetException().getMessage());
      }
      catch (Exception e) {
        throw new RuntimeException(e.getMessage());
      }
    }
  }

  /**
   * Returns true if the function defined by FunctionDef is an aggregate
   * function, or false otherwise.
   */
  public boolean isAggregate(FunctionDef function_def) {
    FunctionInfo f_info = getFunctionInfo(function_def.getName());
    if (f_info == null) {
      // Function not handled by this factory so return false.
      return false;
    }
    else {
      return (f_info.getType() == FunctionInfo.AGGREGATE);
    }
  }

  /**
   * Returns a FunctionInfo instance of the function with the given name that
   * this FunctionFactory manages.  If 'generateFunction' is reimplemented then
   * this method should be rewritten also.
   */
  public FunctionInfo getFunctionInfo(String fun_name) {
    FF_FunctionInfo ff_info = (FF_FunctionInfo)
                               fun_class_mapping.get(fun_name.toLowerCase());
    return ff_info;
  }

  /**
   * Returns the list of all function names that this FunctionFactory manages.
   * This is used to compile information about the function factories.  If
   * 'generateFunction' is reimplemented then this method should be rewritten
   * also.
*/
  public FunctionInfo[] getAllFunctionInfo() {
    Set keys = fun_class_mapping.keySet();
    int list_size = keys.size();
    FunctionInfo[] list = new FunctionInfo[list_size];
    Iterator i = keys.iterator();
    int n = 0;
    while (i.hasNext()) {
      String fun_name = (String) i.next();
      list[n] = getFunctionInfo(fun_name);
      ++n;
    }
    return list;
  }

  /**
   * An implementation of FunctionInfo.
   */
  protected class FF_FunctionInfo implements FunctionInfo {

    private String name;
    private int type;
    private Constructor constructor;

    public FF_FunctionInfo(String name, int type, Constructor constructor) {
      this.name = name;
      this.type = type;
      this.constructor = constructor;
    }

    public String getName() {
      return name;
    }

    public int getType() {
      return type;
    }

    public Constructor getConstructor() {
      return constructor;
    }

    public String getFunctionFactoryName() {
      return FunctionFactory.this.getClass().toString();
    }

  };

}
mckoisqldb-1.0.6/src/main/java/com/mckoi/database/FunctionInfo.java000066400000000000000000000046501330501023400251660ustar00rootroot00000000000000/**
 * com.mckoi.database.FunctionInfo  17 Aug 2001
 *
 * Mckoi SQL Database ( http://www.mckoi.com/database )
 * Copyright (C) 2000-2018 Diehl and Associates, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */
package com.mckoi.database;

/**
 * Meta information about a function.  Used to compile information about a
 * particular function.
 *
 * @author Tobias Downer
 */
public interface FunctionInfo {

  /**
   * A type that represents a static function.  A static function is not
   * an aggregate therefore does not require a GroupResolver.  The result of
   * a static function is guaranteed the same given identical parameters over
   * subsequent calls.
   */
  public static final int STATIC = 1;

  /**
   * A type that represents an aggregate function.  An aggregate function
   * requires the GroupResolver variable to be present to be able to resolve
   * the function over some set.  The result of an aggregate function is
   * guaranteed the same given the same set and identical parameters.
   */
  public static final int AGGREGATE = 2;

  /**
   * A function that is non-aggregate but whose return value is not guaranteed
   * to be the same given the identical parameters over subsequent calls.  This
   * would include functions such as RANDOM and UNIQUEKEY.  The result is
   * dependent on some other state (a random seed and a sequence value).
   */
  public static final int STATE_BASED = 3;

  /**
   * The name of the function as used by the SQL grammar to reference it.
   */
  String getName();

  /**
   * The type of function, either STATIC, AGGREGATE or STATE_BASED (eg. result
   * is not dependent entirely from input but from another state for example
   * RANDOM and UNIQUEKEY functions).
   */
  int getType();

  /**
   * The name of the function factory class that this function is handled by.
   * For example, "com.mckoi.database.InternalFunctionFactory".
   */
  String getFunctionFactoryName();

}
mckoisqldb-1.0.6/src/main/java/com/mckoi/database/FunctionLookup.java000066400000000000000000000027051330501023400255430ustar00rootroot00000000000000/**
 * com.mckoi.database.FunctionLookup  07 Sep 2001
 *
 * Mckoi SQL Database ( http://www.mckoi.com/database )
 * Copyright (C) 2000-2018 Diehl and Associates, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */
package com.mckoi.database;

/**
 * An interface that resolves and generates a Function objects given a
 * FunctionDef object.
 *
 * @author Tobias Downer
 */
public interface FunctionLookup {

  /**
   * Generate the Function given a FunctionDef object.  Returns null if the
   * FunctionDef can not be resolved to a valid function object.  If the
   * specification of the function is invalid for some reason (the number or
   * type of the parameters is incorrect) then a StatementException is thrown.
   */
  Function generateFunction(FunctionDef function_def);

  /**
   * Returns true if the function defined by FunctionDef is an aggregate
   * function, or false otherwise.
   */
  boolean isAggregate(FunctionDef function_def);

}
mckoisqldb-1.0.6/src/main/java/com/mckoi/database/FunctionTable.java000066400000000000000000000631421330501023400253230ustar00rootroot00000000000000/**
 * com.mckoi.database.FunctionTable  12 Jul 2000
 *
 * Mckoi SQL Database ( http://www.mckoi.com/database )
 * Copyright (C) 2000-2018 Diehl and Associates, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
*
 */
package com.mckoi.database;

import com.mckoi.database.global.Types;
import com.mckoi.database.global.ByteLongObject;
import com.mckoi.util.Cache;
import com.mckoi.util.IntegerVector;
import com.mckoi.debug.*;
import com.mckoi.util.BigNumber;
import java.util.Date;

/**
 * A table that has a number of columns and as many rows as the referring
 * table.  Tables of this type are used to construct aggregate and function
 * columns based on an expression.  They are joined with the result table in
 * the last part of the query processing.
 * <p>
 * For example, a query like 'select id, id * 2, 8 * 9 from Part' the
 * columns 'id * 2' and '8 * 9' would be formed from this table.
 * <p>
 * SYNCHRONIZATION ISSUE: Instances of this object are NOT thread safe.  The
 * reason it's not is because if 'getCellContents' is used concurrently it's
 * possible for the same value to be added into the cache causing an error.
 * It is not expected that this object will be shared between threads.
 *
 * @author Tobias Downer
 */
public class FunctionTable extends DefaultDataTable {

  /**
   * The key used to make distinct unique ids for FunctionTables.
   * <p>
   * NOTE: This is a thread-safe static mutable variable.
   */
  private static int UNIQUE_KEY_SEQ = 0;

  /**
   * The table name given to all function tables.
   */
  private static final TableName FUNCTION_TABLE_NAME =
                                      new TableName(null, "FUNCTIONTABLE");

  /**
   * A unique id given to this FunctionTable when it is created.  No two
   * FunctionTable objects may have the same number.  This number is between
   * 0 and 260 million.
   */
  private int unique_id;

  /**
   * The DataTableDef object that describes the columns in this function
   * table.
   */
  private DataTableDef fun_table_def;

  /**
   * The table that this function table cross references.  This is not a
   * parent table, but more like the table we will eventually be joined with.
   */
  private Table cross_ref_table;

  /**
   * The TableVariableResolver for the table we are cross referencing.
   */
  private TableVariableResolver cr_resolver;

  /**
   * The TableGroupResolver for the table.
   */
  private TableGroupResolver group_resolver;

  /**
   * The list of expressions that are evaluated to form each column.
   */
  private Expression[] exp_list;

  /**
   * Some information about the expression list.  If the value is 0 then the
   * column is simple to solve and shouldn't be cached.
   */
  private byte[] exp_info;

  /**
   * The lookup mapping for row->group_index used for grouping.
   */
  private IntegerVector group_lookup;

  /**
   * The group row links.  Iterate through this to find all the rows in a
   * group until bit 31 set.
   */
  private IntegerVector group_links;

  /**
   * Whether the whole table is a group.
   */
  private boolean whole_table_as_group = false;

  /**
   * If the whole table is a group, this is the grouping rows.  This is
   * obtained via 'selectAll' of the reference table.
   */
  private IntegerVector whole_table_group;

  /**
   * The total size of the whole table group size.
   */
  private int whole_table_group_size;

  /**
   * If the whole table is a simple enumeration (row index is 0 to getRowCount)
   * then this is true.
   */
  private boolean whole_table_is_simple_enum;

  /**
   * The context of this function table.
   */
  private QueryContext context;


  /**
   * Constructs the FunctionTable.
   */
  public FunctionTable(Table cross_ref_table, Expression[] in_exp_list,
                       String[] col_names, DatabaseQueryContext context) {
    super(context.getDatabase());

    // Make sure we are synchronized over the class.
    synchronized(FunctionTable.class) {
      unique_id = UNIQUE_KEY_SEQ;
      ++UNIQUE_KEY_SEQ;
    }
    // Fold the sequence value into the range 0x010000000 - 0x01FFFFFFF.
    unique_id = (unique_id & 0x0FFFFFFF) | 0x010000000;

    this.context = context;

    this.cross_ref_table = cross_ref_table;
    cr_resolver = cross_ref_table.getVariableResolver();
    cr_resolver.setRow(0);

    // Create a DataTableDef object for this function table.
    fun_table_def = new DataTableDef();
    fun_table_def.setTableName(FUNCTION_TABLE_NAME);

    exp_list = new Expression[in_exp_list.length];
    exp_info = new byte[in_exp_list.length];

    // Create a new DataTableColumnDef for each expression, and work out if the
    // expression is simple or not.
    for (int i = 0; i < in_exp_list.length; ++i) {
      Expression expr = in_exp_list[i];
      // Examine the expression and determine if it is simple or not
      if (expr.isConstant() && !expr.hasAggregateFunction(context)) {
        // If expression is a constant, solve it
        TObject result = expr.evaluate(null, null, context);
        expr = new Expression(result);
        exp_list[i] = expr;
        exp_info[i] = 1;
      }
      else {
        // Otherwise must be dynamic
        exp_list[i] = expr;
        exp_info[i] = 0;
      }
      // Make the column def
      DataTableColumnDef column = new DataTableColumnDef();
      column.setName(col_names[i]);
      column.setFromTType(expr.returnTType(cr_resolver, context));
      fun_table_def.addVirtualColumn(column);
    }

    // Make sure the table def isn't changed from this point on.
    fun_table_def.setImmutable();

    // Function tables are the size of the referring table.
    row_count = cross_ref_table.getRowCount();

    // Set schemes to 'blind search'.
blankSelectableSchemes(1);
  }

  public FunctionTable(Expression[] exp_list, String[] col_names,
                       DatabaseQueryContext context) {
    this(context.getDatabase().getSingleRowTable(), exp_list, col_names,
         context);
  }

  /**
   * Return a TObject that represents the value of the 'column', 'row' of
   * this table.  If 'cache' is not null then the resultant value is added to
   * the cache.  If 'cache' is null, no caching happens.
   */
  private TObject calcValue(int column, int row, DataCellCache cache) {

    cr_resolver.setRow(row);
    if (group_resolver != null) {
      group_resolver.setUpGroupForRow(row);
    }
    Expression expr = exp_list[column];
    TObject cell = expr.evaluate(group_resolver, cr_resolver, context);
    if (cache != null) {
      cache.put(unique_id, row, column, cell);
    }
    return cell;
  }

  // ------ Public methods ------

  /**
   * Sets the whole reference table as a single group.
   */
  public void setWholeTableAsGroup() {
    whole_table_as_group = true;

    whole_table_group_size = getReferenceTable().getRowCount();

    // Set up 'whole_table_group' to the list of all rows in the reference
    // table.
    RowEnumeration en = getReferenceTable().rowEnumeration();
    whole_table_is_simple_enum = en instanceof SimpleRowEnumeration;
    if (!whole_table_is_simple_enum) {
      whole_table_group = new IntegerVector(getReferenceTable().getRowCount());
      while (en.hasMoreRows()) {
        whole_table_group.addInt(en.nextRowIndex());
      }
    }

    // Set up a group resolver for this method.
    group_resolver = new TableGroupResolver();
  }

  /**
   * Creates a grouping matrix for the given tables.  The grouping matrix
   * is arranged so that each row of the referring table that is in the
   * group is given a number that refers to the top group entry in the
   * group list.  The group list is a linked integer list that chains through
   * each row item in the list.
   */
  public void createGroupMatrix(Variable[] col_list) {
    // If we have zero rows, then don't bother creating the matrix.
    if (getRowCount() <= 0 || col_list.length <= 0) {
      return;
    }

    Table root_table = getReferenceTable();
    int r_count = root_table.getRowCount();

    int[] col_lookup = new int[col_list.length];
    for (int i = col_list.length - 1; i >= 0; --i) {
      col_lookup[i] = root_table.findFieldName(col_list[i]);
    }

    IntegerVector row_list = root_table.orderedRowList(col_lookup);

    // 'row_list' now contains rows in this table sorted by the columns to
    // group by.

    // This algorithm will generate two lists.  The group_lookup list maps
    // from rows in this table to the group number the row belongs in.  The
    // group number can be used as an index to the 'group_links' list that
    // contains consecutive links to each row in the group until the
    // end-of-group marker is reached indicating the end of the group;

    group_lookup = new IntegerVector(r_count);
    group_links = new IntegerVector(r_count);
    int current_group = 0;
    int previous_row = -1;
    for (int i = 0; i < r_count; ++i) {
      int row_index = row_list.intAt(i);

      if (previous_row != -1) {

        boolean equal = true;
        // Compare cell in column in this row with previous row.
        for (int n = 0; n < col_lookup.length && equal; ++n) {
          TObject c1 = root_table.getCellContents(col_lookup[n], row_index);
          TObject c2 =
                    root_table.getCellContents(col_lookup[n], previous_row);
          equal = equal && (c1.compareTo(c2) == 0);
        }

        if (!equal) {
          // If end of group, tag the row with the end-of-group marker bit
          // (0x040000000).
          group_links.addInt(previous_row | 0x040000000);
          current_group = group_links.size();
        }
        else {
          group_links.addInt(previous_row);
        }
      }

      group_lookup.placeIntAt(current_group, row_index);   // (val, pos)

      previous_row = row_index;
    }
    // Add the final row.
    group_links.addInt(previous_row | 0x040000000);

    // Set up a group resolver for this method.
    group_resolver = new TableGroupResolver();
  }

  // ------ Methods intended for use by grouping functions ------

  /**
   * Returns the Table this function is based on.  We need to provide this
   * method for aggregate functions.
   */
  public Table getReferenceTable() {
    return cross_ref_table;
  }

  /**
   * Returns the group of the row at the given index.
   */
  public int rowGroup(int row_index) {
    return group_lookup.intAt(row_index);
  }

  /**
   * The size of the group with the given number.
   */
  public int groupSize(int group_number) {
    int group_size = 1;
    int i = group_links.intAt(group_number);
    // Walk the link chain until the end-of-group marker bit is set.
    while ((i & 0x040000000) == 0) {
      ++group_size;
      ++group_number;
      i = group_links.intAt(group_number);
    }
    return group_size;
  }

  /**
   * Returns an IntegerVector that represents the list of all rows in the
   * group the index is at.
   */
  public IntegerVector groupRows(int group_number) {
    IntegerVector ivec = new IntegerVector();
    int i = group_links.intAt(group_number);
    while ((i & 0x040000000) == 0) {
      ivec.addInt(i);
      ++group_number;
      i = group_links.intAt(group_number);
    }
    // The final entry carries the marker bit; mask it off to get the row.
    ivec.addInt(i & 0x03FFFFFFF);
    return ivec;
  }

  /**
   * Returns a Table that is this function table merged with the cross
   * reference table.  The result table includes only one row from each
   * group.
   *

* The 'max_column' argument is optional (can be null).  If it's set to a
   * column in the reference table, then the row with the max value from the
   * group is used as the group row.  For example, 'Part.id' will return the
   * row with the maximum part.id from each group.
   */
  public Table mergeWithReference(Variable max_column) {
    Table table = getReferenceTable();

    IntegerVector row_list;

    if (whole_table_as_group) {
      // Whole table is group, so take top entry of table.

      row_list = new IntegerVector(1);
      RowEnumeration row_enum = table.rowEnumeration();
      if (row_enum.hasMoreRows()) {
        row_list.addInt(row_enum.nextRowIndex());
      }
      else {
        // MAJOR HACK: If the referencing table has no elements then we choose
        //   an arbitrary index from the reference table to merge so we have
        //   at least one element in the table.
        //   This is to fix the 'SELECT COUNT(*) FROM empty_table' bug.
        row_list.addInt(Integer.MAX_VALUE - 1);
      }
    }
    else if (table.getRowCount() == 0) {
      row_list = new IntegerVector(0);
    }
    else if (group_links != null) {
      // If we are grouping, reduce down to only include one row from each
      // group.
      if (max_column == null) {
        row_list = topFromEachGroup();
      }
      else {
        int col_num = getReferenceTable().findFieldName(max_column);
        row_list = maxFromEachGroup(col_num);
      }
    }
    else {
      // OPTIMIZATION: This should be optimized.  It should be fairly trivial
      //   to generate a Table implementation that efficiently merges this
      //   function table with the reference table.

      // This means there is no grouping, so merge with entire table,
      int r_count = table.getRowCount();
      row_list = new IntegerVector(r_count);
      RowEnumeration en = table.rowEnumeration();
      while (en.hasMoreRows()) {
        row_list.addInt(en.nextRowIndex());
      }
    }

    // Create a virtual table that's the new group table merged with the
    // functions in this...

    Table[] tabs = new Table[] { table, this };
    IntegerVector[] row_sets = new IntegerVector[] { row_list, row_list };

    VirtualTable out_table = new VirtualTable(tabs);
    out_table.set(tabs, row_sets);

    // Output this as debugging information
    if (DEBUG_QUERY) {
      if (Debug().isInterestedIn(Lvl.INFORMATION)) {
        Debug().write(Lvl.INFORMATION, this,
                      out_table + " = " + this + ".mergeWithReference(" +
                      getReferenceTable() + ", " + max_column + " )");
      }
    }

    table = out_table;
    return table;
  }

  // ------ Package protected methods -----

  /**
   * Returns a list of rows that represent one row from each distinct group
   * in this table.  This should be used to construct a virtual table of
   * rows from each distinct group.
   */
  IntegerVector topFromEachGroup() {
    IntegerVector extract_rows = new IntegerVector();
    int size = group_links.size();

    boolean take = true;
    for (int i = 0; i < size; ++i) {
      int r = group_links.intAt(i);
      if (take) {
        // Mask off the end-of-group marker bit to get the raw row index.
        extract_rows.addInt(r & 0x03FFFFFFF);
      }
      // Only take the entry immediately following an end-of-group marker.
      if ((r & 0x040000000) == 0) {
        take = false;
      }
      else {
        take = true;
      }
    }

    return extract_rows;
  }

  /**
   * Returns a list of rows that represent the maximum row of the given column
   * from each distinct group in this table.  This should be used to construct
   * a virtual table of rows from each distinct group.
   */
  IntegerVector maxFromEachGroup(int col_num) {
    final Table ref_tab = getReferenceTable();

    IntegerVector extract_rows = new IntegerVector();
    int size = group_links.size();

    int to_take_in_group = -1;
    TObject max = null;

    boolean take = true;
    for (int i = 0; i < size; ++i) {
      int r = group_links.intAt(i);

      int act_r_index = r & 0x03FFFFFFF;
      TObject cell = ref_tab.getCellContents(col_num, act_r_index);
      if (max == null || cell.compareTo(max) > 0) {
        max = cell;
        to_take_in_group = act_r_index;
      }
      // At the end-of-group marker, emit the running maximum and reset.
      if ((r & 0x040000000) != 0) {
        extract_rows.addInt(to_take_in_group);
        max = null;
      }
    }

    return extract_rows;
  }

  // ------ Methods that are implemented for Table interface ------

  /**
   * Returns the DataTableDef object that represents the columns in this
   * function table.
   */
  public DataTableDef getDataTableDef() {
    return fun_table_def;
  }

  /**
   * Returns an object that represents the information in the given cell
   * in the table.  This can be used to obtain information about the given
   * table cells.
   */
  public TObject getCellContents(int column, int row) {

    // [ FUNCTION TABLE CACHING NOW USES THE GLOBAL CELL CACHING MECHANISM ]

    // Check if in the cache,
    DataCellCache cache = getDatabase().getDataCellCache();
    // Is the column worth caching, and is caching enabled?
    if (exp_info[column] == 0 && cache != null) {
      TObject cell = cache.get(unique_id, row, column);
      if (cell != null) {
        // In the cache so return the cell.
        return cell;
      }
      else {
        // Not in the cache so calculate the value and put it in the cache.
        cell = calcValue(column, row, cache);
        return cell;
      }
    }
    else {
      // Caching is not enabled
      return calcValue(column, row, null);
    }
  }

  /**
   * Returns an Enumeration of the rows in this table.
   * Each call to 'nextRowIndex' returns the next valid row index in the table.
   */
  public RowEnumeration rowEnumeration() {
    return new SimpleRowEnumeration(row_count);
  }

  /**
   * Adds a DataTableListener to the DataTable objects at the root of this
   * table tree hierarchy.
If this table represents the join of a number of * tables then the DataTableListener is added to all the DataTable objects * at the root. *

* A DataTableListener is notified of all modifications to the raw entries * of the table. This listener can be used for detecting changes in VIEWs, * for triggers or for caching of common queries. */ void addDataTableListener(DataTableListener listener) { // Add a data table listener to the reference table. // NOTE: This will cause the reference table to have the same listener // registered twice if the 'mergeWithReference' method is used. While // this isn't perfect behaviour, it means if 'mergeWithReference' isn't // used, we still will be notified of changes in the reference table // which will alter the values in this table. getReferenceTable().addDataTableListener(listener); } /** * Removes a DataTableListener from the DataTable objects at the root of * this table tree hierarchy. If this table represents the join of a * number of tables, then the DataTableListener is removed from all the * DataTable objects at the root. */ void removeDataTableListener(DataTableListener listener) { // Removes a data table listener to the reference table. // ( see notes above... ) getReferenceTable().removeDataTableListener(listener); } /** * Locks the root table(s) of this table so that it is impossible to * overwrite the underlying rows that may appear in this table. * This is used when cells in the table need to be accessed 'outside' the * lock. So we may have late access to cells in the table. * 'lock_key' is a given key that will also unlock the root table(s). * NOTE: This is nothing to do with the 'LockingMechanism' object. */ public void lockRoot(int lock_key) { // We lock the reference table. // NOTE: This cause the reference table to lock twice when we use the // 'mergeWithReference' method. While this isn't perfect behaviour, it // means if 'mergeWithReference' isn't used, we still maintain a safe // level of locking. 
getReferenceTable().lockRoot(lock_key); } /** * Unlocks the root tables so that the underlying rows may * once again be used if they are not locked and have been removed. This * should be called some time after the rows have been locked. */ public void unlockRoot(int lock_key) { // We unlock the reference table. // NOTE: This cause the reference table to unlock twice when we use the // 'mergeWithReference' method. While this isn't perfect behaviour, it // means if 'mergeWithReference' isn't used, we still maintain a safe // level of locking. getReferenceTable().unlockRoot(lock_key); } /** * Returns true if the table has its row roots locked (via the lockRoot(int) * method. */ public boolean hasRootsLocked() { return getReferenceTable().hasRootsLocked(); } // ---------- Convenience statics ---------- /** * Returns a FunctionTable that has a single Expression evaluated in it. * The column name is 'result'. */ public static Table resultTable(DatabaseQueryContext context, Expression expression) { Expression[] exp = new Expression[] { expression }; String[] names = new String[] { "result" }; Table function_table = new FunctionTable(exp, names, context); SubsetColumnTable result = new SubsetColumnTable(function_table); int[] map = new int[] { 0 }; Variable[] vars = new Variable[] { new Variable("result") }; result.setColumnMap(map, vars); return result; } /** * Returns a FunctionTable that has a single TObject in it. * The column title is 'result'. */ public static Table resultTable(DatabaseQueryContext context, TObject ob) { Expression result_exp = new Expression(); result_exp.addElement(ob); return resultTable(context, result_exp); } /** * Returns a FunctionTable that has a single Object in it. * The column title is 'result'. */ public static Table resultTable(DatabaseQueryContext context, Object ob) { return resultTable(context, TObject.objectVal(ob)); } /** * Returns a FunctionTable that has an int value made into a BigDecimal. * The column title is 'result'. 
*/ public static Table resultTable(DatabaseQueryContext context, int result_val) { return resultTable(context, BigNumber.fromInt(result_val)); } // ---------- Inner classes ---------- /** * Group resolver. This is used to resolve group information in the * refering table. */ final class TableGroupResolver implements GroupResolver { /** * The IntegerVector that represents the group we are currently * processing. */ private IntegerVector group; // /** // * The group row index we are current set at. // */ // private int group_row_index; /** * The current group number. */ private int group_number = -1; /** * A VariableResolver that can resolve variables within a set of a group. */ private TableGVResolver tgv_resolver; /** * Creates a resolver that resolves variables within a set of the group. */ private TableGVResolver createVariableResolver() { if (tgv_resolver != null) { return tgv_resolver; } tgv_resolver = new TableGVResolver(); return tgv_resolver; } /** * Ensures that 'group' is set up. */ private void ensureGroup() { if (group == null) { if (group_number == -2) { group = whole_table_group; // // ISSUE: Unsafe calls if reference table is a DataTable. // group = new IntegerVector(getReferenceTable().getRowCount()); // RowEnumeration renum = getReferenceTable().rowEnumeration(); // while (renum.hasMoreRows()) { // group.addInt(renum.nextRowIndex()); // } } else { group = groupRows(group_number); } } } /** * Given a row index, this will setup the information in this resolver * to solve for this group. */ public void setUpGroupForRow(int row_index) { if (whole_table_as_group) { if (group_number != -2) { group_number = -2; group = null; } } else { int g = rowGroup(row_index); if (g != group_number) { group_number = g; group = null; } } } public int groupID() { return group_number; } public int size() { if (group_number == -2) { return whole_table_group_size; // return whole_table_group.size(); // // ISSUE: Unsafe call if reference table is a DataTable. 
// return getReferenceTable().getRowCount(); } else if (group != null) { return group.size(); } else { return groupSize(group_number); } } public TObject resolve(Variable variable, int set_index) { // String col_name = variable.getName(); int col_index = getReferenceTable().fastFindFieldName(variable); if (col_index == -1) { throw new Error("Can't find column: " + variable); } ensureGroup(); int row_index = set_index; if (group != null) { row_index = group.intAt(set_index); } TObject cell = getReferenceTable().getCellContents(col_index, row_index); return cell; } public VariableResolver getVariableResolver(int set_index) { TableGVResolver resolver = createVariableResolver(); resolver.setIndex(set_index); return resolver; } // ---------- Inner classes ---------- private class TableGVResolver implements VariableResolver { private int set_index; void setIndex(int set_index) { this.set_index = set_index; } // ---------- Implemented from VariableResolver ---------- public int setID() { throw new Error("setID not implemented here..."); } public TObject resolve(Variable variable) { return TableGroupResolver.this.resolve(variable, set_index); } public TType returnTType(Variable variable) { int col_index = getReferenceTable().fastFindFieldName(variable); if (col_index == -1) { throw new Error("Can't find column: " + variable); } return getReferenceTable().getDataTableDef().columnAt( col_index).getTType(); } } } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/GTConnectionInfoDataSource.java000066400000000000000000000074371330501023400277140ustar00rootroot00000000000000/** * com.mckoi.database.GTConnectionInfoDataSource 23 Mar 2002 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import java.util.ArrayList; /** * An implementation of MutableTableDataSource that presents the current * connection information. *

* NOTE: This is not designed to be a long kept object. It must not last * beyond the lifetime of a transaction. * * @author Tobias Downer */ final class GTConnectionInfoDataSource extends GTDataSource { /** * The DatabaseConnection object that this is table is modelling the * information within. */ private DatabaseConnection database; /** * The list of info keys/values in this object. */ private ArrayList key_value_pairs; /** * Constructor. */ public GTConnectionInfoDataSource(DatabaseConnection connection) { super(connection.getSystem()); this.database = connection; this.key_value_pairs = new ArrayList(); } /** * Initialize the data source. */ public GTConnectionInfoDataSource init() { // Set up the connection info variables. key_value_pairs.add("auto_commit"); key_value_pairs.add(database.getAutoCommit() ? "true" : "false"); key_value_pairs.add("isolation_level"); key_value_pairs.add(database.getTransactionIsolationAsString()); key_value_pairs.add("user"); key_value_pairs.add(database.getUser().getUserName()); key_value_pairs.add("time_connection"); key_value_pairs.add(new java.sql.Timestamp( database.getUser().getTimeConnected()).toString()); key_value_pairs.add("connection_string"); key_value_pairs.add(database.getUser().getConnectionString()); key_value_pairs.add("current_schema"); key_value_pairs.add(database.getCurrentSchema()); key_value_pairs.add("case_insensitive_identifiers"); key_value_pairs.add(database.isInCaseInsensitiveMode() ? 
"true" : "false"); return this; } // ---------- Implemented from GTDataSource ---------- public DataTableDef getDataTableDef() { return DEF_DATA_TABLE_DEF; } public int getRowCount() { return key_value_pairs.size() / 2; } public TObject getCellContents(final int column, final int row) { switch (column) { case 0: // var return columnValue(column, (String) key_value_pairs.get(row * 2)); case 1: // value return columnValue(column, (String) key_value_pairs.get((row * 2) + 1)); default: throw new Error("Column out of bounds."); } } // ---------- Overwritten from GTDataSource ---------- public void dispose() { super.dispose(); key_value_pairs = null; database = null; } // ---------- Static ---------- /** * The data table def that describes this table of data source. */ static final DataTableDef DEF_DATA_TABLE_DEF; static { DataTableDef def = new DataTableDef(); def.setTableName( new TableName(Database.SYSTEM_SCHEMA, "sUSRConnectionInfo")); // Add column definitions def.addColumn(stringColumn("var")); def.addColumn(stringColumn("value")); // Set to immutable def.setImmutable(); DEF_DATA_TABLE_DEF = def; } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/GTCurrentConnectionsDataSource.java000066400000000000000000000074571330501023400306300ustar00rootroot00000000000000/** * com.mckoi.database.GTCurrentConnectionsDataSource 23 Mar 2002 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import java.util.ArrayList; import java.util.Date; /** * An implementation of MutableTableDataSource that presents the current * list of connections on the database. *

* NOTE: This is not designed to be a long kept object. It must not last * beyond the lifetime of a transaction. * * @author Tobias Downer */ final class GTCurrentConnectionsDataSource extends GTDataSource { /** * The DatabaseConnection object that this is table is modelling the * information within. */ private DatabaseConnection database; /** * The list of info keys/values in this object. */ private ArrayList key_value_pairs; /** * Constructor. */ public GTCurrentConnectionsDataSource(DatabaseConnection connection) { super(connection.getSystem()); this.database = connection; this.key_value_pairs = new ArrayList(); } /** * Initialize the data source. */ public GTCurrentConnectionsDataSource init() { UserManager user_manager = database.getDatabase().getUserManager(); // Synchronize over the user manager while we inspect the information, synchronized (user_manager) { for (int i = 0; i < user_manager.userCount(); ++i) { User user = user_manager.userAt(i); key_value_pairs.add(user.getUserName()); key_value_pairs.add(user.getConnectionString()); key_value_pairs.add(new Date(user.getLastCommandTime())); key_value_pairs.add(new Date(user.getTimeConnected())); } } return this; } // ---------- Implemented from GTDataSource ---------- public DataTableDef getDataTableDef() { return DEF_DATA_TABLE_DEF; } public int getRowCount() { return key_value_pairs.size() / 4; } public TObject getCellContents(final int column, final int row) { switch (column) { case 0: // username return columnValue(column, (String) key_value_pairs.get(row * 4)); case 1: // host_string return columnValue(column, (String) key_value_pairs.get((row * 4) + 1)); case 2: // last_command return columnValue(column, (Date) key_value_pairs.get((row * 4) + 2)); case 3: // time_connected return columnValue(column, (Date) key_value_pairs.get((row * 4) + 3)); default: throw new Error("Column out of bounds."); } } // ---------- Overwritten from GTDataSource ---------- public void dispose() { super.dispose(); 
key_value_pairs = null; database = null; } // ---------- Static ---------- /** * The data table def that describes this table of data source. */ static final DataTableDef DEF_DATA_TABLE_DEF; static { DataTableDef def = new DataTableDef(); def.setTableName( new TableName(Database.SYSTEM_SCHEMA, "sUSRCurrentConnections")); // Add column definitions def.addColumn(stringColumn("username")); def.addColumn(stringColumn("host_string")); def.addColumn(dateColumn("last_command")); def.addColumn(dateColumn("time_connected")); // Set to immutable def.setImmutable(); DEF_DATA_TABLE_DEF = def; } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/GTDataSource.java000066400000000000000000000114121330501023400250440ustar00rootroot00000000000000/** * com.mckoi.database.GTDataSource 27 Apr 2001 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import com.mckoi.database.global.SQLTypes; /** * A base class for a dynamically generated data source. While this inherits * MutableTableDataSource (so we can make a DataTable out of it) a GTDataSource * derived class may not be mutable. For example, an implementation of this * class may produce a list of a columns in all tables. You would typically * not want a user to change this information unless they run a DML command. 
* * @author Tobias Downer */ abstract class GTDataSource implements MutableTableDataSource { /** * The TransactionSystem object for this table. */ private TransactionSystem system; /** * Constructor. */ public GTDataSource(TransactionSystem system) { this.system = system; } /** * Returns a TObject that represents a value for the given column in this * table. The Object must be of a compatible class to store in the type * of the column defined. */ protected TObject columnValue(int column, Object ob) { TType type = getDataTableDef().columnAt(column).getTType(); return new TObject(type, ob); } // ---------- Implemented from TableDataSource ---------- public TransactionSystem getSystem() { return system; } public abstract DataTableDef getDataTableDef(); public abstract int getRowCount(); public RowEnumeration rowEnumeration() { return new SimpleRowEnumeration(getRowCount()); } public SelectableScheme getColumnScheme(int column) { return new BlindSearch(this, column); } public abstract TObject getCellContents(final int column, final int row); // ---------- Implemented from MutableTableDataSource ---------- public int addRow(RowData row_data) { throw new RuntimeException("Functionality not available."); } public void removeRow(int row_index) { throw new RuntimeException("Functionality not available."); } public int updateRow(int row_index, RowData row_data) { throw new RuntimeException("Functionality not available."); } public MasterTableJournal getJournal() { throw new RuntimeException("Functionality not available."); } public void flushIndexChanges() { throw new RuntimeException("Functionality not available."); } public void constraintIntegrityCheck() { throw new RuntimeException("Functionality not available."); } public void dispose() { } public void addRootLock() { // No need to lock roots } public void removeRootLock() { // No need to lock roots } // ---------- Static ---------- /** * Convenience methods for constructing a DataTableDef for the dynamically * 
generated table. */ protected static DataTableColumnDef stringColumn(String name) { DataTableColumnDef column = new DataTableColumnDef(); column.setName(name); column.setNotNull(true); column.setSQLType(SQLTypes.VARCHAR); column.setSize(Integer.MAX_VALUE); column.setScale(-1); column.setIndexScheme("BlindSearch"); column.initTTypeInfo(); return column; } protected static DataTableColumnDef booleanColumn(String name) { DataTableColumnDef column = new DataTableColumnDef(); column.setName(name); column.setNotNull(true); column.setSQLType(SQLTypes.BIT); column.setSize(-1); column.setScale(-1); column.setIndexScheme("BlindSearch"); column.initTTypeInfo(); return column; } protected static DataTableColumnDef numericColumn(String name) { DataTableColumnDef column = new DataTableColumnDef(); column.setName(name); column.setNotNull(true); column.setSQLType(SQLTypes.NUMERIC); column.setSize(-1); column.setScale(-1); column.setIndexScheme("BlindSearch"); column.initTTypeInfo(); return column; } protected static DataTableColumnDef dateColumn(String name) { DataTableColumnDef column = new DataTableColumnDef(); column.setName(name); column.setNotNull(true); column.setSQLType(SQLTypes.TIMESTAMP); column.setSize(-1); column.setScale(-1); column.setIndexScheme("BlindSearch"); column.initTTypeInfo(); return column; } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/GTPrivMapDataSource.java000066400000000000000000000076611330501023400263560ustar00rootroot00000000000000/** * com.mckoi.database.GTPrivMapDataSource 26 Aug 2002 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import java.util.ArrayList; import com.mckoi.util.BigNumber; /** * A GTDataSource that maps a Privs 11-bit set to strings that represent the * priv in human understandable string. Each 11-bit priv set contains 12 * entries for each bit that was set. *

* This table provides a convenient way to join the system grant table and * 'expand' the privs that are allowed though it. * * @author Tobias Downer */ public class GTPrivMapDataSource extends GTDataSource { /** * Number of bits. */ private static int BIT_COUNT = Privileges.BIT_COUNT; /** * Constructor. */ public GTPrivMapDataSource(DatabaseConnection connection) { super(connection.getSystem()); } // ---------- Implemented from GTDataSource ---------- public DataTableDef getDataTableDef() { return DEF_DATA_TABLE_DEF; } public int getRowCount() { return (1 << BIT_COUNT) * BIT_COUNT; } public TObject getCellContents(final int column, final int row) { int c1 = row / BIT_COUNT; if (column == 0) { return columnValue(column, BigNumber.fromInt(c1)); } else { int priv_bit = (1 << (row % BIT_COUNT)); String priv_string = null; if ((c1 & priv_bit) != 0) { priv_string = Privileges.formatPriv(priv_bit); } return columnValue(column, priv_string); } } // ---------- Overwritten from GTDataSource ---------- public SelectableScheme getColumnScheme(int column) { if (column == 0) { return new PrivMapSearch(this, column); } else { return new BlindSearch(this, column); } } // ---------- Static ---------- /** * The data table def that describes this table of data source. */ static final DataTableDef DEF_DATA_TABLE_DEF; static { DataTableDef def = new DataTableDef(); def.setTableName( new TableName(Database.SYSTEM_SCHEMA, "sUSRPrivMap")); // Add column definitions def.addColumn(numericColumn("priv_bit")); def.addColumn( stringColumn("description")); // Set to immutable def.setImmutable(); DEF_DATA_TABLE_DEF = def; } // ---------- Inner classes ---------- /** * A SelectableScheme that makes searching on the 'priv_bit' column a lot * less painless! 
*/ private static final class PrivMapSearch extends CollatedBaseSearch { PrivMapSearch(TableDataSource table, int column) { super(table, column); } public SelectableScheme copy(TableDataSource table, boolean immutable) { // Return a fresh object. This implementation has no state so we can // ignore the 'immutable' flag. return new BlindSearch(table, getColumn()); } protected int searchFirst(TObject val) { if (val.isNull()) { return -1; } int num = ((BigNumber) val.getObject()).intValue(); if (num < 0) { return -1; } else if (num > (1 << BIT_COUNT)) { return -(((1 << BIT_COUNT) * BIT_COUNT) + 1); } return (num * BIT_COUNT); } protected int searchLast(TObject val) { int p = searchFirst(val); if (p >= 0) { return p + (BIT_COUNT - 1); } else { return p; } } } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/GTProductDataSource.java000066400000000000000000000060221330501023400264060ustar00rootroot00000000000000/** * com.mckoi.database.GTProductDataSource 23 Mar 2002 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import java.util.ArrayList; import com.mckoi.database.global.StandardMessages; /** * An implementation of MutableTableDataSource that models information about * the software. *

* NOTE: This is not designed to be a long kept object. It must not last * beyond the lifetime of a transaction. * * @author Tobias Downer */ final class GTProductDataSource extends GTDataSource { /** * The list of info keys/values in this object. */ private ArrayList key_value_pairs; /** * Constructor. */ public GTProductDataSource(Transaction transaction) { super(transaction.getSystem()); this.key_value_pairs = new ArrayList(); } /** * Initialize the data source. */ public GTProductDataSource init() { // Set up the product variables. key_value_pairs.add("name"); key_value_pairs.add(StandardMessages.NAME); key_value_pairs.add("version"); key_value_pairs.add(StandardMessages.VERSION); key_value_pairs.add("copyright"); key_value_pairs.add(StandardMessages.COPYRIGHT); return this; } // ---------- Implemented from GTDataSource ---------- public DataTableDef getDataTableDef() { return DEF_DATA_TABLE_DEF; } public int getRowCount() { return key_value_pairs.size() / 2; } public TObject getCellContents(final int column, final int row) { switch (column) { case 0: // var return columnValue(column, (String) key_value_pairs.get(row * 2)); case 1: // value return columnValue(column, (String) key_value_pairs.get((row * 2) + 1)); default: throw new Error("Column out of bounds."); } } // ---------- Overwritten from GTDataSource ---------- public void dispose() { super.dispose(); key_value_pairs = null; } // ---------- Static ---------- /** * The data table def that describes this table of data source. 
*/ static final DataTableDef DEF_DATA_TABLE_DEF; static { DataTableDef def = new DataTableDef(); def.setTableName( new TableName(Database.SYSTEM_SCHEMA, "sUSRProductInfo")); // Add column definitions def.addColumn(stringColumn("var")); def.addColumn(stringColumn("value")); // Set to immutable def.setImmutable(); DEF_DATA_TABLE_DEF = def; } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/GTSQLTypeInfoDataSource.java000066400000000000000000000165331330501023400271130ustar00rootroot00000000000000/** * com.mckoi.database.GTSQLTypeInfoDataSource 23 Mar 2002 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import java.util.ArrayList; import com.mckoi.util.BigNumber; import com.mckoi.database.global.SQLTypes; /** * A GTDataSource that models all SQL types that are available. *

* NOTE: This is not designed to be a long kept object. It must not last * beyond the lifetime of a transaction. * * @author Tobias Downer */ public class GTSQLTypeInfoDataSource extends GTDataSource { /** * The DatabaseConnection object. Currently this is not used, but it may * be needed in the future if user-defined SQL types are supported. */ private DatabaseConnection database; /** * The list of info keys/values in this object. */ private ArrayList key_value_pairs; /** * Constant for type_nullable types. */ private static final BigNumber TYPE_NULLABLE = BigNumber.fromInt(java.sql.DatabaseMetaData.typeNullable); /** * Constructor. */ public GTSQLTypeInfoDataSource(DatabaseConnection connection) { super(connection.getSystem()); this.database = connection; this.key_value_pairs = new ArrayList(); } /** * Adds a type description. */ private void addType(String name, int type, int precision, String prefix, String suffix, String oops, boolean searchable) { key_value_pairs.add(name); key_value_pairs.add(BigNumber.fromLong(type)); key_value_pairs.add(BigNumber.fromLong(precision)); key_value_pairs.add(prefix); key_value_pairs.add(suffix); key_value_pairs.add(searchable ? BigNumber.fromLong(3) : BigNumber.fromLong(0)); } /** * Initialize the data source. 
*/ public GTSQLTypeInfoDataSource init() { addType("BIT", SQLTypes.BIT, 1, null, null, null, true); addType("BOOLEAN", SQLTypes.BIT, 1, null, null, null, true); addType("TINYINT", SQLTypes.TINYINT, 9, null, null, null, true); addType("SMALLINT", SQLTypes.SMALLINT, 9, null, null, null, true); addType("INTEGER", SQLTypes.INTEGER, 9, null, null, null, true); addType("BIGINT", SQLTypes.BIGINT, 9, null, null, null, true); addType("FLOAT", SQLTypes.FLOAT, 9, null, null, null, true); addType("REAL", SQLTypes.REAL, 9, null, null, null, true); addType("DOUBLE", SQLTypes.DOUBLE, 9, null, null, null, true); addType("NUMERIC", SQLTypes.NUMERIC, 9, null, null, null, true); addType("DECIMAL", SQLTypes.DECIMAL, 9, null, null, null, true); addType("CHAR", SQLTypes.CHAR, 9, "'", "'", null, true); addType("VARCHAR", SQLTypes.VARCHAR, 9, "'", "'", null, true); addType("LONGVARCHAR", SQLTypes.LONGVARCHAR, 9, "'", "'", null, true); addType("DATE", SQLTypes.DATE, 9, null, null, null, true); addType("TIME", SQLTypes.TIME, 9, null, null, null, true); addType("TIMESTAMP", SQLTypes.TIMESTAMP, 9, null, null, null, true); addType("BINARY", SQLTypes.BINARY, 9, null, null, null, false); addType("VARBINARY", SQLTypes.VARBINARY, 9, null, null, null, false); addType("LONGVARBINARY", SQLTypes.LONGVARBINARY, 9, null, null, null, false); addType("JAVA_OBJECT", SQLTypes.JAVA_OBJECT, 9, null, null, null, false); return this; } // ---------- Implemented from GTDataSource ---------- public DataTableDef getDataTableDef() { return DEF_DATA_TABLE_DEF; } public int getRowCount() { return key_value_pairs.size() / 6; } public TObject getCellContents(final int column, final int row) { int i = (row * 6); switch (column) { case 0: // type_name return columnValue(column, (String) key_value_pairs.get(i)); case 1: // data_type return columnValue(column, (BigNumber) key_value_pairs.get(i + 1)); case 2: // precision return columnValue(column, (BigNumber) key_value_pairs.get(i + 2)); case 3: // literal_prefix return 
columnValue(column, (String) key_value_pairs.get(i + 3)); case 4: // literal_suffix return columnValue(column, (String) key_value_pairs.get(i + 4)); case 5: // create_params return columnValue(column, null); case 6: // nullable return columnValue(column, TYPE_NULLABLE); case 7: // case_sensitive return columnValue(column, Boolean.TRUE); case 8: // searchable return columnValue(column, (BigNumber) key_value_pairs.get(i + 5)); case 9: // unsigned_attribute return columnValue(column, Boolean.FALSE); case 10: // fixed_prec_scale return columnValue(column, Boolean.FALSE); case 11: // auto_increment return columnValue(column, Boolean.FALSE); case 12: // local_type_name return columnValue(column, null); case 13: // minimum_scale return columnValue(column, BigNumber.fromLong(0)); case 14: // maximum_scale return columnValue(column, BigNumber.fromLong(10000000)); case 15: // sql_data_type return columnValue(column, null); case 16: // sql_datetype_sub return columnValue(column, null); case 17: // num_prec_radix return columnValue(column, BigNumber.fromLong(10)); default: throw new Error("Column out of bounds."); } } // ---------- Overwritten from GTDataSource ---------- public void dispose() { super.dispose(); key_value_pairs = null; database = null; } // ---------- Static ---------- /** * The data table def that describes this table of data source. 
*/ static final DataTableDef DEF_DATA_TABLE_DEF; static { DataTableDef def = new DataTableDef(); def.setTableName( new TableName(Database.SYSTEM_SCHEMA, "sUSRSQLTypeInfo")); // Add column definitions def.addColumn( stringColumn("TYPE_NAME")); def.addColumn(numericColumn("DATA_TYPE")); def.addColumn(numericColumn("PRECISION")); def.addColumn( stringColumn("LITERAL_PREFIX")); def.addColumn( stringColumn("LITERAL_SUFFIX")); def.addColumn( stringColumn("CREATE_PARAMS")); def.addColumn(numericColumn("NULLABLE")); def.addColumn(booleanColumn("CASE_SENSITIVE")); def.addColumn(numericColumn("SEARCHABLE")); def.addColumn(booleanColumn("UNSIGNED_ATTRIBUTE")); def.addColumn(booleanColumn("FIXED_PREC_SCALE")); def.addColumn(booleanColumn("AUTO_INCREMENT")); def.addColumn( stringColumn("LOCAL_TYPE_NAME")); def.addColumn(numericColumn("MINIMUM_SCALE")); def.addColumn(numericColumn("MAXIMUM_SCALE")); def.addColumn( stringColumn("SQL_DATA_TYPE")); def.addColumn( stringColumn("SQL_DATETIME_SUB")); def.addColumn(numericColumn("NUM_PREC_RADIX")); // Set to immutable def.setImmutable(); DEF_DATA_TABLE_DEF = def; } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/GTStatisticsDataSource.java000066400000000000000000000066401330501023400271260ustar00rootroot00000000000000/** * com.mckoi.database.GTStatisticsDataSource 28 Apr 2001 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package com.mckoi.database; import com.mckoi.util.Stats; /** * An implementation of MutableTableDataSource that presents database * statistical information. *

* NOTE: This is not designed to be a long kept object. It must not last * beyond the lifetime of a transaction. * * @author Tobias Downer */ final class GTStatisticsDataSource extends GTDataSource { /** * Contains all the statistics information for this session. */ private String[] statistics_info; /** * The system database stats. */ private Stats stats; /** * Constructor. */ public GTStatisticsDataSource(DatabaseConnection connection) { super(connection.getSystem()); stats = connection.getDatabase().stats(); } /** * Initialize the data source. */ public GTStatisticsDataSource init() { synchronized (stats) { stats.set((int) (Runtime.getRuntime().freeMemory() / 1024), "Runtime.memory.freeKB"); stats.set((int) (Runtime.getRuntime().totalMemory() / 1024), "Runtime.memory.totalKB"); String[] key_set = stats.keyList(); int glob_length = key_set.length * 2; statistics_info = new String[glob_length]; for (int i = 0; i < glob_length; i += 2) { String key_name = key_set[i / 2]; statistics_info[i] = key_name; statistics_info[i + 1] = stats.statString(key_name); } } return this; } // ---------- Implemented from GTDataSource ---------- public DataTableDef getDataTableDef() { return DEF_DATA_TABLE_DEF; } public int getRowCount() { return statistics_info.length / 2; } public TObject getCellContents(final int column, final int row) { switch (column) { case 0: // stat_name return columnValue(column, statistics_info[row * 2]); case 1: // value return columnValue(column, statistics_info[(row * 2) + 1]); default: throw new Error("Column out of bounds."); } } // ---------- Overwritten from GTDataSource ---------- public void dispose() { super.dispose(); statistics_info = null; stats = null; } // ---------- Static ---------- /** * The data table def that describes this table of data source. 
*/ static final DataTableDef DEF_DATA_TABLE_DEF; static { DataTableDef def = new DataTableDef(); def.setTableName( new TableName(Database.SYSTEM_SCHEMA, "sUSRDatabaseStatistics")); // Add column definitions def.addColumn(stringColumn("stat_name")); def.addColumn(stringColumn("value")); // Set to immutable def.setImmutable(); DEF_DATA_TABLE_DEF = def; } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/GTTableColumnsDataSource.java000066400000000000000000000120331330501023400273550ustar00rootroot00000000000000/** * com.mckoi.database.GTTableColumnsDataSource 27 Apr 2001 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import com.mckoi.util.BigNumber; import com.mckoi.database.global.SQLTypes; /** * An implementation of MutableTableDataSource that presents information * about the columns of all tables in all schema. *

* NOTE: This is not designed to be a long kept object. It must not last * beyond the lifetime of a transaction. * * @author Tobias Downer */ final class GTTableColumnsDataSource extends GTDataSource { /** * The transaction that is the view of this information. */ private Transaction transaction; /** * The list of all DataTableDef visible to the transaction. */ private DataTableDef[] visible_tables; /** * The number of rows in this table. */ private int row_count; /** * Constructor. */ public GTTableColumnsDataSource(Transaction transaction) { super(transaction.getSystem()); this.transaction = transaction; } /** * Initialize the data source. */ public GTTableColumnsDataSource init() { // All the tables TableName[] list = transaction.getTableList(); visible_tables = new DataTableDef[list.length]; row_count = 0; for (int i = 0; i < list.length; ++i) { DataTableDef def = transaction.getDataTableDef(list[i]); row_count += def.columnCount(); visible_tables[i] = def; } return this; } // ---------- Implemented from GTDataSource ---------- public DataTableDef getDataTableDef() { return DEF_DATA_TABLE_DEF; } public int getRowCount() { return row_count; } public TObject getCellContents(final int column, final int row) { final int sz = visible_tables.length; int rs = 0; for (int n = 0; n < sz; ++n) { final DataTableDef def = visible_tables[n]; final int b = rs; rs += def.columnCount(); if (row >= b && row < rs) { // This is the column that was requested, int seq_no = row - b; DataTableColumnDef col_def = def.columnAt(seq_no); switch (column) { case 0: // schema return columnValue(column, def.getSchema()); case 1: // table return columnValue(column, def.getName()); case 2: // column return columnValue(column, col_def.getName()); case 3: // sql_type return columnValue(column, BigNumber.fromLong(col_def.getSQLType())); case 4: // type_desc return columnValue(column, col_def.getSQLTypeString()); case 5: // size return columnValue(column, BigNumber.fromLong(col_def.getSize())); 
case 6: // scale return columnValue(column, BigNumber.fromLong(col_def.getScale())); case 7: // not_null return columnValue(column, new Boolean(col_def.isNotNull())); case 8: // default return columnValue(column, col_def.getDefaultExpressionString()); case 9: // index_str return columnValue(column, col_def.getIndexScheme()); case 10: // seq_no return columnValue(column, BigNumber.fromLong(seq_no)); default: throw new Error("Column out of bounds."); } } } // for each visible table throw new Error("Row out of bounds."); } // ---------- Overwritten ---------- public void dispose() { super.dispose(); visible_tables = null; transaction = null; } // ---------- Static ---------- /** * The data table def that describes this table of data source. */ static final DataTableDef DEF_DATA_TABLE_DEF; static { DataTableDef def = new DataTableDef(); def.setTableName( new TableName(Database.SYSTEM_SCHEMA, "sUSRTableColumns")); // Add column definitions def.addColumn(stringColumn("schema")); def.addColumn(stringColumn("table")); def.addColumn(stringColumn("column")); def.addColumn(numericColumn("sql_type")); def.addColumn(stringColumn("type_desc")); def.addColumn(numericColumn("size")); def.addColumn(numericColumn("scale")); def.addColumn(booleanColumn("not_null")); def.addColumn(stringColumn("default")); def.addColumn(stringColumn("index_str")); def.addColumn(numericColumn("seq_no")); // Set to immutable def.setImmutable(); DEF_DATA_TABLE_DEF = def; } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/GTTableInfoDataSource.java000066400000000000000000000074151330501023400266400ustar00rootroot00000000000000/** * com.mckoi.database.GTTableInfoDataSource 27 Apr 2001 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import java.util.Arrays; /** * An implementation of MutableTableDataSource that presents information * about the tables in all schema. *

* NOTE: This is not designed to be a long kept object. It must not last * beyond the lifetime of a transaction. * * @author Tobias Downer */ final class GTTableInfoDataSource extends GTDataSource { /** * The transaction that is the view of this information. */ private Transaction transaction; /** * The list of all TableName visible to the transaction. */ private TableName[] table_list; /** * The list of all table types that are visible. */ private String[] table_types; /** * The number of rows in this table. */ private int row_count; /** * Constructor. */ public GTTableInfoDataSource(Transaction transaction) { super(transaction.getSystem()); this.transaction = transaction; } /** * Initialize the data source. */ public GTTableInfoDataSource init() { // All the tables table_list = transaction.getTableList(); Arrays.sort(table_list); table_types = new String[table_list.length]; row_count = table_list.length; for (int i = 0; i < table_list.length; ++i) { String cur_type = transaction.getTableType(table_list[i]); // If the table is in the SYS_INFO schema, the type is defined as a // SYSTEM TABLE. if (cur_type.equals("TABLE") && table_list[i].getSchema().equals("SYS_INFO")) { cur_type = "SYSTEM TABLE"; } table_types[i] = cur_type; } return this; } // ---------- Implemented from GTDataSource ---------- public DataTableDef getDataTableDef() { return DEF_DATA_TABLE_DEF; } public int getRowCount() { return row_count; } public TObject getCellContents(final int column, final int row) { final TableName tname = table_list[row]; switch (column) { case 0: // schema return columnValue(column, tname.getSchema()); case 1: // name return columnValue(column, tname.getName()); case 2: // type return columnValue(column, table_types[row]); case 3: // other // Table notes, etc. 
(future enhancement) return columnValue(column, ""); default: throw new Error("Column out of bounds."); } } // ---------- Overwritten from GTDataSource ---------- public void dispose() { super.dispose(); table_list = null; transaction = null; } // ---------- Static ---------- /** * The data table def that describes this table of data source. */ static final DataTableDef DEF_DATA_TABLE_DEF; static { DataTableDef def = new DataTableDef(); def.setTableName(new TableName(Database.SYSTEM_SCHEMA, "sUSRTableInfo")); // Add column definitions def.addColumn(stringColumn("schema")); def.addColumn(stringColumn("name")); def.addColumn(stringColumn("type")); def.addColumn(stringColumn("other")); // Set to immutable def.setImmutable(); DEF_DATA_TABLE_DEF = def; } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/GrantManager.java000066400000000000000000000465441330501023400251430ustar00rootroot00000000000000/** * com.mckoi.database.GrantManager 23 Aug 2001 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import java.sql.*; import com.mckoi.util.IntegerVector; import com.mckoi.util.BigNumber; import com.mckoi.util.Cache; /** * A class that manages the grants on a database for a given database * connection and user. 
* * @author Tobias Downer */ public class GrantManager { // ---------- Statics ---------- /** * Represents a TABLE object to grant privs over for the user. */ public final static int TABLE = 1; /** * Represents a DOMAIN object to grant privs over for the user. */ public final static int DOMAIN = 2; // /** // * Represents a STORED PROCEDURE object to grant privs over for this user. // */ // public final static int STORED_PROCEDURE = 16; // // /** // * Represents a TRIGGER object to grant privs over for this user. // */ // public final static int TRIGGER = 17; // // /** // * Represents a custom SEQUENCE GENERATOR object to grant privs over. // */ // public final static int SEQUENCE_GENERATOR = 18; /** * Represents a SCHEMA object to grant privs over for the user. */ public final static int SCHEMA = 65; /** * Represents a CATALOG object to grant privs over for this user. */ public final static int CATALOG = 66; /** * The string representing the public user (privs granted to all users). */ public final static String PUBLIC_USERNAME_STR = "@PUBLIC"; /** * The name of the 'public' username. If a grant is made on 'public' then * all users are given the grant. */ public final static TObject PUBLIC_USERNAME = TObject.stringVal(PUBLIC_USERNAME_STR); // ---------- Members ---------- /** * The DatabaseConnection instance. */ private DatabaseConnection connection; /** * The QueryContext instance. */ private QueryContext context; /** * A cache of privileges for the various tables in the database. This cache * is populated as the user 'visits' a table. */ private Cache priv_cache; /** * Set to true if the grant table is modified in this manager. */ private boolean grant_table_changed; /** * Constructs the GrantManager. * Should only be constructed from DatabaseConnection. 
*/ GrantManager(DatabaseConnection connection) { this.connection = connection; this.context = new DatabaseQueryContext(connection); this.priv_cache = new Cache(129, 129, 20); this.grant_table_changed = false; // Attach a cache backed on the GRANTS table which will invalidate the // connection cache whenever the grant table is modified. connection.attachTableBackedCache(new TableBackedCache(Database.SYS_GRANTS) { public void purgeCacheOfInvalidatedEntries( IntegerVector added_rows, IntegerVector removed_rows) { // If there were changed then invalidate the cache if (grant_table_changed) { invalidateGrantCache(); grant_table_changed = false; } // Otherwise, if there were committed added or removed changes also // invalidate the cache, else if ((added_rows != null && added_rows.size() > 0) || (removed_rows != null && removed_rows.size() > 0)) { invalidateGrantCache(); } } }); } // ---------- Private priv caching methods ---------- /** * Flushes any grant information that's being cached. */ private void invalidateGrantCache() { priv_cache.removeAll(); } /** * Inner class that represents a grant query on a particular object, param * and user name. *

* This object is designed to be an immutable key in a cache. */ private static class GrantQuery { private int object; private String param; private String username; private int flags; GrantQuery(int object, String param, String username, boolean flag1, boolean flag2) { this.object = object; this.param = param; this.username = username; this.flags = flag1 ? 1 : 0; this.flags = this.flags | (flag2 ? 2 : 0); } public boolean equals(Object ob) { GrantQuery dest = (GrantQuery) ob; return (object == dest.object && param.equals(dest.param) && username.equals(dest.username) && flags == dest.flags); } public int hashCode() { return object + param.hashCode() + username.hashCode() + flags; } } private Privileges getPrivs(int object, String param, String username, boolean only_grant_options, String granter, boolean include_public_privs) throws DatabaseException { // Create the grant query key GrantQuery key = new GrantQuery(object, param, username, only_grant_options, include_public_privs); // Is the Privileges object for this query already in the cache? Privileges privs = (Privileges) priv_cache.get(key); if (privs == null) { // Not in cache so we need to ask database for the information. // try { // Connection c = connection.getJDBCConnection(); // PreparedStatement stmt = c.prepareStatement( // " SELECT \"priv\" FROM \"SYS_INFO.sUSRGrantPriv\" " + // " WHERE \"grant_id\" IN " + // " ( SELECT \"id\" FROM \"SYS_INFO.sUSRGrant\" " + // " WHERE \"param\" = ? " + // " AND \"object\" = ? " + // " AND (\"grantor\" = ? OR (? AND \"grantor\" = '@PUBLIC')) " + // " AND (? OR \"grant_option\" = 'true') " + // " AND (? OR \"granter\" = ?) 
" + // " )"); // stmt.setString(1, param); // stmt.setInt(2, object); // stmt.setString(3, username); // stmt.setBoolean(4, include_public_privs); // stmt.setBoolean(5, !only_grant_options); // stmt.setBoolean(6, (granter == null)); // stmt.setString(7, granter); // ResultSet rs = stmt.executeQuery(); // privs = Privileges.fromResultSet(rs); // rs.close(); // stmt.close(); // c.close(); // } // catch (SQLException e) { // connection.Debug().writeException(e); // throw new DatabaseException("SQL Error: " + e.getMessage()); // } // The system grants table. DataTable grant_table = connection.getTable(Database.SYS_GRANTS); Variable object_col = grant_table.getResolvedVariable(1); Variable param_col = grant_table.getResolvedVariable(2); Variable grantee_col = grant_table.getResolvedVariable(3); Variable grant_option_col = grant_table.getResolvedVariable(4); Variable granter_col = grant_table.getResolvedVariable(5); Operator EQUALS = Operator.get("="); Table t1 = grant_table; // All that match the given object parameter // It's most likely this will reduce the search by the most so we do // it first. t1 = t1.simpleSelect(context, param_col, EQUALS, new Expression(TObject.stringVal(param))); // The next is a single exhaustive select through the remaining records. // It finds all grants that match either public or the grantee is the // username, and that match the object type. 
// Expression: ("grantee_col" = username OR "grantee_col" = 'public') Expression user_check = Expression.simple(grantee_col, EQUALS, TObject.stringVal(username)); if (include_public_privs) { user_check = new Expression( user_check, Operator.get("or"), Expression.simple(grantee_col, EQUALS, PUBLIC_USERNAME) ); } // Expression: ("object_col" = object AND // ("grantee_col" = username OR "grantee_col" = 'public')) // All that match the given username or public and given object Expression expr = new Expression( Expression.simple(object_col, EQUALS, TObject.intVal(object)), Operator.get("and"), user_check); // Are we only searching for grant options? if (only_grant_options) { Expression grant_option_check = Expression.simple(grant_option_col, EQUALS, TObject.stringVal("true")); expr = new Expression(expr, Operator.get("and"), grant_option_check); } // Do we need to check for a granter when we looking for privs? if (granter != null) { Expression granter_check = Expression.simple(granter_col, EQUALS, TObject.stringVal(granter)); expr = new Expression(expr, Operator.get("and"), granter_check); } t1 = t1.exhaustiveSelect(context, expr); // For each grant, merge with the resultant priv object privs = Privileges.EMPTY_PRIVS; RowEnumeration e = t1.rowEnumeration(); while (e.hasMoreRows()) { int row_index = e.nextRowIndex(); BigNumber priv_bit = (BigNumber) t1.getCellContents(0, row_index).getObject(); privs = privs.add(priv_bit.intValue()); } // Put the privs object in the cache priv_cache.put(key, privs); } return privs; } /** * Internal method that sets the privs for the given object, param, grantee, * grant option and granter. This first revokes any grants that have been * setup for the object, and adds a new record with the new grants. 
*/ private void internalSetPrivs(Privileges new_privs, int object, String param, String grantee, boolean grant_option, String granter) throws DatabaseException { // Revoke existing privs on this object for this grantee revokeAllGrantsOnObject(object, param, grantee, grant_option, granter); if (!new_privs.isEmpty()) { // The system grants table. DataTable grant_table = connection.getTable(Database.SYS_GRANTS); // Add the grant to the grants table. RowData rdat = new RowData(grant_table); rdat.setColumnDataFromObject(0, BigNumber.fromInt(new_privs.toInt())); rdat.setColumnDataFromObject(1, BigNumber.fromInt(object)); rdat.setColumnDataFromObject(2, param); rdat.setColumnDataFromObject(3, grantee); rdat.setColumnDataFromObject(4, grant_option ? "true" : "false"); rdat.setColumnDataFromObject(5, granter); grant_table.add(rdat); // Invalidate the privilege cache invalidateGrantCache(); // Notify that the grant table has changed. grant_table_changed = true; } } // ---------- Public methods ---------- /** * Adds a grant on the given database object. * * @param privs the privileges to grant. * @param object the object to grant (TABLE, DOMAIN, etc) * @param param the parameter of the object (eg. the table name) * @param grantee the user name to grant the privs to. * @param grant_option if true, allows the user to pass grants to other * users. * @param granter the user granting. */ public void addGrant(Privileges privs, int object, String param, String grantee, boolean grant_option, String granter) throws DatabaseException { if (object == TABLE) { // Check that the table exists, if (!connection.tableExists(TableName.resolve(param))) { throw new DatabaseException("Table: " + param + " does not exist."); } } else if (object == SCHEMA) { // Check that the schema exists. 
if (!connection.schemaExists(param)) { throw new DatabaseException("Schema: " + param + " does not exist."); } } // Get any existing grants on this object to this grantee Privileges existing_privs = getPrivs(object, param, grantee, grant_option, granter, false); // Merge the existing privs with the new privs being added. Privileges new_privs = privs.merge(existing_privs); // If the new_privs are the same as the existing privs, don't bother // changing anything. if (!new_privs.equals(existing_privs)) { internalSetPrivs(new_privs, object, param, grantee, grant_option, granter); } } /** * For all tables in the given schema, this adds the given grant for each * of the tables. */ public void addGrantToAllTablesInSchema(String schema, Privileges privs, String grantee, boolean grant_option, String granter) throws DatabaseException { // The list of all tables TableName[] list = connection.getTableList(); for (int i = 0; i < list.length; ++i) { TableName tname = list[i]; // If the table is in the given schema, if (tname.getSchema().equals(schema)) { addGrant(privs, TABLE, tname.toString(), grantee, grant_option, granter); } } } /** * Removes a grant on the given object for the given grantee, grant option * and granter. */ public void removeGrant(Privileges privs, int object, String param, String grantee, boolean grant_option, String granter) throws DatabaseException { // Get any existing grants on this object to this grantee Privileges existing_privs = getPrivs(object, param, grantee, grant_option, granter, false); // Remove privs from the the existing privs. Privileges new_privs = existing_privs.remove(privs); // If the new_privs are the same as the existing privs, don't bother // changing anything. if (!new_privs.equals(existing_privs)) { internalSetPrivs(new_privs, object, param, grantee, grant_option, granter); } } /** * Removes all privs granted on the given object for the given grantee with * the given grant option. 
*/ public void revokeAllGrantsOnObject(int object, String param, String grantee, boolean grant_option, String granter) throws DatabaseException { // The system grants table. DataTable grant_table = connection.getTable(Database.SYS_GRANTS); Variable object_col = grant_table.getResolvedVariable(1); Variable param_col = grant_table.getResolvedVariable(2); Variable grantee_col = grant_table.getResolvedVariable(3); Variable grant_option_col = grant_table.getResolvedVariable(4); Variable granter_col = grant_table.getResolvedVariable(5); Operator EQUALS = Operator.get("="); Table t1 = grant_table; // All that match the given object parameter // It's most likely this will reduce the search by the most so we do // it first. t1 = t1.simpleSelect(context, param_col, EQUALS, new Expression(TObject.stringVal(param))); // The next is a single exhaustive select through the remaining records. // It finds all grants that match either public or the grantee is the // username, and that match the object type. // Expression: ("grantee_col" = username) Expression user_check = Expression.simple(grantee_col, EQUALS, TObject.stringVal(grantee)); // Expression: ("object_col" = object AND // "grantee_col" = username) // All that match the given username or public and given object Expression expr = new Expression( Expression.simple(object_col, EQUALS, TObject.intVal(object)), Operator.get("and"), user_check); // Are we only searching for grant options? Expression grant_option_check = Expression.simple(grant_option_col, EQUALS, TObject.stringVal(grant_option ? 
"true" : "false")); expr = new Expression(expr, Operator.get("and"), grant_option_check); // Make sure the granter matches up also Expression granter_check = Expression.simple(granter_col, EQUALS, TObject.stringVal(granter)); expr = new Expression(expr, Operator.get("and"), granter_check); t1 = t1.exhaustiveSelect(context, expr); // Remove these rows from the table grant_table.delete(t1); // Invalidate the privilege cache invalidateGrantCache(); // Notify that the grant table has changed. grant_table_changed = true; } /** * Completely removes all privs granted on the given object for all users. * This would typically be used when the object is dropped from the database. */ public void revokeAllGrantsOnObject(int object, String param) throws DatabaseException { // The system grants table. DataTable grant_table = connection.getTable(Database.SYS_GRANTS); Variable object_col = grant_table.getResolvedVariable(1); Variable param_col = grant_table.getResolvedVariable(2); // All that match the given object Table t1 = grant_table.simpleSelect(context, object_col, Operator.get("="), new Expression(TObject.intVal(object))); // All that match the given parameter t1 = t1.simpleSelect(context, param_col, Operator.get("="), new Expression(TObject.stringVal(param))); // Remove these rows from the table grant_table.delete(t1); // Invalidate the privilege cache invalidateGrantCache(); // Notify that the grant table has changed. grant_table_changed = true; } /** * Returns all Privileges for the given object for the given grantee (user). * This would be used to determine the access a user has to a table. *

* Note that the Privileges object includes all the grants on the object given * to PUBLIC also. *

* This method will concatenate multiple privs granted on the same * object. *

* PERFORMANCE: This method is called a lot (at least once on every query). */ public Privileges userGrants(int object, String param, String username) throws DatabaseException { return getPrivs(object, param, username, false, null, true); } /** * Returns all Privileges for the given object for the given grantee (user) * that the user is allowed to give grant options for. This would be used to * determine if a user has privs to give another user grants on an object. *

* Note that the Privileges object includes all the grants on the object given * to PUBLIC also. *

* This method will concatenate multiple grant options given on the same * object to the user. */ public Privileges userGrantOptions(int object, String param, String username) throws DatabaseException { return getPrivs(object, param, username, true, null, true); } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/GroupHelper.java000066400000000000000000000102171330501023400250150ustar00rootroot00000000000000/** * com.mckoi.database.GroupHelper 25 Jun 1999 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import com.mckoi.util.IntegerVector; import java.util.ArrayList; import java.io.IOException; /** * This is a static class that provides the functionality for seperating a * table into distinct groups. This class is used in the * 'Table.group(String[] columns)' method. *

* @author Tobias Downer * @deprecated don't use this anymore */ final class GroupHelper { // /** // * The sorted table we are grouping. // */ // private Table table; // // /** // * The table enumerator. // */ // private RowEnumeration row_enum; // // /** // * The index of the last column we are grouping on. // */ // private int last_col_index; // // /** // * The last row index. // */ // private int last_row_index; // // /** // * The columns we are grouping over. // */ // private int[] columns; /** * Constructs the helper. */ GroupHelper(Table table, String[] col_names) { throw new Error("Not used anymore"); // // Optimisation, pre-resolve the columns into indices // columns = new int[col_names.length]; // for (int i = 0; i < columns.length; ++i) { // int ci = table.findFieldName(col_names[i]); // if (ci == -1) { // throw new Error("Unknown field name in group ( " + // col_names[i] + " )"); // } // columns[i] = ci; // } // // // Sort the tables by the column groups. // for (int i = col_names.length - 1; i >= 0; --i) { //// table = table.orderByColumn(col_names[i]); // table = table.orderByColumn(columns[i], true); // } // this.table = table; // // row_enum = table.rowEnumeration(); // if (row_enum.hasMoreRows()) { // last_row_index = row_enum.nextRowIndex(); // } // else { // last_row_index = -1; // } } // /** // * Returns the next group in the table. Returns 'null' if there are no // * more groups in the table. 
// */ // public VirtualTable nextGroup() { // if (last_row_index != -1) { // // IntegerVector new_list = new IntegerVector(8); // // int row_index = last_row_index; // int top_row_index = row_index; // // boolean equal = true; // // do { // new_list.addInt(row_index); // if (!row_enum.hasMoreRows()) { // break; // } // row_index = row_enum.nextRowIndex(); // // equal = true; // for (int i = 0; i < columns.length && equal; ++i) { // TObject cell = table.getCellContents(columns[i], top_row_index); // equal = equal && // table.compareCellTo(cell, columns[i], row_index) == Table.EQUAL; // } // // } while (equal); // // if (!equal) { // last_row_index = row_index; // } // else { // last_row_index = -1; // } // // // VirtualTable vtable = new VirtualTable(table); // vtable.set(table, new_list); // return vtable; // // } // else { // return null; // } // // // } // // // // public static final VirtualTable[] group(Table table, String[] groups) { // // // GroupHelper g_help = new GroupHelper(table, groups); // ArrayList list = new ArrayList(); // VirtualTable tab = g_help.nextGroup(); // while (tab != null) { // list.add(tab); // tab = g_help.nextGroup(); // } // // // Make into an array // VirtualTable[] table_array = (VirtualTable[]) // list.toArray(new VirtualTable[list.size()]); // return table_array; // // } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/GroupResolver.java000066400000000000000000000035101330501023400253750ustar00rootroot00000000000000/** * com.mckoi.database.GroupResolver 14 Jul 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import com.mckoi.util.IntegerVector; /** * Similar to VariableResolver, this method is used by grouping Functions to * find information about the current group being evaluated (used for * evaluating aggregate functions). * * @author Tobias Downer */ public interface GroupResolver { /** * A number that uniquely identifies this group from all the others in the * set of groups. */ public int groupID(); /** * The total number of set items in this group. */ public int size(); /** * Returns the value of a variable of a group. The set index signifies the * set item of the group. For example, if the group contains 10 items, then * set_index may be between 0 and 9. Return types must be either * a String, BigDecimal or Boolean. */ public TObject resolve(Variable variable, int set_index); /** * Returns a VariableResolver that can be used to resolve variable in the * get set of the group. The object returned is undefined after the next * call to this method. */ public VariableResolver getVariableResolver(int set_index); } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/INHelper.java000066400000000000000000000176771330501023400242500ustar00rootroot00000000000000/** * com.mckoi.database.INHelper 17 Sep 1998 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import com.mckoi.util.BlockIntegerList; import com.mckoi.util.IntegerVector; /** * This is a static class that provides methods for performing the Query * table command 'in' and 'not in'. This command finds a match between one * of the columns in two tables. If match between a cell in one column is also * found in the column of the other table, the row is included in the resultant * table (or discluded (is that a word?) for 'not in'). *

* @author Tobias Downer */ final class INHelper { /** * This implements the 'in' command. Returns the rows selected from table1. *

* NOTE: This is actually an incorrect implementation. We * only keep for compatibility with DQL system. The may return multiple * values from 'table1' */ final static IntegerVector origIn(Table table1, Table table2, int column1, int column2) { // First pick the the smallest and largest table. We only want to iterate // through the smallest table. // NOTE: This optimisation can't be performed for the 'not_in' command. Table small_table; Table large_table; int small_column; int large_column; if (table1.getRowCount() < table2.getRowCount()) { small_table = table1; large_table = table2; small_column = column1; large_column = column2; } else { small_table = table2; large_table = table1; small_column = column2; large_column = column1; } // Iterate through the small table's column. If we can find identical // cells in the large table's column, then we should include the row in our // final result. IntegerVector result_rows = new IntegerVector(); RowEnumeration e = small_table.rowEnumeration(); Operator EQUALSOP = Operator.get("="); while (e.hasMoreRows()) { int small_row_index = e.nextRowIndex(); TObject cell = small_table.getCellContents(small_column, small_row_index); IntegerVector selected_set = large_table.selectRows(large_column, EQUALSOP, cell); // We've found cells that are IN both columns, if (selected_set.size() > 0) { // If the large table is what our result table will be based on, append // the rows selected to our result set. Otherwise add the index of // our small table. This only works because we are performing an // EQUALS operation. if (large_table == table1) { result_rows.append(selected_set); } else { result_rows.addInt(small_row_index); } } } return result_rows; } /** * This implements the 'in' command. Returns the rows selected from table1. * This correctly implements the 'in' relation. The 'origIn' implementation * may return multiple rows from the largest table. 
*/ final static IntegerVector in(Table table1, Table table2, int column1, int column2) { // First pick the the smallest and largest table. We only want to iterate // through the smallest table. // NOTE: This optimisation can't be performed for the 'not_in' command. Table small_table; Table large_table; int small_column; int large_column; if (table1.getRowCount() < table2.getRowCount()) { small_table = table1; large_table = table2; small_column = column1; large_column = column2; } else { small_table = table2; large_table = table1; small_column = column2; large_column = column1; } // Iterate through the small table's column. If we can find identical // cells in the large table's column, then we should include the row in our // final result. BlockIntegerList result_rows = new BlockIntegerList(); RowEnumeration e = small_table.rowEnumeration(); Operator EQUALSOP = Operator.get("="); while (e.hasMoreRows()) { int small_row_index = e.nextRowIndex(); TObject cell = small_table.getCellContents(small_column, small_row_index); IntegerVector selected_set = large_table.selectRows(large_column, EQUALSOP, cell); // We've found cells that are IN both columns, if (selected_set.size() > 0) { // If the large table is what our result table will be based on, append // the rows selected to our result set. Otherwise add the index of // our small table. This only works because we are performing an // EQUALS operation. if (large_table == table1) { // Only allow unique rows into the table set. int sz = selected_set.size(); boolean rs = true; for (int i = 0; rs == true && i < sz; ++i) { rs = result_rows.uniqueInsertSort(selected_set.intAt(i)); } } else { // Don't bother adding in sorted order because it's not important. result_rows.add(small_row_index); } } } return new IntegerVector(result_rows); } /** * A multi-column version of IN. 
*/ final static IntegerVector in(Table table1, Table table2, int[] t1_cols, int[] t2_cols) { if (t1_cols.length > 1) { throw new Error("Multi-column 'in' not supported."); } return in(table1, table2, t1_cols[0], t2_cols[0]); } /** * This implements the 'not_in' command. * ISSUE: This will be less efficient than 'in' if table1 has many rows and * table2 has few rows. */ final static IntegerVector notIn(Table table1, Table table2, int col1, int col2) { // Handle trivial cases int t2_row_count = table2.getRowCount(); if (t2_row_count == 0) { // No rows so include all rows. return table1.selectAll(col1); } else if (t2_row_count == 1) { // 1 row so select all from table1 that doesn't equal the value. RowEnumeration e = table2.rowEnumeration(); TObject cell = table2.getCellContents(col2, e.nextRowIndex()); return table1.selectRows(col1, Operator.get("<>"), cell); } // Iterate through table1's column. If we can find identical cell in the // tables's column, then we should not include the row in our final // result. IntegerVector result_rows = new IntegerVector(); RowEnumeration e = table1.rowEnumeration(); Operator EQUALSOP = Operator.get("="); while (e.hasMoreRows()) { int row_index = e.nextRowIndex(); TObject cell = table1.getCellContents(col1, row_index); IntegerVector selected_set = table2.selectRows(col2, Operator.get("="), cell); // We've found a row in table1 that doesn't have an identical cell in // table2, so we should include it in the result. if (selected_set.size() <= 0) { result_rows.addInt(row_index); } } return result_rows; } /** * A multi-column version of NOT IN. 
*/ final static IntegerVector notIn(Table table1, Table table2, int[] t1_cols, int[] t2_cols) { if (t1_cols.length > 1) { throw new Error("Multi-column 'not in' not supported."); } return notIn(table1, table2, t1_cols[0], t2_cols[0]); } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/IndexSet.java000066400000000000000000000023611330501023400243050ustar00rootroot00000000000000/** * com.mckoi.database.IndexSet 19 Sep 2001 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import com.mckoi.util.IntegerListInterface; /** * A set of list of indexes. This will often expose an isolated snapshot of a * set of indices for a table. * * @author Tobias Downer */ public interface IndexSet { /** * Returns a mutable object that implements IntegerListInterface for the * given index number in this set of indices. */ IntegerListInterface getIndex(int n); /** * Cleans up and disposes the resources associated with this set of index. */ void dispose(); } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/IndexSetStore.java000066400000000000000000001303641330501023400253270ustar00rootroot00000000000000/** * com.mckoi.database.IndexSetStore 03 Sep 2002 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import java.io.IOException; import java.io.OutputStream; import java.io.DataOutputStream; import java.io.ByteArrayOutputStream; import java.util.ArrayList; import com.mckoi.util.IntegerListInterface; import com.mckoi.util.AbstractBlockIntegerList; import com.mckoi.util.BlockIntegerList; import com.mckoi.util.BlockIntegerList.IntArrayListBlock; import com.mckoi.util.IntegerListBlockInterface; import com.mckoi.util.IntegerIterator; import com.mckoi.util.IntegerVector; import com.mckoi.util.UserTerminal; import com.mckoi.util.Cache; import com.mckoi.store.Store; import com.mckoi.store.Area; import com.mckoi.store.MutableArea; import com.mckoi.store.AreaWriter; import com.mckoi.debug.*; /** * A class that manages the storage of a set of transactional index lists in a * way that is fast to modify. This class has a number of objectives; *

*

*

* This object uses a com.mckoi.store.Store instance as its backing medium. *

* This store manages three types of areas; 'Index header', 'Index block' and * 'Index element'. *

* Index header: This area type contains an entry for each index being stored. * The Index header contains a pointer to an 'Index block' area for each index. * The pointer to the 'Index block' in this area changes whenever an index * changes, or when new indexes are added or deleted from the store. *

* Index block: This area contains a number of pointers to Index element blocks. * The number of entries depends on the number of indices in the list. Each * entry contains the size of the block, the first and last entry of the block, * and a pointer to the element block itself. If an element of the index * changes or elements are removed or deleted, this block does NOT change. * This should be considered an immutable area. *

* Index element: This area simply contains the actual values in a block of the * index. An Index element area does not change and should be considered an * immutable area. * * @author Tobias Downer */ final class IndexSetStore { /** * The magic value that we use to mark the start area. */ private static final int MAGIC = 0x0CA90291; /** * A DebugLogger object used to log debug messages to. */ private final DebugLogger debug; /** * The TransactionSystem for this index set. */ private final TransactionSystem system; /** * The Store that contains all the data of the index store. */ private Store store; /** * The starting header of this index set. This is a very small area that * simply contains a magic value and a pointer to the index header. This * is the only MutableArea object that is required by the index set. */ private MutableArea start_area; /** * The index header area. The index header area contains an entry for each * index being stored. Each entry is 16 bytes in size and has a 16 byte * header. *

* HEADER: ( version (int), reserved (int), index count (long) )
* ENTRY: ( type (int), block_size (int), index block pointer (long) ) */ private long index_header_p; private Area index_header_area; /** * The index blocks - one for each index being stored. An index block area * contains an entry for each index element in an index. Each entry is 28 * bytes in size and the area has a 16 byte header. *

* HEADER: ( version (int), reserved (int), index size (long) )
* ENTRY: ( first entry (long), last entry (long), * index element pointer (long), type/element size (int) ) *

* type/element size contains the number of elements in the block, and the * block compaction factor. For example, type 1 means the block contains * short sized index values, 2 is int sized index values, and 3 is long * sized index values. */ private IndexBlock[] index_blocks; /** * Constructs the IndexSetStore over the given Store object. */ public IndexSetStore(Store store, final TransactionSystem system) { this.store = store; this.system = system; this.debug = system.Debug(); } /** * Delete all areas specified in the list (as a list of Long). */ private synchronized void deleteAllAreas(ArrayList list) { if (store != null) { try { store.lockForWrite(); int sz = list.size(); for (int i = 0; i < sz; ++i) { long id = ((Long) list.get(i)).longValue(); store.deleteArea(id); } } catch (IOException e) { debug.write(Lvl.ERROR, this, "Error when freeing old index block."); debug.writeException(e); } finally { store.unlockForWrite(); } } } // ---------- Private methods ---------- /** * Creates a new blank index block in the store and returns a pointer to the * area. */ private long createBlankIndexBlock() throws IOException { // Allocate the area AreaWriter a = store.createArea(16); long index_block_p = a.getID(); // Setup the header a.putInt(1); // version a.putInt(0); // reserved a.putLong(0); // block entries a.finish(); return index_block_p; } // ---------- Public methods ---------- /** * Creates a new black index set store and returns a pointer to a static * area that is later used to reference this index set in this store. * Remember to synch after this is called. 
*/ public synchronized long create() throws IOException { // Create an empty index header area AreaWriter a = store.createArea(16); index_header_p = a.getID(); a.putInt(1); // version a.putInt(0); // reserved a.putLong(0); // number of indexes in the set a.finish(); // Set up the local Area object for the index header index_header_area = store.getArea(index_header_p); index_blocks = new IndexBlock[0]; // Allocate the starting header AreaWriter start_a = store.createArea(32); long start_p = start_a.getID(); // The magic start_a.putInt(MAGIC); // The version start_a.putInt(1); // Pointer to the index header start_a.putLong(index_header_p); start_a.finish(); // Set the 'start_area' value. start_area = store.getMutableArea(start_p); return start_p; } /** * Initializes this index set. This must be called during general * initialization of the table object. */ public synchronized void init(long start_p) throws IOException { // Set up the start area start_area = store.getMutableArea(start_p); int magic = start_area.getInt(); if (magic != MAGIC) { throw new IOException("Magic value for index set does not match."); } int version = start_area.getInt(); if (version != 1) { throw new IOException("Unknown version for index set."); } // Setup the index_header area index_header_p = start_area.getLong(); index_header_area = store.getArea(index_header_p); // Read the index header area version = index_header_area.getInt(); // version if (version != 1) { throw new IOException("Incorrect version"); } int reserved = index_header_area.getInt(); // reserved int index_count = (int) index_header_area.getLong(); index_blocks = new IndexBlock[index_count]; // Initialize each index block for (int i = 0; i < index_count; ++i) { int type = index_header_area.getInt(); int block_size = index_header_area.getInt(); long index_block_p = index_header_area.getLong(); if (type == 1) { index_blocks[i] = new IndexBlock(i, block_size, index_block_p); index_blocks[i].addReference(); } else { throw new 
IOException("Do not understand index type: " + type); } } } /** * Closes this index set (cleans up). */ public synchronized void close() { if (store != null) { for (int i = 0; i < index_blocks.length; ++i) { index_blocks[i].removeReference(); } store = null; index_blocks = null; } } /** * Overwrites all existing index information in this store and sets it to a * copy of the given IndexSet object. The 'source_index' must be a snapshot * as returned by the getSnapshotIndexSet method but not necessarily * generated from this index set. *

* This will create a new structure within this store that contains the copied * index data. This overwrites any existing data in this store so care should * be used when using this method. *

* This method is an optimized method of copying all the index data in an * index set and only requires a small buffer in memory. The index data * in 'index_set' is not altered in any way by using this. */ public synchronized void copyAllFrom(IndexSet index_set) throws IOException { // Assert that IndexSetStore is initialized if (index_blocks == null) { throw new RuntimeException( "Can't copy because this IndexSetStore is not initialized."); } // Drop any indexes in this index store. for (int i = 0; i < index_blocks.length; ++i) { commitDropIndex(i); } if (index_set instanceof SnapshotIndexSet) { // Cast to SnapshotIndexSet SnapshotIndexSet s_index_set = (SnapshotIndexSet) index_set; // The number of IndexBlock items to copy. int index_count = s_index_set.snapshot_index_blocks.length; // Record the old index_header_p long old_index_header_p = index_header_p; // Create the header in this store AreaWriter a = store.createArea(16 + (16 * index_count)); index_header_p = a.getID(); a.putInt(1); // version a.putInt(0); // reserved a.putLong(index_count); // number of indexes in the set // Fill in the information from the index_set for (int i = 0; i < index_count; ++i) { IndexBlock source_block = s_index_set.snapshot_index_blocks[i]; long index_block_p = source_block.copyTo(store); a.putInt(1); // NOTE: Only support for block type 1 a.putInt(source_block.getBlockSize()); a.putLong(index_block_p); } // The header area has now been initialized. a.finish(); // Modify the start area header to point to this new structure. start_area.position(8); start_area.putLong(index_header_p); // Check out the change start_area.checkOut(); // Free space associated with the old header_p store.deleteArea(old_index_header_p); } else { throw new RuntimeException("Can not copy non-IndexSetStore IndexSet"); } // Re-initialize the index init(start_area.getID()); } /** * Adds to the given ArrayList all the areas in the store that are used by * this structure (as Long). 
*/ public void addAllAreasUsed(ArrayList list) throws IOException { list.add(new Long(start_area.getID())); list.add(new Long(index_header_p)); for (int i = 0; i < index_blocks.length; ++i) { IndexBlock block = index_blocks[i]; list.add(new Long(block.getPointer())); long[] block_pointers = block.getAllBlockPointers(); for (int n = 0; n < block_pointers.length; ++n) { list.add(new Long(block_pointers[n])); } } } /** * Adds a number of blank index tables to the index store. For example, * we may want this store to contain 16 index lists. *

* NOTE: This doesn't write the updated information to the file. You must * call 'flush' to write the information to the store. */ public synchronized void addIndexLists(int count, int type, int block_size) throws IOException { try { store.lockForWrite(); // Allocate a new area for the list int new_size = 16 + ((index_blocks.length + count) * 16); AreaWriter new_index_area = store.createArea(new_size); long new_index_p = new_index_area.getID(); IndexBlock[] new_index_blocks = new IndexBlock[(index_blocks.length + count)]; // Copy the existing area index_header_area.position(0); int version = index_header_area.getInt(); int reserved = index_header_area.getInt(); long icount = index_header_area.getLong(); new_index_area.putInt(version); new_index_area.putInt(reserved); new_index_area.putLong(icount + count); for (int i = 0; i < index_blocks.length; ++i) { int itype = index_header_area.getInt(); int iblock_size = index_header_area.getInt(); long index_block_p = index_header_area.getLong(); new_index_area.putInt(itype); new_index_area.putInt(iblock_size); new_index_area.putLong(index_block_p); new_index_blocks[i] = index_blocks[i]; } // Add the new entries for (int i = 0; i < count; ++i) { long new_blank_block_p = createBlankIndexBlock(); new_index_area.putInt(type); new_index_area.putInt(block_size); new_index_area.putLong(new_blank_block_p); IndexBlock i_block = new IndexBlock(index_blocks.length + i, block_size, new_blank_block_p); i_block.addReference(); new_index_blocks[index_blocks.length + i] = i_block; } // Finished initializing the index. 
new_index_area.finish(); // The old index header pointer long old_index_header_p = index_header_p; // Update the state of this object, index_header_p = new_index_p; index_header_area = store.getArea(new_index_p); index_blocks = new_index_blocks; // Update the start pointer start_area.position(8); start_area.putLong(new_index_p); start_area.checkOut(); // Free the old header store.deleteArea(old_index_header_p); } finally { store.unlockForWrite(); } } /** * Returns a current snapshot of the current indexes that are committed in * this store. The returned object can be used to create mutable * IntegerListInterface objects. The created index lists are isolated from * changes made to the rest of the indexes after this method returns. *

* A transaction must grab an IndexSet object when it opens. *

* NOTE: We MUST guarentee that the IndexSet is disposed when the * transaction finishes. */ public synchronized IndexSet getSnapshotIndexSet() { // Clone the blocks list. This represents the current snapshot of the // index state. IndexBlock[] snapshot_index_blocks = (IndexBlock[]) index_blocks.clone(); // Add this as the reference for (int i = 0; i < snapshot_index_blocks.length; ++i) { snapshot_index_blocks[i].addReference(); } return new SnapshotIndexSet(snapshot_index_blocks); } /** * Commits the index header with the current values set in 'index_blocks'. */ private synchronized void commitIndexHeader() throws IOException { // Make a new index header area for the changed set. AreaWriter a = store.createArea(16 + (index_blocks.length * 16)); long a_p = a.getID(); a.putInt(1); // version a.putInt(0); // reserved a.putLong(index_blocks.length); // count for (int i = 0; i < index_blocks.length; ++i) { IndexBlock ind_block = index_blocks[i]; a.putInt(1); a.putInt(ind_block.getBlockSize()); a.putLong(ind_block.getPointer()); } // Finish creating the updated header a.finish(); // The old index header pointer long old_index_header_p = index_header_p; // Set the new index header index_header_p = a_p; index_header_area = store.getArea(index_header_p); // Write the change to 'start_p' start_area.position(8); start_area.putLong(index_header_p); start_area.checkOut(); // Free the old header index store.deleteArea(old_index_header_p); } /** * Commits changes made to a snapshop of an IndexSet as being permanent * changes to the state of the index store. This will generate an error if * the given IndexSet is not the last set returned from the * 'getSnapshotIndexSet' method. *

* For this to be used, during the transaction commit function a * 'getSnapshopIndexSet' must be obtained, changes made to it from info in * the journal, then a call to this method. There must be a guarentee that * 'getSnapshotIndexSet' is not called again during this process. *

* NOTE: We must be guarenteed that when this method is called no other * calls to other methods in this object can be called. */ public void commitIndexSet(IndexSet index_set) { ArrayList removed_blocks = new ArrayList(); synchronized(this) { SnapshotIndexSet s_index_set = (SnapshotIndexSet) index_set; IndexIntegerList[] lists = s_index_set.getAllLists(); try { try { store.lockForWrite(); // For each IndexIntegerList in the index set, for (int n = 0; n < lists.length; ++n) { // Get the list IndexIntegerList list = (IndexIntegerList) lists[n]; int index_num = list.getIndexNumber(); // The IndexBlock we are changing IndexBlock cur_index_block = index_blocks[index_num]; // Get all the blocks in the list MappedListBlock[] blocks = list.getAllBlocks(); // Make up a new block list for this index set. AreaWriter a = store.createArea(16 + (blocks.length * 28)); long block_p = a.getID(); a.putInt(1); // version a.putInt(0); // reserved a.putLong(blocks.length); // block count for (int i = 0; i < blocks.length; ++i) { MappedListBlock b = blocks[i]; long bottom_int = 0; long top_int = 0; int block_size = b.size(); if (block_size > 0) { bottom_int = b.bottomInt(); top_int = b.topInt(); } long b_p = b.getBlockPointer(); // Is the block new or was it changed? if (b_p == -1 || b.hasChanged()) { // If this isn't -1 then put this sector on the list of // sectors to delete during GC. 
if (b_p != -1) { cur_index_block.addDeletedArea(b_p); } // This is a new block or a block that's been changed // Write the block to the file system b_p = b.writeToStore(); } a.putLong(bottom_int); a.putLong(top_int); a.putLong(b_p); a.putInt(block_size | (((int) b.getCompactType()) << 24)); } // Finish initializing the area a.finish(); // Add the deleted blocks MappedListBlock[] deleted_blocks = list.getDeletedBlocks(); for (int i = 0; i < deleted_blocks.length; ++i) { long del_block_p = deleted_blocks[i].getBlockPointer(); if (del_block_p != -1) { cur_index_block.addDeletedArea(del_block_p); } } // Mark the current block as deleted cur_index_block.markAsDeleted(); // Now create a new IndexBlock object IndexBlock new_index_block = new IndexBlock(index_num, cur_index_block.getBlockSize(), block_p); new_index_block.setParentIndexBlock(cur_index_block); // Add reference to the new one new_index_block.addReference(); // Update the index_blocks list index_blocks[index_num] = new_index_block; // We remove this later. removed_blocks.add(cur_index_block); } // Commit the new index header (index_blocks) commitIndexHeader(); } finally { store.unlockForWrite(); } // Commit finished. } catch (IOException e) { debug.writeException(e); throw new Error("IO Error: " + e.getMessage()); } } // synchronized // Remove all the references for the changed blocks, int sz = removed_blocks.size(); for (int i = 0; i < sz; ++i) { IndexBlock block = (IndexBlock) removed_blocks.get(i); block.removeReference(); } } /** * Commits a change that drops an index from the index set. This must be * called from within the conglomerate commit. The actual implementation of * this overwrites the index with with a 0 length index. This is also useful * if you want to reindex a column. 
*/ public synchronized void commitDropIndex(int index_num) throws IOException { // The IndexBlock we are dropping IndexBlock cur_index_block = index_blocks[index_num]; int block_size = cur_index_block.getBlockSize(); try { store.lockForWrite(); // Add all the elements to the deleted areas in the block long[] all_block_pointers = cur_index_block.getAllBlockPointers(); for (int i = 0; i < all_block_pointers.length; ++i) { cur_index_block.addDeletedArea(all_block_pointers[i]); } // Mark the current block as deleted cur_index_block.markAsDeleted(); // Make up a new blank block list for this index set. long block_p = createBlankIndexBlock(); // Now create a new IndexBlock object IndexBlock new_index_block = new IndexBlock(index_num, block_size, block_p); // Add reference to the new one new_index_block.addReference(); // Remove reference to the old cur_index_block.removeReference(); // Update the index_blocks list index_blocks[index_num] = new_index_block; // Commit the new index header (index_blocks) commitIndexHeader(); } finally { store.unlockForWrite(); } } // ---------- Inner classes ---------- /** * A convenience static empty integer list array. */ private static IndexIntegerList[] EMPTY_INTEGER_LISTS = new IndexIntegerList[0]; /** * The implementation of IndexSet which represents a mutable snapshot of * the indices stored in this set. */ private class SnapshotIndexSet implements IndexSet { /** * The list of IndexBlock object that represent the view of the index set * when the view was created. */ private IndexBlock[] snapshot_index_blocks; /** * The list of IndexIntegerList objects that have been returned via the * 'getIndex(n)' method. */ private ArrayList integer_lists; /** * Set to true when this object is disposed. */ private boolean disposed; /** * Constructor. */ public SnapshotIndexSet(IndexBlock[] blocks) { this.snapshot_index_blocks = blocks; // Not disposed. 
disposed = false; } /** * Returns all the lists that have been created by calls to * 'getIndex' */ public IndexIntegerList[] getAllLists() { if (integer_lists == null) { return EMPTY_INTEGER_LISTS; } else { return (IndexIntegerList[]) integer_lists.toArray( new IndexIntegerList[integer_lists.size()]); } } // ---------- Implemented from IndexSet ---------- public IntegerListInterface getIndex(int n) { // Create if not exist. if (integer_lists == null) { integer_lists = new ArrayList(); } else { // If this list has already been created, return it for (int o = 0; o < integer_lists.size(); ++o) { IndexIntegerList i_list = (IndexIntegerList) integer_lists.get(o); if (i_list.getIndexNumber() == n) { return i_list; // throw new Error( // "IntegerListInterface already created for this n."); } } } try { IndexIntegerList ilist = snapshot_index_blocks[n].createIndexIntegerList(); integer_lists.add(ilist); return ilist; } catch (IOException e) { debug.writeException(e); throw new RuntimeException("IO Error: " + e.getMessage()); } } public void dispose() { if (!disposed) { if (integer_lists != null) { for (int i = 0; i < integer_lists.size(); ++i) { IndexIntegerList ilist = (IndexIntegerList) integer_lists.get(i); ilist.dispose(); } integer_lists = null; } // Release reference to the index_blocks; for (int i = 0; i < snapshot_index_blocks.length; ++i) { IndexBlock iblock = snapshot_index_blocks[i]; iblock.removeReference(); } snapshot_index_blocks = null; disposed = true; } } public void finalize() { try { if (!disposed) { dispose(); } } catch (Throwable e) { debug.write(Lvl.ERROR, this, "Finalize error: " + e.getMessage()); debug.writeException(e); } } } /** * An IntegerListBlockInterface implementation that maps a block of a list * to an underlying file system representation. */ private final class MappedListBlock extends IntArrayListBlock { /** * The first entry in the block. */ private long first_entry; /** * The last entry in the block. 
*/ private long last_entry; /** * A pointer to the area where this block can be found. */ private long block_p; /** * Lock object. */ private Object lock = new Object(); /** * Set to true if the loaded block is mutable. */ private boolean mutable_block; /** * How this block is compacted in the store. If this is 1 the elements are * stored as shorts, if it is 2 - ints, and if it is 3 - longs. */ private byte compact_type; /** * The maximum size of the block. */ private final int max_block_size; /** * Constructor. */ public MappedListBlock(long first_e, long last_e, long mapped_p, int size, byte compact_type, int max_block_size) { this.first_entry = first_e; this.last_entry = last_e; this.block_p = mapped_p; this.compact_type = compact_type; this.max_block_size = max_block_size; count = size; array = null; } /** * Creates an empty block. */ public MappedListBlock(int block_size_in) { super(block_size_in); this.block_p = -1; this.max_block_size = block_size_in; } /** * Returns a pointer to the area that contains this block. */ public long getBlockPointer() { return block_p; } /** * Returns the compact type of this block. */ public byte getCompactType() { return compact_type; } /** * Copies the index data in this block to a new block in the given store * and returns a pointer to the new block. */ public long copyTo(Store dest_store) throws IOException { // The number of bytes per entry int entry_size = compact_type; // The total size of the entry. int area_size = (count * entry_size); // Allocate the destination area AreaWriter dest = dest_store.createArea(area_size); long dest_block_p = dest.getID(); store.getArea(block_p).copyTo(dest, area_size); dest.finish(); return dest_block_p; } /** * Writes this block to a new sector in the index file and updates the * information in this object accordingly. *

 * Returns the sector the block was written to.
 */
public long writeToStore() throws IOException {
  // Pick the most compact per-entry byte size for this block.  Every value
  // must fit (sign-extended) in 'compact_type' bytes, so first find the
  // value with the largest magnitude.
  long largest_val = 0;
  for (int i = 0; i < count; ++i) {
    long v = (long) array[i];
    if (Math.abs(v) > Math.abs(largest_val)) {
      largest_val = v;
    }
  }

  long lv = largest_val;
  // A value fits in n bytes when shifting away the low (n*8)-1 bits leaves
  // only the sign (all zero bits or all one bits).
  if (lv >> 7 == 0 || lv >> 7 == -1) {
    compact_type = 1;
  }
  else if (lv >> 15 == 0 || lv >> 15 == -1) {
    compact_type = 2;
  }
  else if (lv >> 23 == 0 || lv >> 23 == -1) {
    compact_type = 3;
  }
  // NOTE: in the future we'll want to determine if we are going to store
  //   as an int or long array.
  else {
    compact_type = 4;
  }

  // The number of bytes per entry
  int entry_size = compact_type;
  // The total size of the area needed to store the block.
  int area_size = (count * entry_size);

  // Buffer the block contents, big-endian, 'entry_size' bytes per value.
  byte[] arr = new byte[area_size];
  int p = 0;
  for (int i = 0; i < count; ++i) {
    int v = array[i];
    for (int n = entry_size - 1; n >= 0; --n) {
      arr[p] = (byte) ((v >>> (n * 8)) & 0x0FF);
      ++p;
    }
  }

  // Create an area to store this block and write the buffered bytes.
  AreaWriter a = store.createArea(area_size);
  block_p = a.getID();
  // Write to the area
  a.put(arr, 0, area_size);
  // And finish the area initialization
  a.finish();

  // Once written, the block is invalidated.  'lock' is nulled so any
  // further access that synchronizes on it fails fast - the block must not
  // be used after this point.
  lock = null;

  return block_p;
}

/**
 * Overwritten from IntArrayListBlock, this returns the int[] array that
 * contains the contents of the block.  In this implementation, we
 * determine if the array has been read from the index file.  If it
 * hasn't we read it in, otherwise we use the version in memory.
 */
public int[] getArray(boolean immutable) {
  // We must synchronize this entire block because otherwise we could
  // return a partially loaded array.
  synchronized (lock) {
    if (array != null) {
      prepareMutate(immutable);
      return array;
    }

    // Create the int array
    array = new int[max_block_size];

    // The number of bytes per entry
    int entry_size = compact_type;
    // The total size of the entry.
    int area_size = (count * entry_size);

    // Read in the byte array
    byte[] buf = new byte[area_size];
    try {
      store.getArea(block_p).get(buf, 0, area_size);
    }
    catch (IOException e) {
      debug.write(Lvl.ERROR, this, "block_p = " + block_p);
      debug.writeException(e);
      throw new Error("IO Error: " + e.getMessage());
    }

    // Uncompact it into the int array.  The first byte of each entry is
    // NOT masked so that negative values sign-extend correctly.
    int p = 0;
    for (int i = 0; i < count; ++i) {
      int v = (((int) buf[p]) << ((entry_size - 1) * 8));
      ++p;
      for (int n = entry_size - 2; n >= 0; --n) {
        v = v | ((((int) buf[p]) & 0x0FF) << (n * 8));
        ++p;
      }
      array[i] = v;
    }

    mutable_block = false;
    prepareMutate(immutable);

    return array;
  }
}

/**
 * Overwritten from IntArrayListBlock, returns the capacity of the block.
 */
public int getArrayLength() {
  return max_block_size;
}

/**
 * Makes the block mutable if it is immutable.  We must be synchronized on
 * 'lock' before this method is called.
 */
private void prepareMutate(boolean immutable) {
  // If list is to be mutable, take a private copy of the shared array so
  // that the immutable snapshot view is not disturbed by later changes.
  if (!immutable && !mutable_block) {
    array = (int[]) array.clone();
    mutable_block = true;
  }
}

/**
 * Overwritten from IntArrayListBlock, returns the last entry of the block.
 */
public int topInt() {
  if (count == 0) {
    throw new Error("No first int in block.");
  }
  synchronized (lock) {
    if (array == null) {
      // Not loaded from the store yet - use the value cached from the
      // index block header.
      return (int) last_entry;
    }
    else {
      return array[count - 1];
    }
  }
}

/**
 * Overwritten from IntArrayListBlock, returns the first entry of the
 * block.
 */
public int bottomInt() {
  if (count == 0) {
    throw new Error("No first int in block.");
  }
  synchronized (lock) {
    if (array == null) {
      // Not loaded from the store yet - use the value cached from the
      // index block header.
      return (int) first_entry;
    }
    else {
      return array[0];
    }
  }
}

}

/**
 * The IntegerListInterface implementation that is used to represent a
 * mutable snapshot of the indices at a given point in time.
*/
private final class IndexIntegerList extends AbstractBlockIntegerList {

  /**
   * The number of the index in the store that this list represents.
   */
  private int index_num;

  /**
   * The maximum block size.
   */
  private int max_block_size;

  /**
   * Set to true when disposed.
   */
  private boolean disposed = false;

  /**
   * The mapped elements that were deleted.  Collected so the commit can
   * later reclaim the underlying store areas.
   */
  private ArrayList deleted_blocks = new ArrayList();

  /**
   * Constructs the list with the given set of blocks.
   */
  public IndexIntegerList(int index_num, int max_block_size,
                          MappedListBlock[] blocks) {
    super(blocks);
    this.index_num = index_num;
    this.max_block_size = max_block_size;
  }

  /**
   * Creates a new block for the list.  Fails if this list has already been
   * disposed.
   */
  protected IntegerListBlockInterface newListBlock() {
    if (!disposed) {
      return new MappedListBlock(max_block_size);
    }
    throw new Error("Integer list has been disposed.");
  }

  /**
   * We must maintain a list of deleted blocks.
   */
  protected void deleteListBlock(IntegerListBlockInterface list_block) {
    deleted_blocks.add(list_block);
  }

  /**
   * Returns the index number of this list.
   */
  public int getIndexNumber() {
    return index_num;
  }

  /**
   * Returns the array of all MappedListBlock that are in this list.
   */
  public MappedListBlock[] getAllBlocks() {
    return (MappedListBlock[])
               block_list.toArray(new MappedListBlock[block_list.size()]);
  }

  /**
   * Returns the array of all MappedListBlock that were deleted from this
   * list.
   */
  public MappedListBlock[] getDeletedBlocks() {
    return (MappedListBlock[])
         deleted_blocks.toArray(new MappedListBlock[deleted_blocks.size()]);
  }

  // Marks this list as disposed and releases its block references.
  public void dispose() {
    disposed = true;
    block_list = null;
  }

}

/**
 * Represents a single 'Index block' area in the store.
 *

* An index block area contains an entry for each index element in an index. * Each entry is 28 bytes in size and the area has a 16 byte header. *

* HEADER: ( version (int), reserved (int), index size (long) )
* ENTRY: ( first entry (long), last entry (long), * index element pointer (long), type/element size (int) ) *

* type/element size contains the number of elements in the block, and the * block compaction factor. For example, type 1 means the block contains * short sized index values, 2 is int sized index values, and 3 is long * sized index values. */ private class IndexBlock { /** * The number of references to this object. When this reaches 0, it is * safe to free any resources that this block deleted. */ private int reference_count; /** * The index of this block in the index set. */ private int index_num; /** * A pointer that references the area in the store. */ private final long index_block_p; /** * The total number of entries in the index block. */ private long block_entries; /** * The block size of elements in this block. */ private final int block_size; /** * The list of deleted areas that can safely be disposed when this object * is disposed. */ private ArrayList deleted_areas; /** * True if this block is marked as deleted. */ private boolean deleted = false; /** * Set to true when this index block is freed from the index store. */ private boolean freed = false; /** * The parent IndexBlock. This block is a child modification of the parent. */ private IndexBlock parent_block; /** * Constructs the IndexBlock. */ IndexBlock(int index_num, int block_size, long index_block_p) throws IOException { this.index_num = index_num; this.block_size = block_size; this.index_block_p = index_block_p; // Read the index count Area index_block_area = store.getArea(index_block_p); index_block_area.position(8); block_entries = index_block_area.getLong(); reference_count = 0; } /** * Sets the parent IndexBlock, the index that this index block succeeded. */ void setParentIndexBlock(IndexBlock parent) { this.parent_block = parent; } /** * Returns a list of pointers to all mapped blocks. 
*/ long[] getAllBlockPointers() throws IOException { // Create an area for the index block pointer Area index_block_area = store.getArea(index_block_p); // First create the list of block entries for this list long[] blocks = new long[(int) block_entries]; if (block_entries != 0) { index_block_area.position(16); for (int i = 0; i < block_entries; ++i) { // NOTE: We cast to 'int' here because of internal limitations. index_block_area.getLong(); index_block_area.getLong(); long element_p = index_block_area.getLong(); index_block_area.getInt(); blocks[i] = element_p; } } return blocks; } /** * Creates and returns an array of all the MappedListBlock objects that make * up this view of the index integer list. */ private MappedListBlock[] createMappedListBlocks() throws IOException { // Create an area for the index block pointer Area index_block_area = store.getArea(index_block_p); // First create the list of block entries for this list MappedListBlock[] blocks = new MappedListBlock[(int) block_entries]; if (block_entries != 0) { index_block_area.position(16); for (int i = 0; i < block_entries; ++i) { // NOTE: We cast to 'int' here because of internal limitations. long first_entry = index_block_area.getLong(); long last_entry = index_block_area.getLong(); long element_p = index_block_area.getLong(); int type_size = index_block_area.getInt(); // size is the first 24 bits (max size = 16MB) int element_count = type_size & 0x0FFF; byte type = (byte) ((type_size >>> 24) & 0x0F); blocks[i] = new MappedListBlock(first_entry, last_entry, element_p, element_count, type, block_size); } } return blocks; } /** * Creates and returns a mutable IndexIntegerList object based on this * view of the index. 
*/ IndexIntegerList createIndexIntegerList() throws IOException { // Create the MappedListBlock objects for this view MappedListBlock[] blocks = createMappedListBlocks(); // And return the IndexIntegerList return new IndexIntegerList(index_num, block_size, blocks); } /** * Copies this index block to the given Store and returns a pointer to the * block within the store. */ long copyTo(Store dest_store) throws IOException { // Create the MappedListBlock object list for this view MappedListBlock[] blocks = createMappedListBlocks(); try { dest_store.lockForWrite(); // Create the header area in the store for this block AreaWriter a = dest_store.createArea(16 + (blocks.length * 28)); long block_p = a.getID(); a.putInt(1); // version a.putInt(0); // reserved a.putLong(blocks.length); // block count for (int i = 0; i < blocks.length; ++i) { MappedListBlock entry = blocks[i]; long b_p = entry.copyTo(dest_store); int block_size = entry.size(); a.putLong(entry.first_entry); a.putLong(entry.last_entry); a.putLong(b_p); a.putInt(block_size | (((int) entry.getCompactType()) << 24)); } // Now finish the area initialization a.finish(); // Return pointer to the new area in dest_store. return block_p; } finally { dest_store.unlockForWrite(); } } /** * Recursively calls through the block hierarchy and deletes and blocks * that can be deleted. */ private boolean deleteBlockChain() { boolean parent_deleted = true; if (parent_block != null) { parent_deleted = parent_block.deleteBlockChain(); if (parent_deleted) { parent_block = null; } } // If the parent is deleted, if (parent_deleted) { // Can we delete this block? if (reference_count <= 0) { if (deleted && deleted_areas != null) { deleteAllAreas(deleted_areas); } deleted_areas = null; } else { // We can't delete this block so return false return false; } } return parent_deleted; } /** * Adds a reference to this object. 
*/ public synchronized void addReference() { if (freed) { throw new RuntimeException("Assertion failed: Block was freed."); } ++reference_count; } /** * Removes a reference to this object. */ public void removeReference() { boolean pending_delete = false; synchronized(this) { --reference_count; if (reference_count <= 0) { if (freed) { throw new RuntimeException( "Assertion failed: remove reference called too many times."); } if (!deleted && deleted_areas != null) { throw new RuntimeException( "Assertion failed: !deleted and deleted_areas != null"); } freed = true; if (deleted) { addDeletedArea(index_block_p); // Delete these areas pending_delete = true; } } } // synchronized(this) if (pending_delete) { synchronized(IndexSetStore.this) { deleteBlockChain(); } } } /** * Returns the number of references to this object. */ public synchronized int getReferenceCount() { return reference_count; } /** * Returns the block size that has been set on this list. */ public int getBlockSize() { return block_size; } /** * Returns the pointer to this index block in the store. */ public long getPointer() { return index_block_p; } /** * Marks this block as deleted. */ public synchronized void markAsDeleted() { deleted = true; } /** * Adds to the list of deleted areas in this block. */ public synchronized void addDeletedArea(long pointer) { if (deleted_areas == null) { deleted_areas = new ArrayList(); } deleted_areas.add(new Long(pointer)); } } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/IndexStore.java000066400000000000000000001170141330501023400246500ustar00rootroot00000000000000/** * com.mckoi.database.IndexStore 19 Sep 2001 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import java.io.IOException; import java.io.File; import java.io.DataOutputStream; import java.io.ByteArrayOutputStream; import java.util.ArrayList; import com.mckoi.util.IntegerListInterface; import com.mckoi.util.AbstractBlockIntegerList; import com.mckoi.util.BlockIntegerList; import com.mckoi.util.BlockIntegerList.IntArrayListBlock; import com.mckoi.util.IntegerListBlockInterface; import com.mckoi.util.IntegerIterator; import com.mckoi.util.IndexComparator; import com.mckoi.util.ByteBuffer; import com.mckoi.util.ByteArrayUtil; import com.mckoi.util.IntegerVector; import com.mckoi.util.UserTerminal; import com.mckoi.util.Cache; import com.mckoi.debug.*; /** * A class that manages the storage of a set of transactional index lists in a * way that is fast to modify. This class has a number of objectives; *

*

*

* None intuitively, this object also handles unique ids. * * @author Tobias Downer */ public final class IndexStore { /** * A DebugLogger object used to log debug messages to. */ private DebugLogger debug; /** * The name of the index store file. */ private File file; /** * The size of a 'block' element of index information in a list. This has * a direct relation to the size of the sectors in the store. This value * can be tuned for specific tables. For example, a table that will only * ever contain a few items can save disk space by having a smaller block * size. */ private int block_size; /** * The FixedSizeDataStore that contains all the data of the index store. */ private FixedSizeDataStore index_store; /** * The list of table sector entries that are currently committed. Each * entry of this list points to a table index list. The list is formatted * as follows; *

   *   0 (byte)         - the type of block.
   *   1 (int)          - the number of blocks in this list.
   *   5 (int)          - the sector of column status information or -1 if
   *                      no stats available.
   *   9 to (n * (4 + 4 + 4 + 2))
   *                    - the sector (int), the first and last entry of the
   *                      block and the number of indices in the block
   *                      (short) of each block in this list.
   *   9 + (n * (4 + 4 + 4 + 2)) .... [ next block ] ....
   * 
*/ private ByteBuffer index_table_list; private byte[] index_table_list_buf; /** * The start sector where the block allocation information is currently * stored. */ private int allocation_sector; /** * The current of the allocation information. */ private int allocation_length; /** * The list of SnapshotIndexSet objects returned via the * 'getSnapshotIndexSet' method. This can be inspected to find all sectors * currently being used to store index information. */ private ArrayList memory_index_set_list; /** * The list of SnapshotIndexSet objects that have been deleted and are * ready for garbage collection. */ private ArrayList index_set_garbage; /** * Unique id field that contains a unique number that can be incremented * atomically. */ private long unique_id; /** * A cache of int[] array blocks that are accessed by this store. */ private Cache sector_cache; // private long cache_hit = 0, cache_miss = 0, cache_access = 0; /** * Constructs the IndexStore. * * @param file_name the path to the file of the index store in the file * system. */ public IndexStore(File file_name, DebugLogger logger) { this.debug = logger; this.file = file_name; this.memory_index_set_list = new ArrayList(); this.index_set_garbage = new ArrayList(); this.sector_cache = new Cache(47, 47, 10); } // ---------- Private methods ---------- /** * Reads the index table allocation list in to the ByteBuffer object. The * position of the table allocation list can be determined by looking in the * reserved area of the index file. 
*/ private synchronized void readIndexTableList() throws IOException { // Read the reserved area for the sector of the allocation information byte[] buf = new byte[32]; index_store.readReservedBuffer(buf, 0, 32); allocation_sector = ByteArrayUtil.getInt(buf, 0); allocation_length = ByteArrayUtil.getInt(buf, 4); unique_id = ByteArrayUtil.getLong(buf, 8); // Read the entire allocation information into the ByteBuffer buf = new byte[allocation_length]; index_store.readAcross(allocation_sector, buf, 0, allocation_length); index_table_list_buf = new byte[allocation_length]; index_table_list = new ByteBuffer(index_table_list_buf); index_table_list.put(buf); } /** * Initializes the index store to a blank state. */ private synchronized void initBlank() throws IOException { // Write the blank allocation area first allocation_length = 0; byte[] buf = new byte[allocation_length]; allocation_sector = index_store.writeAcross(buf, 0, buf.length); // Write the reserved area buf = new byte[32]; ByteArrayUtil.setInt(allocation_sector, buf, 0); ByteArrayUtil.setInt(allocation_length, buf, 4); ByteArrayUtil.setLong(1, buf, 8); index_store.writeReservedBuffer(buf, 0, 32); } // ---------- Public methods ---------- /** * Returns true if the index store file exists. */ public synchronized boolean exists() throws IOException { return file.exists(); } /** * Creates a new black index store and returns leaving the newly created * store in an open state. This method initializes the various data in * the index store for a blank set of index tables. Must call the 'init' * method after this is called. *

* @param block_size the number of ints stored in each block. This * can be optimized for specific use. Must be between 0 and 32768. */ public synchronized void create(int block_size) throws IOException { // Make sure index store is closed if (index_store != null && !index_store.isClosed()) { throw new Error("Index store is already open."); } if (block_size > 32767) { throw new Error("block_size must be less than 32768"); } if (exists()) { throw new IOException("Index store file '" + file + "' already exists."); } // 'unique_id' now starts at 1 as requested unique_id = 1; // Set the block size this.block_size = block_size; // Calculate the size of a sector. The sector size is block_size * 4 int sector_size = block_size * 4; // NOTE: We don't cache access because the IndexStore manages caching this.index_store = new FixedSizeDataStore(file, sector_size, false, debug); // Create the index store file index_store.open(false); // Initialize the index store with blank information. initBlank(); } /** * Opens this index store. If 'read_only' is set to true then the store * is opened in read only mode. *

* Returns true if opening the store was dirty (was not closed properly) and * may need repair. *

* If the index store does not exist before this method is called then it * is created. */ public synchronized boolean open(boolean read_only) throws IOException { // Make sure index store is closed if (index_store != null && !index_store.isClosed()) { throw new Error("Index store is already open."); } if (index_store == null) { // NOTE: We don't cache access because the IndexStore manages caching this.index_store = new FixedSizeDataStore(file, -1, false, debug); } // Open the index store file boolean dirty_open = index_store.open(read_only); // What's the sector size? int sector_size = index_store.getSectorSize(); // Assert that sector_size is divisible by 4 if (sector_size % 4 != 0) { throw new Error("Assert failed, sector size must be divisible by 4"); } // The block size this.block_size = sector_size / 4; return dirty_open; } /** * Initializes the IndexStore. Must be called after it is opened for * normal use, however it should not be called if we are fixing or repairing * the store. */ public synchronized void init() throws IOException { // Read the index store and set up this store with the information. readIndexTableList(); } /** * Performs checks to determine that the index store * is stable. If an IndexStore is not stable and can not be fixed * cleanly then it deletes all information in the store and returns false * indicating the index information must be rebuilt. *

* Assumes the index store has been opened previous to calling this. *

* Returns true if the IndexStore is stable. */ public synchronized boolean fix(UserTerminal terminal) throws IOException { // Open the index store file index_store.fix(terminal); // Read the index store and set up this store with the information. readIndexTableList(); // The number of sectors (used and deleted) in the store. int raw_sector_count = index_store.rawSectorCount(); // Check that at least the reserved area is stable try { // Read the reserved area for the sector of the allocation information byte[] buf = new byte[32]; index_store.readReservedBuffer(buf, 0, 32); } catch (IOException e) { terminal.println("! Index store is irrepairable - " + "reserved area is missing."); // An IOException here means the table file is lost because we've lost // the unique sequence key for the table. throw new IOException("Irrepairable index store."); } try { readIndexTableList(); // A running count of all index items in all lists long used_block_count = 0; // A running count of all block sizes long total_block_count = 0; // Contains a list of all the sectors referenced BlockIntegerList sector_list = new BlockIntegerList(); // Set to the start of the buffer index_table_list.position(0); // Look at all the information in index_table_list and make sure it // is correct. while (index_table_list.position() < index_table_list.limit()) { byte type = index_table_list.getByte(); int block_count = index_table_list.getInt(); int stat_sector = index_table_list.getInt(); if (stat_sector != -1) { boolean b = sector_list.uniqueInsertSort(stat_sector); if (b == false) { terminal.println("! Index store is not stable - " + "double reference to stat_sector."); return false; } // Check this sector exists and is not deleted. if (stat_sector < 0 || stat_sector >= raw_sector_count || index_store.isSectorDeleted(stat_sector)) { terminal.println("! 
Index store is not stable - " + "referenced sector is deleted."); return false; } } for (int i = 0; i < block_count; ++i) { int first_entry = index_table_list.getInt(); int last_entry = index_table_list.getInt(); int block_sector = index_table_list.getInt(); short int_count = index_table_list.getShort(); // Update statistics used_block_count += int_count; total_block_count += block_size; // Block sector not double referenced? boolean b = sector_list.uniqueInsertSort(block_sector); if (b == false) { terminal.println("! Index store is not stable - " + "double reference to block sector."); return false; } // Block sector is present and not deleted. if (block_sector < 0 || block_sector >= raw_sector_count || index_store.isSectorDeleted(block_sector)) { terminal.println("! Index store is not stable - " + "referenced sector is deleted."); return false; } // Read the block byte[] block_contents = index_store.getSector(block_sector); // Check the first and last entry are the same as in the header. if (int_count > 0) { if (ByteArrayUtil.getInt(block_contents, 0) != first_entry || ByteArrayUtil.getInt(block_contents, (int_count - 1) * 4) != last_entry) { terminal.println("! A block of an index list does not " + "correctly correspond to its header info."); return false; } } } // For each block in a list } // while (position < limit) // Everything is good terminal.println("- Index store is stable."); // The total count of all index entries in the store terminal.println("- Total used block count = " + used_block_count); // The total space available in the store terminal.println("- Total available block count = " + total_block_count); // Calculate utilization if (total_block_count != 0) { double utilization = ((float) used_block_count / (float) total_block_count) * 100f; terminal.println("- Index store utilization = " + utilization + "%"); } return true; } catch (IOException e) { terminal.println("! 
IO Error scanning index store: " + e.getMessage()); return false; } } /** * Returns true if this store is read only. */ public synchronized boolean isReadOnly() { return index_store.isReadOnly(); } /** * Deletes the store. Must have been closed before this is called. */ public synchronized void delete() { index_store.delete(); } /** * Copies the persistant part of this to another store. Must be open * when this is called. */ public synchronized void copyTo(File path) throws IOException { index_store.copyTo(path); } /** * Cleanly closes the index store. */ public synchronized void close() throws IOException { index_store.close(); sector_cache = null; memory_index_set_list = null; index_set_garbage = null; } /** * Flushes all information in this index store to the file representing this * store in the file system. This is called to persistantly update the * state of the index store. */ public synchronized void flush() throws IOException { // Grab hold of the old allocation information int old_sector = allocation_sector; int old_length = allocation_length; // Write the index_table_list to the store allocation_length = index_table_list_buf.length; allocation_sector = index_store.writeAcross(index_table_list_buf, 0, allocation_length); // Write to the reserved area thus 'committing' the changes ByteArrayUtil.setInt(allocation_sector, flush_buffer, 0); ByteArrayUtil.setInt(allocation_length, flush_buffer, 4); ByteArrayUtil.setLong(unique_id, flush_buffer, 8); index_store.writeReservedBuffer(flush_buffer, 0, 32); // Delete the old allocation information index_store.deleteAcross(old_sector); } private byte[] flush_buffer = new byte[32]; /** * Performs a hard synchronization of this index store. This will force the * OS to synchronize the contents of the data store. *

* For this to be useful, 'flush' should be called before a hardSynch. */ public synchronized void hardSynch() throws IOException { index_store.hardSynch(); } /** * The current unique id. */ long currentUniqueID() { return unique_id - 1; } /** * Atomically returns the next 'unique_id' value from this file. */ long nextUniqueID() { return unique_id++; } /** * Sets the unique id for this store. This must only be used under * extraordinary circumstances, such as restoring from a backup, or * converting from one file to another. */ void setUniqueID(long value) { unique_id = value + 1; } /** * Returns the block size of this store. */ int getBlockSize() { return block_size; } /** * Adds a number of blank index tables to the index store. For example, * we may want this store to contain 16 index lists. *

* NOTE: This doesn't write the updated information to the file. You must * call 'flush' to write the information to the store. */ public synchronized void addIndexLists(int count, byte type) { int add_size = count * (1 + 4 + 4); ByteBuffer old_buffer = index_table_list; // Create a new buffer index_table_list_buf = new byte[old_buffer.limit() + add_size]; index_table_list = new ByteBuffer(index_table_list_buf); // Put the old buffer in to the new buffer old_buffer.position(0); index_table_list.put(old_buffer); // For each new list for (int i = 0; i < count; ++i) { // The type of the block index_table_list.putByte(type); // The number of blocks in the table list index_table_list.putInt(0); // The sector of statistics information (defaults to -1) index_table_list.putInt(-1); } } /** * Adds a SnapshotIndexSet to the list of sets that this store has * dispatched. */ private synchronized void addIndexSetToList(IndexSet index_set) { memory_index_set_list.add(index_set); } /** * Removes a SnapshotIndexSet from the list of sets that this store * is managing. *

* NOTE: This may be called by the finalizer of the IndexSet object if the * index_set is not disposed. */ private synchronized void removeIndexSetFromList(IndexSet index_set) { // If the store is closed, just return. if (index_set_garbage == null) { return; } SnapshotIndexSet s_index_set = (SnapshotIndexSet) index_set; // Remove from the set list boolean b = memory_index_set_list.remove(index_set); if (!b) { throw new Error("IndexSet was not in the list!"); } // Add to the list of garbage if it has deleted sectors if (s_index_set.hasDeletedSectors()) { index_set_garbage.add(index_set); // Do a garbage collection cycle. The lowest id that's currently open. long lowest_id = -1; //Integer.MAX_VALUE; if (memory_index_set_list.size() > 0) { lowest_id = ((SnapshotIndexSet) memory_index_set_list.get(0)).getID(); } // Delete all sectors in the garbage list that have an id lower than // this. boolean deleted; do { SnapshotIndexSet set = (SnapshotIndexSet) index_set_garbage.get(0); deleted = set.getID() < lowest_id; if (deleted) { // The list of sectors to delete IntegerVector to_delete = set.allDeletedSectors(); // For each sector to delete final int sz = to_delete.size(); int n = 0; try { for (n = 0; n < sz; ++n) { int sector = to_delete.intAt(n); index_store.deleteSector(sector); } } catch (IOException e) { debug.write(Lvl.ERROR, this, "Error deleting index " + n + " of list " + to_delete); debug.writeException(e); throw new Error("IO Error: " + e.getMessage()); } index_set_garbage.remove(0); } // if (deleted) } while (deleted && index_set_garbage.size() > 0); } } /** * Returns a current snapshot of the current indexes that are committed in * this store. The returned object can be used to create mutable * IntegerListInterface objects. The created index lists are isolated from * changes made to the rest of the indexes after this method returns. *

* A transaction must grab an IndexSet object when it opens. *

* NOTE: We MUST guarentee that the IndexSet is disposed when the * transaction finishes. */ public synchronized IndexSet getSnapshotIndexSet() { // We must guarentee that we can't generate SnapshotIndexSet // concurrently because it maintains its own ID key system. IndexSet index_set = new SnapshotIndexSet(index_table_list_buf, allocation_length); addIndexSetToList(index_set); return index_set; } /** * Commits changes made to a snapshop of an IndexSet as being permanent * changes to the state of the index store. This will generate an error if * the given IndexSet is not the last set returned from the * 'getSnapshotIndexSet' method. *

* For this to be used, during the transaction commit function a * 'getSnapshopIndexSet' must be obtained, changes made to it from info in * the journal, then a call to this method. There must be a guarentee that * 'getSnapshotIndexSet' is not called again during this process. *

* NOTE: This doesn't write the updated information to the file. You must * call 'flush' to write the information to the store. *

* NOTE: We must be guarenteed that when this method is called no other * calls to other methods in this object can be called. */ public synchronized void commitIndexSet(IndexSet index_set) { // index_set must be the last in the list of memory_index_set_list if (memory_index_set_list.get(memory_index_set_list.size() - 1) != index_set) { throw new Error("Can not commit IndexSet because it is not current."); } SnapshotIndexSet iset = (SnapshotIndexSet) index_set; byte[] new_buffer = iset.commit(); index_table_list_buf = new_buffer; index_table_list = new ByteBuffer(index_table_list_buf, 0, index_table_list_buf.length); } /** * Returns a string that contains diagnostic information. */ public synchronized String statusString() throws IOException { return index_store.statusString(); } // ---------- Inner classes ---------- /** * A unique key that is incremented each time a new IndexSet object is * created. */ private long SET_ID_KEY = 0; /** * A convenience static empty integer list array. */ private static IndexIntegerList[] EMPTY_INTEGER_LISTS = new IndexIntegerList[0]; /** * The implementation of IndexSet which represents a mutable snapshot of * the indices stored in this set. */ private class SnapshotIndexSet implements IndexSet { /** * A unique id given to this index set. */ private long set_id; /** * A snapshot of the allocation table. */ private ByteBuffer buf; /** * The length of the allocation table. */ private int buf_length; /** * The list of IndexIntegerList objects that have been returned via the * 'getIndex(n)' method. */ private ArrayList integer_lists; /** * The sectors that are to be deleted when a garbage collection cycle * occurs. */ private IntegerVector deleted_sectors; /** * Constructor. */ public SnapshotIndexSet(byte[] in_buf, int length) { this.set_id = SET_ID_KEY; ++SET_ID_KEY; // Wrap around a new ByteBuffer but we DON'T make a copy of the byte // array itself. 
We must be careful that the underlying byte[] array // is protected from modifications (it's immutable). this.buf = new ByteBuffer(in_buf); this.buf_length = length; } /** * Returns all the lists that have been created by calls to * 'getIndex' */ public IndexIntegerList[] getAllLists() { if (integer_lists == null) { return EMPTY_INTEGER_LISTS; } else { return (IndexIntegerList[]) integer_lists.toArray( new IndexIntegerList[integer_lists.size()]); } } /** * Returns the ByteBuffer for the snapshot of this store when it was * created. */ private ByteBuffer getByteBuffer() { return buf; } /** * Returns the unique id associated with this index store. */ long getID() { return set_id; } /** * Returns true if this store has deleted items. */ boolean hasDeletedSectors() { return (deleted_sectors != null && deleted_sectors.size() > 0); } /** * Returns the sectors that were deleted when this store committed. */ IntegerVector allDeletedSectors() { return deleted_sectors; } /** * Creates a new buffer for an index store if it is committed. This * also sets up the 'deleted_sectors' list which is a list of records * deleted when this store commits. */ byte[] commit() { if (deleted_sectors != null) { throw new Error("'deleted_sectors' contains sectors to delete."); } deleted_sectors = new IntegerVector(); // Look for any indices that have changed in the IndexSet. IndexIntegerList[] lists = getAllLists(); // Make all the lists immutable. 
int sz = lists.length; for (int i = 0; i < sz; ++i) { lists[i].setImmutable(); } // The new buffer we are making ByteArrayOutputStream bout = new ByteArrayOutputStream(); DataOutputStream dout = new DataOutputStream(bout); // The original snapshot buffer ByteBuffer snapshot_buf = getByteBuffer(); synchronized (snapshot_buf) { int buf_size = snapshot_buf.limit(); snapshot_buf.position(0); try { int index_num = 0; while (snapshot_buf.position() < buf_size) { // Read the information for this block byte list_type = snapshot_buf.getByte(); int blocks_count = snapshot_buf.getInt(); int stat_sector = snapshot_buf.getInt(); byte[] buf = new byte[blocks_count * ( 4 + 4 + 4 + 2 )]; snapshot_buf.get(buf, 0, buf.length); // System.out.println("blocks_count = " + blocks_count); // System.out.println("blocks_capacity = " + blocks_capacity); // Is this index in the list of tables that changed? IndexIntegerList list = null; for (int i = 0; i < sz && list == null; ++i) { if (lists[i].getIndexNumber() == index_num) { // Found this number list = lists[i]; } } // We found the list in the set if (list != null) { // The blocks that were deleted (if any). MappedListBlock[] deleted_blocks = list.getDeletedBlocks(); for (int n = 0; n < deleted_blocks.length; ++n) { // Put all deleted blocks on the list to GC MappedListBlock block = (MappedListBlock) deleted_blocks[n]; // Make sure the block is mapped to a sector int sector = block.getIndexSector(); if (sector != -1) { deleted_sectors.addInt(sector); } } // So we need to construct a new set. 
// The blocks in the list, MappedListBlock[] blocks = list.getAllBlocks(); blocks_count = blocks.length; dout.writeByte(list_type); dout.writeInt(blocks_count); dout.writeInt(stat_sector); // For each block for (int n = 0; n < blocks_count; ++n) { MappedListBlock block = blocks[n]; int bottom_int = 0; int top_int = 0; short block_size = (short) block.size(); if (block_size > 0) { bottom_int = block.bottomInt(); top_int = block.topInt(); } int block_sector = block.getIndexSector(); // Is the block new or was it changed? if (block_sector == -1 || block.hasChanged()) { // If this isn't -1 then put this sector on the list of // sectors to delete during GC. if (block_sector != -1) { deleted_sectors.addInt(block_sector); } // This is a new block or a block that's been changed // Write the block to the file system block_sector = block.writeToStore(); } // Write the sector dout.writeInt(bottom_int); dout.writeInt(top_int); dout.writeInt(block_sector); dout.writeShort(block_size); } } // We didn't find the list else { // So what we do is copy the contents of the buffer dout.writeByte(list_type); dout.writeInt(blocks_count); dout.writeInt(stat_sector); dout.write(buf, 0, buf.length); } ++index_num; } // Flush the stream (strictly not necessary). dout.flush(); } catch (IOException e) { debug.writeException(e); throw new Error(e.getMessage()); } } // synchronized (snapshot_buf) // The finished array byte[] arr = bout.toByteArray(); // return the new buffer. return arr; } // ---------- Implemented from IndexSet ---------- public IntegerListInterface getIndex(int n) { int original_n = n; // Synchronize 'buf' for safe access. synchronized(buf) { // Create if not exist. if (integer_lists == null) { integer_lists = new ArrayList(); } else { // Assertion: If the list already contains this value throw an error. 
for (int o = 0; o < integer_lists.size(); ++o) { if (((IndexIntegerList) integer_lists.get(o)).getIndexNumber() == original_n) { throw new Error( "IntegerListInterface already created for this n."); } } } buf.position(0); while (n > 0) { byte list_type = buf.getByte(); // Ignore int offset = buf.getInt(); int stat_sector = buf.getInt(); // Ignore buf.position(buf.position() + (offset * (4 + 4 + 4 + 2))); --n; } int list_type = buf.getByte(); int list_size = buf.getInt(); int list_stat_sector = buf.getInt(); // sector_list is an ordered list of all sectors of blocks in the index // list. // Read in each sector and construct a MappedListBlock for each one. MappedListBlock[] list_blocks = new MappedListBlock[list_size]; for (int i = 0; i < list_size; ++i) { int first_entry = buf.getInt(); int last_entry = buf.getInt(); int block_sector = buf.getInt(); short block_size = buf.getShort(); list_blocks[i] = new MappedListBlock( first_entry, last_entry, block_sector, block_size); } // Create and return the mapped index integer list. IndexIntegerList ilist = new IndexIntegerList(original_n, list_blocks); integer_lists.add(ilist); return ilist; } // synchronized(buf) } public void dispose() { // Dispose all the integer lists created by this object. synchronized (buf) { if (integer_lists != null) { for (int i = 0; i < integer_lists.size(); ++i) { IndexIntegerList ilist = (IndexIntegerList) integer_lists.get(i); ilist.dispose(); } integer_lists = null; } } buf = null; removeIndexSetFromList(this); } public void finalize() { if (buf != null) { debug.write(Lvl.WARNING, this, "IndexStore was not disposed!"); // We remove it manually from the index set list removeIndexSetFromList(this); // debug.writeException(DEBUG_CONSTRUCTOR); } } } /** * An IntegerListBlockInterface implementation that maps a block of a list * to an underlying file system representation. */ private final class MappedListBlock extends IntArrayListBlock { /** * The first entry in the block. 
*/ private int first_entry; /** * The last entry in the block. */ private int last_entry; /** * The sector in the index file that this block can be found. */ private int index_sector; /** * Lock object. */ private Object lock = new Object(); /** * Set to true if the loaded block is mutable. */ private boolean mutable_block; /** * Constructor. */ public MappedListBlock(int first_int, int last_int, int mapped_sector, int size) { this.first_entry = first_int; this.last_entry = last_int; this.index_sector = mapped_sector; count = size; array = null; } /** * Creates an empty block. */ public MappedListBlock(int block_size_in) { super(block_size_in); this.index_sector = -1; } /** * Returns the sector in the file of this block. */ public int getIndexSector() { return index_sector; } /** * Writes this block to a new sector in the index file and updates the * information in this object accordingly. *

* Returns the sector the block was written to. */ public int writeToStore() throws IOException { // Convert the int[] array to a byte[] array. int block_count = block_size; byte[] arr = new byte[block_count * 4]; int p = 0; for (int i = 0; i < block_count; ++i) { int v = array[i]; ByteArrayUtil.setInt(v, arr, p); p += 4; } // Write the sector to the store synchronized (IndexStore.this) { index_sector = index_store.addSector(arr, 0, arr.length); } // Write this sector to the cache synchronized (sector_cache) { sector_cache.put(new Integer(index_sector), array); } // Once written, the block is invalidated lock = null; return index_sector; } /** * Overwritten from IntArrayListBlock, this returns the int[] array that * contains the contents of the block. In this implementation, we * determine if the array has been read from the index file. If it * hasn't we read it in, otherwise we use the version in memory. */ public int[] getArray(boolean immutable) { // We must synchronize this entire block because otherwise we could // return a partially loaded array. synchronized (lock) { if (array != null) { prepareMutate(immutable); return array; } // Pull this from a cache Object elem; synchronized (sector_cache) { elem = sector_cache.get(new Integer(index_sector)); } if (elem != null) { array = (int[]) elem; mutable_block = false; prepareMutate(immutable); return array; } int block_count = block_size; // Read the sector from the index file. array = new int[block_count]; synchronized (IndexStore.this) { try { array = index_store.getSectorAsIntArray(index_sector, array); } catch (IOException e) { debug.writeException(e); throw new Error("IO Error: " + e.getMessage()); } } // Put in the cache synchronized (sector_cache) { sector_cache.put(new Integer(index_sector), (int[]) array); } mutable_block = false; prepareMutate(immutable); return array; } } /** * Overwritten from IntArrayListBlock, returns the capacity of the block. 
*/ public int getArrayLength() { return block_size; } /** * Makes the block mutable if it is immutable. We must be synchronized on * 'lock' before this method is called. */ private void prepareMutate(boolean immutable) { // If list is to be mutable if (!immutable && !mutable_block) { array = (int[]) array.clone(); mutable_block = true; } } /** * Overwritten from IntArrayListBlock, returns the last entry of the block. */ public int topInt() { if (count == 0) { throw new Error("No first int in block."); } synchronized (lock) { if (array == null) { return last_entry; } else { return array[count - 1]; } } } /** * Overwritten from IntArrayListBlock, returns the first entry of the * block. */ public int bottomInt() { if (count == 0) { throw new Error("No first int in block."); } synchronized (lock) { if (array == null) { return first_entry; } else { return array[0]; } } } } /** * The IntegerListInterface implementation that is used to represent a * mutable snapshop of the indices at a given point in time. */ private final class IndexIntegerList extends AbstractBlockIntegerList { /** * The number of the index in the store that this list represents. */ private int index_num; /** * Set to true when disposed. */ private boolean disposed = false; /** * The mapped elements that were deleted. */ private ArrayList deleted_blocks = new ArrayList(); /** * Constructs the list with the given set of blocks. */ public IndexIntegerList(int index_num, MappedListBlock[] blocks) { super(blocks); this.index_num = index_num; } /** * Creates a new block for the list. */ protected IntegerListBlockInterface newListBlock() { if (!disposed) { return new MappedListBlock(block_size); } throw new Error("Integer list has been disposed."); } /** * We must maintain a list of deleted blocks. */ protected void deleteListBlock(IntegerListBlockInterface list_block) { deleted_blocks.add(list_block); } /** * Returns the index number of this list. 
*/ public int getIndexNumber() { return index_num; } /** * Returns the array of all MappedListBlock that are in this list. */ public MappedListBlock[] getAllBlocks() { return (MappedListBlock[]) block_list.toArray(new MappedListBlock[block_list.size()]); } /** * Returns the array of all MappedListBlock that were deleted from this * list. */ public MappedListBlock[] getDeletedBlocks() { return (MappedListBlock[]) deleted_blocks.toArray(new MappedListBlock[deleted_blocks.size()]); } public void dispose() { disposed = true; block_list = null; } } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/InsertSearch.java000066400000000000000000000265531330501023400251650ustar00rootroot00000000000000/** * com.mckoi.database.InsertSearch 14 Mar 1998 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import com.mckoi.util.IntegerVector; import com.mckoi.util.BlockIntegerList; import com.mckoi.util.IntegerListInterface; import com.mckoi.util.IndexComparator; import com.mckoi.util.IntegerIterator; import java.util.Comparator; import java.util.Arrays; import java.io.*; /** * This is a SelectableScheme similar in some ways to the binary tree. When * a new row is added, it is inserted into a sorted list of rows. We can then * use this list to select out the sorted list of elements. *

* This requires less memory than the BinaryTree, however it is not as fast. * Even though, it should still perform fairly well on medium size data sets. * On large size data sets, insert and remove performance may suffer. *

* This object retains knowledge of all set elements unlike BlindSearch which * has no memory overhead. *

* Performance should be very comparable to BinaryTree for sets that aren't * altered much. * * @author Tobias Downer */ public final class InsertSearch extends CollatedBaseSearch { /** * The sorted list of rows in this set. This is sorted from min to max * (not sorted by row number - sorted by entity row value). */ private IntegerListInterface set_list; /** * If this is true, then this SelectableScheme records additional rid * information that can be used to very quickly identify whether a value is * greater, equal or less. */ boolean RECORD_UID; /** * The IndexComparator that we use to refer elements in the set to actual * data objects. */ private IndexComparator set_comparator; // ----- DEBUGGING ----- /** * If this is immutable, this stores the number of entries in 'set_list' * when this object was made. */ private int DEBUG_immutable_set_size; /** * The Constructor. */ public InsertSearch(TableDataSource table, int column) { super(table, column); set_list = new BlockIntegerList(); // The internal comparator that enables us to sort and lookup on the data // in this column. setupComparator(); } /** * Constructor sets the scheme with a pre-sorted list. The Vector 'vec' * should not be used again after this is called. 'vec' must be sorted from * low key to high key. */ public InsertSearch(TableDataSource table, int column, IntegerVector vec) { this(table, column); for (int i = 0; i < vec.size(); ++i) { set_list.add(vec.intAt(i)); } // NOTE: This must be removed in final, this is a post condition check to // make sure 'vec' is infact sorted checkSchemeSorted(); } /** * Constructor sets the scheme with a pre-sorted list. The list 'list' * should not be used again after this is called. 'list' must be sorted from * low key to high key. 
*/ InsertSearch(TableDataSource table, int column, IntegerListInterface list) { this(table, column); this.set_list = list; // NOTE: This must be removed in final, this is a post condition check to // make sure 'vec' is infact sorted checkSchemeSorted(); } /** * Constructs this as a copy of the given, either mutable or immutable * copy. */ private InsertSearch(TableDataSource table, InsertSearch from, boolean immutable) { super(table, from.getColumn()); if (immutable) { setImmutable(); } if (immutable) { // Immutable is a shallow copy set_list = from.set_list; DEBUG_immutable_set_size = set_list.size(); } else { set_list = new BlockIntegerList(from.set_list); } // Do we generate lookup caches? RECORD_UID = from.RECORD_UID; // The internal comparator that enables us to sort and lookup on the data // in this column. setupComparator(); } /** * Sets the internal comparator that enables us to sort and lookup on the * data in this column. */ private void setupComparator() { set_comparator = new IndexComparator() { private int internalCompare(int index, TObject cell2) { TObject cell1 = getCellContents(index); return cell1.compareTo(cell2); } public int compare(int index, Object val) { return internalCompare(index, (TObject) val); } public int compare(int index1, int index2) { TObject cell = getCellContents(index2); return internalCompare(index1, cell); } }; } /** * Inserts a row into the list. This will always be thread safe, table * changes cause a write lock which prevents reads while we are writing to * the table. */ public void insert(int row) { if (isImmutable()) { throw new Error("Tried to change an immutable scheme."); } final TObject cell = getCellContents(row); set_list.insertSort(cell, row, set_comparator); } /** * Removes a row from the list. This will always be thread safe, table * changes cause a write lock which prevents reads while we are writing to * the table. 
*/ public void remove(int row) { if (isImmutable()) { throw new Error("Tried to change an immutable scheme."); } TObject cell = getCellContents(row); int removed = set_list.removeSort(cell, row, set_comparator); if (removed != row) { throw new Error("Removed value different than row asked to remove. " + "To remove: " + row + " Removed: " + removed); } } /** * This needs to be called to access 'set_comparator' in thread busy * methods. Because creating a UID cache will modify set_comparator, we * need to make sure we access this variable safely. *

* NOTE: This is a throwback method for an idea I had to speed up the * 'select*' methods, but it proved unworkable. The reason being that * the UID only contains knowledge of relations between rows, and the * 'select*' methods find the relationship of a TObject in the column * set. */ private final IndexComparator safeSetComparator() { // synchronized (uid_lock) { return set_comparator; // } } /** * Reads the entire state of the scheme from the input stream. Throws an * exception if the scheme is not empty. */ public void readFrom(InputStream in) throws IOException { if (set_list.size() != 0) { throw new RuntimeException("Error reading scheme, already a set in the Scheme"); } DataInputStream din = new DataInputStream(in); int vec_size = din.readInt(); int row_count = getTable().getRowCount(); // Check we read in as many indices as there are rows in the table if (row_count != vec_size) { throw new IOException( "Different table row count to indices in scheme. " + "table=" + row_count + ", vec_size=" + vec_size); } for (int i = 0; i < vec_size; ++i) { int row = din.readInt(); if (row < 0) { // || row >= row_count) { set_list = new BlockIntegerList(); throw new IOException("Scheme contains out of table bounds index."); } set_list.add(row); } getSystem().stats().add(vec_size, "{session} InsertSearch.read_indices"); // NOTE: This must be removed in final, this is a post condition check to // make sure 'vec' is infact sorted checkSchemeSorted(); } /** * Writes the entire state of the scheme to the output stream. */ public void writeTo(OutputStream out) throws IOException { DataOutputStream dout = new DataOutputStream(out); int list_size = set_list.size(); dout.writeInt(list_size); IntegerIterator i = set_list.iterator(0, list_size - 1); while (i.hasNext()) { dout.writeInt(i.next()); } } /** * Returns an exact copy of this scheme including any optimization * information. The copied scheme is identical to the original but does not * share any parts. 
Modifying any part of the copied scheme will have no * effect on the original and vice versa. */ public SelectableScheme copy(TableDataSource table, boolean immutable) { // ASSERTION: If immutable, check the size of the current set is equal to // when the scheme was created. if (isImmutable()) { if (DEBUG_immutable_set_size != set_list.size()) { throw new Error("Assert failed: " + "Immutable set size is different from when created."); } } // We must create a new InsertSearch object and copy all the state // information from this object to the new one. return new InsertSearch(table, this, immutable); } /** * Disposes this scheme. */ public void dispose() { // Close and invalidate. set_list = null; set_comparator = null; } /** * Checks that the scheme is in sorted order. This is a debug check to * ensure we maintain a sorted index. * NOTE: This *MUST* be removed in a release version because it uses up * many cycles for each check. */ private void checkSchemeSorted() { // int list_size = set_list.size(); // DataCell last_cell = null; // for (int i = 0; i < list_size; ++i) { // int row = set_list.intAt(i); // DataCell this_cell = getCellContents(row); // if (last_cell != null) { // if (this_cell.compareTo(last_cell) < 0) { // throw new Error("checkSchemeSorted failed. Corrupt index."); // } // } // last_cell = this_cell; // } // if (Debug().isInterestedIn(Lvl.WARNING)) { // StringBuffer info_string = new StringBuffer(); // info_string.append("POST CONDITION CHECK - Checked index of size: "); // info_string.append(list_size); // info_string.append(". 
Sorted correctly (REMOVE THIS CHECK IN FINAL)"); // Debug().write(Lvl.WARNING, this, new String(info_string)); // } } // ---------- Implemented/Overwritten from CollatedBaseSearch ---------- protected int searchFirst(TObject val) { return set_list.searchFirst(val, safeSetComparator()); } protected int searchLast(TObject val) { return set_list.searchLast(val, safeSetComparator()); } protected int setSize() { return set_list.size(); } protected TObject firstInCollationOrder() { return getCellContents(set_list.get(0)); } protected TObject lastInCollationOrder() { return getCellContents(set_list.get(setSize() - 1)); } protected IntegerVector addRangeToSet(int start, int end, IntegerVector ivec) { if (ivec == null) { ivec = new IntegerVector((end - start) + 2); } IntegerIterator i = set_list.iterator(start, end); while (i.hasNext()) { ivec.addInt(i.next()); } return ivec; } /** * The select operations for this scheme. */ public IntegerVector selectAll() { IntegerVector ivec = new IntegerVector(set_list); return ivec; } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/InternalFunctionFactory.java000066400000000000000000002067021330501023400274010ustar00rootroot00000000000000/** * com.mckoi.database.InternalFunctionFactory 19 Sep 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package com.mckoi.database; import java.lang.reflect.*; import java.math.BigDecimal; import java.util.ArrayList; import java.util.HashMap; import java.util.Date; import java.util.Calendar; import java.util.Comparator; import java.util.Arrays; import java.util.Locale; import java.text.*; import java.io.InputStream; import java.io.IOException; import com.mckoi.util.Cache; import com.mckoi.database.global.SQLTypes; import com.mckoi.database.global.CastHelper; import com.mckoi.database.global.ByteLongObject; import com.mckoi.database.global.BlobAccessor; import com.mckoi.database.global.StringObject; import com.mckoi.database.global.StringAccessor; import com.mckoi.database.global.ObjectTranslator; import com.mckoi.database.jdbc.SQLQuery; import com.mckoi.util.BigNumber; /** * A FunctionFactory for all internal SQL functions (including aggregate, * mathematical, string functions). This FunctionFactory is registered with * the DatabaseSystem during initialization. * * @author Tobias Downer */ final class InternalFunctionFactory extends FunctionFactory { /** * Registers the function classes with this factory. 
*/
public void init() {

  // Parses a date/time/timestamp string
  addFunction("dateob", DateObFunction.class);
  addFunction("timeob", TimeObFunction.class);
  addFunction("timestampob", TimeStampObFunction.class);
  addFunction("dateformat", DateFormatFunction.class);
  // Casting functions
  addFunction("tonumber", ToNumberFunction.class);
  addFunction("sql_cast", SQLCastFunction.class);
  // String functions
  addFunction("lower", LowerFunction.class);
  addFunction("upper", UpperFunction.class);
  addFunction("concat", ConcatFunction.class);
  addFunction("length", LengthFunction.class);
  addFunction("substring", SubstringFunction.class);
  addFunction("sql_trim", SQLTrimFunction.class);
  addFunction("ltrim", LTrimFunction.class);
  addFunction("rtrim", RTrimFunction.class);
  // Security
  addFunction("user", UserFunction.class);
  addFunction("privgroups", PrivGroupsFunction.class);
  // Aggregate
  addFunction("count", CountFunction.class, FunctionInfo.AGGREGATE);
  addFunction("distinct_count",
              DistinctCountFunction.class, FunctionInfo.AGGREGATE);
  addFunction("avg", AvgFunction.class, FunctionInfo.AGGREGATE);
  addFunction("sum", SumFunction.class, FunctionInfo.AGGREGATE);
  addFunction("min", MinFunction.class, FunctionInfo.AGGREGATE);
  addFunction("max", MaxFunction.class, FunctionInfo.AGGREGATE);
  addFunction("aggor", AggOrFunction.class, FunctionInfo.AGGREGATE);
  // Mathematical
  addFunction("abs", AbsFunction.class);
  addFunction("sign", SignFunction.class);
  addFunction("mod", ModFunction.class);
  addFunction("round", RoundFunction.class);
  addFunction("pow", PowFunction.class);
  addFunction("sqrt", SqrtFunction.class);
  // Sequence operations
  addFunction("uniquekey", UniqueKeyFunction.class, FunctionInfo.STATE_BASED);
  addFunction("nextval", NextValFunction.class, FunctionInfo.STATE_BASED);
  addFunction("currval", CurrValFunction.class, FunctionInfo.STATE_BASED);
  addFunction("setval", SetValFunction.class, FunctionInfo.STATE_BASED);
  // Misc
  addFunction("hextobinary", HexToBinaryFunction.class);
  addFunction("binarytohex", BinaryToHexFunction.class);
  // Lists
  addFunction("least", LeastFunction.class);
  addFunction("greatest", GreatestFunction.class);
  // Branch
  addFunction("if", IfFunction.class);
  addFunction("coalesce", CoalesceFunction.class);
  // Object instantiation (Internal)
  addFunction("_new_JavaObject", JavaObjectInstantiation2.class);
  // Internal functions
  addFunction("i_frule_convert", ForeignRuleConvert.class);
  addFunction("i_sql_type", SQLTypeString.class);
  addFunction("i_view_data", ViewDataConvert.class);
  addFunction("i_privilege_string", PrivilegeString.class);

}

// ---------- The internal functions ----------

// ---------- Grouping functions ----------

// Aggregate COUNT.  For 'count(*)' (a glob parameter) or an empty group this
// returns the group size; otherwise it returns the number of rows in the
// group for which the argument expression does not evaluate to NULL.
private static class CountFunction extends AbstractFunction {

  public CountFunction(Expression[] params) {
    super("count", params);
    setAggregate(true);

    if (parameterCount() != 1) {
      throw new RuntimeException("'count' function must have one argument.");
    }
  }

  public TObject evaluate(GroupResolver group, VariableResolver resolver,
                          QueryContext context) {
    if (group == null) {
      throw new RuntimeException(
                       "'count' can only be used as an aggregate function.");
    }

    int size = group.size();
    TObject result;
    // if, count(*)
    if (size == 0 || isGlob()) {
      result = TObject.intVal(size);
    }
    else {
      // Otherwise we need to count the number of non-null entries in the
      // columns list(s).
      int total_count = size;

      Expression exp = getParameter(0);
      for (int i = 0; i < size; ++i) {
        // Evaluate the argument against each row of the group; each NULL
        // result reduces the count by one.
        TObject val =
                   exp.evaluate(null, group.getVariableResolver(i), context);
        if (val.isNull()) {
          --total_count;
        }
      }

      result = TObject.intVal(total_count);
    }

    return result;
  }

}

// --

// Aggregate COUNT(DISTINCT ...).  Evaluates all parameter expressions for
// every row of the group, sorts the resulting tuples, and counts the number
// of distinct (non-NULL) tuples.
private static class DistinctCountFunction extends AbstractFunction {

  public DistinctCountFunction(Expression[] params) {
    super("distinct_count", params);
    setAggregate(true);

    if (parameterCount() <= 0) {
      throw new RuntimeException(
                "'distinct_count' function must have at least one argument.");
    }
  }

  public TObject evaluate(GroupResolver group, VariableResolver resolver,
                          QueryContext context) {
    // There's some issues with implementing this function.
    // For this function to be efficient, we need to have access to the
    // underlying Table object(s) so we can use table indexing to sort the
    // columns.  Otherwise, we will need to keep in memory the group
    // contents so it can be sorted.  Or alternatively (and probably worst
    // of all) don't store in memory, but use an expensive iterative search
    // for non-distinct rows.
    //
    // An iterative search will be terrible for large groups with mostly
    // distinct rows.  But would be okay for large groups with few distinct
    // rows.

    if (group == null) {
      throw new RuntimeException(
                       "'count' can only be used as an aggregate function.");
    }

    final int rows = group.size();
    if (rows <= 1) {
      // If count of entries in group is 0 or 1
      return TObject.intVal(rows);
    }

    // Make an array of all cells in the group that we are finding which
    // are distinct.  Laid out row-major: tuple i occupies
    // group_r[i * cols .. i * cols + cols - 1].
    final int cols = parameterCount();
    final TObject[] group_r = new TObject[rows * cols];
    int n = 0;
    for (int i = 0; i < rows; ++i) {
      VariableResolver vr = group.getVariableResolver(i);
      for (int p = 0; p < cols; ++p) {
        Expression exp = getParameter(p);
        group_r[n + p] = exp.evaluate(null, vr, context);
      }
      n += cols;
    }

    // A comparator that sorts this set,
    // Compares two row indexes (boxed Integer) lexicographically over their
    // evaluated tuples in group_r.
    Comparator c = new Comparator() {
      public int compare(Object ob1, Object ob2) {
        int r1 = ((Integer) ob1).intValue();
        int r2 = ((Integer) ob2).intValue();

        // Compare row r1 with r2
        int index1 = r1 * cols;
        int index2 = r2 * cols;
        for (int n = 0; n < cols; ++n) {
          int v = group_r[index1 + n].compareTo(group_r[index2 + n]);
          if (v != 0) {
            return v;
          }
        }

        // If we got here then rows must be equal.
        return 0;
      }
    };

    // The list of indexes,
    Object[] list = new Object[rows];
    for (int i = 0; i < rows; ++i) {
      list[i] = new Integer(i);
    }

    // Sort the list,
    Arrays.sort(list, c);

    // The count of distinct elements, (there will always be at least 1)
    int distinct_count = 1;
    for (int i = 1; i < rows; ++i) {
      int v = c.compare(list[i], list[i - 1]);
      // If v == 0 then entry is not distinct with the previous element in
      // the sorted list therefore the distinct counter is not incremented.
      if (v > 0) {
        // If current entry is greater than previous then we've found a
        // distinct entry.
        ++distinct_count;
      }
      else if (v < 0) {
        // The current element should never be less if list is sorted in
        // ascending order.
        throw new Error("Assertion failed - the distinct list does not " +
                        "appear to be sorted.");
      }
    }

    // If the first entry in the list is NULL then subtract 1 from the
    // distinct count because we shouldn't be counting NULL entries.
    // (NULL tuples sort first, so only the first sorted entry need be
    // checked.)
    if (list.length > 0) {
      int first_entry = ((Integer) list[0]).intValue();
      // Assume first is null
      boolean first_is_null = true;
      for (int m = 0; m < cols && first_is_null == true; ++m) {
        TObject val = group_r[(first_entry * cols) + m];
        if (!val.isNull()) {
          // First isn't null
          first_is_null = false;
        }
      }
      // Is first NULL?
      if (first_is_null) {
        // decrease distinct count so we don't count the null entry.
        distinct_count = distinct_count - 1;
      }
    }

    return TObject.intVal(distinct_count);
  }

}

// --

// Aggregate AVG.  Sums the non-NULL values pairwise via evalAggregate, then
// divides the final sum by the group size in postEvalAggregate.
private static class AvgFunction extends AbstractAggregateFunction {

  public AvgFunction(Expression[] params) {
    super("avg", params);
  }

  public TObject evalAggregate(GroupResolver group, QueryContext context,
                               TObject ob1, TObject ob2) {
    // This will sum,
    // ob1 is the running accumulator (null on the first call); NULL values
    // are skipped rather than propagated.
    if (ob1 != null) {
      if (ob2.isNull()) {
        return ob1;
      }
      else {
        if (!ob1.isNull()) {
          return ob1.operatorAdd(ob2);
        }
        else {
          return ob2;
        }
      }
    }
    return ob2;
  }

  public TObject postEvalAggregate(GroupResolver group, QueryContext context,
                                   TObject result) {
    // Find the average from the sum result
    if (result.isNull()) {
      return result;
    }
    return result.operatorDivide(TObject.intVal(group.size()));
  }

}

// --

// Aggregate SUM.  Adds all non-NULL values of the group together.
private static class SumFunction extends AbstractAggregateFunction {

  public SumFunction(Expression[] params) {
    super("sum", params);
  }

  public TObject evalAggregate(GroupResolver group, QueryContext context,
                               TObject ob1, TObject ob2) {
    // This will sum,
    // ob1 is the running accumulator (null on the first call); NULL values
    // are skipped rather than propagated.
    if (ob1 != null) {
      if (ob2.isNull()) {
        return ob1;
      }
      else {
        if (!ob1.isNull()) {
          return ob1.operatorAdd(ob2);
        }
        else {
          return ob2;
        }
      }
    }
    return ob2;
  }

}

// --

// Aggregate MIN.  Returns the smallest non-NULL value in the group.
private static class MinFunction extends AbstractAggregateFunction {

  public MinFunction(Expression[] params) {
    super("min", params);
  }

  public TObject evalAggregate(GroupResolver group, QueryContext context,
                               TObject ob1, TObject ob2) {
    // This will find min,
    if (ob1 != null) {
      if (ob2.isNull()) {
        return ob1;
      }
      else {
        if (!ob1.isNull() && ob1.compareToNoNulls(ob2) < 0) {
          return ob1;
        }
        else {
          return ob2;
        }
      }
    }
    return ob2;
  }

  public TType returnTType(VariableResolver resolver, QueryContext context) {
    // Set to return the same type object as this variable.
    return getParameter(0).returnTType(resolver, context);
  }

}

// --

// Aggregate MAX.  Returns the largest non-NULL value in the group.
private static class MaxFunction extends AbstractAggregateFunction {

  public MaxFunction(Expression[] params) {
    super("max", params);
  }

  public TObject evalAggregate(GroupResolver group, QueryContext context,
                               TObject ob1, TObject ob2) {
    // This will find max,
    if (ob1 != null) {
      if (ob2.isNull()) {
        return ob1;
      }
      else {
        if (!ob1.isNull() && ob1.compareToNoNulls(ob2) > 0) {
          return ob1;
        }
        else {
          return ob2;
        }
      }
    }
    return ob2;
  }

  public TType returnTType(VariableResolver resolver, QueryContext context) {
    // Set to return the same type object as this variable.
    return getParameter(0).returnTType(resolver, context);
  }

}

// --

// Aggregate bitwise OR over the group.
private static class AggOrFunction extends AbstractAggregateFunction {

  public AggOrFunction(Expression[] params) {
    super("aggor", params);
  }

  public TObject evalAggregate(GroupResolver group, QueryContext context,
                               TObject ob1, TObject ob2) {
    // Assuming bitmap numbers, this will find the result of or'ing all the
    // values in the aggregate set.
    if (ob1 != null) {
      if (ob2.isNull()) {
        return ob1;
      }
      else {
        if (!ob1.isNull()) {
          return ob1.operatorOr(ob2);
        }
        else {
          return ob2;
        }
      }
    }
    return ob2;
  }

}

// ---------- User functions ----------

// Returns the user name
private static class UserFunction extends AbstractFunction {

  public UserFunction(Expression[] params) {
    super("user", params);

    if (parameterCount() > 0) {
      throw new RuntimeException("'user' function must have no arguments.");
    }
  }

  public TObject evaluate(GroupResolver group, VariableResolver resolver,
                          QueryContext context) {
    return TObject.stringVal(context.getUserName());
  }

  public TType returnTType() {
    return TType.STRING_TYPE;
  }

}

// Returns the comma (",") delimited priv groups the user belongs to.
private static class PrivGroupsFunction extends AbstractFunction { public PrivGroupsFunction(Expression[] params) { super("privgroups", params); if (parameterCount() > 0) { throw new RuntimeException( "'privgroups' function must have no arguments."); } } public TObject evaluate(GroupResolver group, VariableResolver resolver, QueryContext context) { throw new RuntimeException( "'PrivGroups' function currently not working."); } public TType returnTType() { return TType.STRING_TYPE; } } // ---------- String functions ---------- private static class LowerFunction extends AbstractFunction { public LowerFunction(Expression[] params) { super("lower", params); if (parameterCount() != 1) { throw new RuntimeException("Lower function must have one argument."); } } public TObject evaluate(GroupResolver group, VariableResolver resolver, QueryContext context) { TObject ob = getParameter(0).evaluate(group, resolver, context); if (ob.isNull()) { return ob; } return new TObject(ob.getTType(), ob.getObject().toString().toLowerCase()); } public TType returnTType() { return TType.STRING_TYPE; } } // -- private static class UpperFunction extends AbstractFunction { public UpperFunction(Expression[] params) { super("upper", params); if (parameterCount() != 1) { throw new RuntimeException("Upper function must have one argument."); } } public TObject evaluate(GroupResolver group, VariableResolver resolver, QueryContext context) { TObject ob = getParameter(0).evaluate(group, resolver, context); if (ob.isNull()) { return ob; } return new TObject(ob.getTType(), ob.getObject().toString().toUpperCase()); } public TType returnTType() { return TType.STRING_TYPE; } } // -- private static class ConcatFunction extends AbstractFunction { public ConcatFunction(Expression[] params) { super("concat", params); if (parameterCount() < 1) { throw new RuntimeException( "Concat function must have at least one argument."); } } public TObject evaluate(GroupResolver group, VariableResolver resolver, QueryContext 
context) { StringBuffer cc = new StringBuffer(); Locale str_locale = null; int str_strength = 0; int str_decomposition = 0; for (int i = 0; i < parameterCount(); ++i) { Expression cur_parameter = getParameter(i); TObject ob = cur_parameter.evaluate(group, resolver, context); if (!ob.isNull()) { cc.append(ob.getObject().toString()); TType type = ob.getTType(); if (str_locale == null && type instanceof TStringType) { TStringType str_type = (TStringType) type; str_locale = str_type.getLocale(); str_strength = str_type.getStrength(); str_decomposition = str_type.getDecomposition(); } } else { return ob; } } // We inherit the locale from the first string parameter with a locale, // or use a default STRING_TYPE if no locale found. TType type; if (str_locale != null) { type = new TStringType(SQLTypes.VARCHAR, -1, str_locale, str_strength, str_decomposition); } else { type = TType.STRING_TYPE; } return new TObject(type, new String(cc)); } public TType returnTType(VariableResolver resolver, QueryContext context) { // Determine the locale of the first string parameter. 
Locale str_locale = null; int str_strength = 0; int str_decomposition = 0; for (int i = 0; i < parameterCount() && str_locale == null; ++i) { TType type = getParameter(i).returnTType(resolver, context); if (type instanceof TStringType) { TStringType str_type = (TStringType) type; str_locale = str_type.getLocale(); str_strength = str_type.getStrength(); str_decomposition = str_type.getDecomposition(); } } if (str_locale != null) { return new TStringType(SQLTypes.VARCHAR, -1, str_locale, str_strength, str_decomposition); } else { return TType.STRING_TYPE; } } } // -- private static class LengthFunction extends AbstractFunction { public LengthFunction(Expression[] params) { super("length", params); if (parameterCount() != 1) { throw new RuntimeException("Length function must have one argument."); } } public TObject evaluate(GroupResolver group, VariableResolver resolver, QueryContext context) { TObject ob = getParameter(0).evaluate(group, resolver, context); if (ob.isNull()) { return ob; } if (ob.getTType() instanceof TBinaryType) { BlobAccessor blob = (BlobAccessor) ob.getObject(); return TObject.intVal(blob.length()); } if (ob.getTType() instanceof TStringType) { StringAccessor str = (StringAccessor) ob.getObject(); return TObject.intVal(str.length()); } return TObject.intVal(ob.getObject().toString().length()); } } // -- private static class SubstringFunction extends AbstractFunction { public SubstringFunction(Expression[] params) { super("substring", params); if (parameterCount() < 1 || parameterCount() > 3) { throw new RuntimeException( "Substring function needs one to three arguments."); } } public TObject evaluate(GroupResolver group, VariableResolver resolver, QueryContext context) { TObject ob = getParameter(0).evaluate(group, resolver, context); if (ob.isNull()) { return ob; } String str = ob.getObject().toString(); int pcount = parameterCount(); int str_length = str.length(); int arg1 = 1; int arg2 = str_length; if (pcount >= 2) { arg1 = 
getParameter(1).evaluate(group, resolver, context).toBigNumber().intValue(); } if (pcount >= 3) { arg2 = getParameter(2).evaluate(group, resolver, context).toBigNumber().intValue(); // arg2 = Operator.toNumber( // getParameter(2).evaluate(group, resolver, context)).intValue(); } // Make sure this call is safe for all lengths of string. if (arg1 < 1) { arg1 = 1; } if (arg1 > str_length) { return TObject.stringVal(""); } if (arg2 + arg1 > str_length) { arg2 = (str_length - arg1) + 1; } if (arg2 < 1) { return TObject.stringVal(""); } return TObject.stringVal(str.substring(arg1 - 1, (arg1 + arg2) - 1)); } public TType returnTType() { return TType.STRING_TYPE; } } // -- private static class SQLTrimFunction extends AbstractFunction { public SQLTrimFunction(Expression[] params) { super("sql_trim", params); // System.out.println(parameterCount()); if (parameterCount() != 3) { throw new RuntimeException( "SQL Trim function must have three parameters."); } } public TObject evaluate(GroupResolver group, VariableResolver resolver, QueryContext context) { // The type of trim (leading, both, trailing) TObject ttype = getParameter(0).evaluate(group, resolver, context); // Characters to trim TObject cob = getParameter(1).evaluate(group, resolver, context); if (cob.isNull()) { return cob; } else if (ttype.isNull()) { return TObject.stringVal((StringObject) null); } String characters = cob.getObject().toString(); String ttype_str = ttype.getObject().toString(); // The content to trim. TObject ob = getParameter(2).evaluate(group, resolver, context); if (ob.isNull()) { return ob; } String str = ob.getObject().toString(); int skip = characters.length(); // Do the trim, if (ttype_str.equals("leading") || ttype_str.equals("both")) { // Trim from the start. 
int scan = 0; while (scan < str.length() && str.indexOf(characters, scan) == scan) { scan += skip; } str = str.substring(Math.min(scan, str.length())); } if (ttype_str.equals("trailing") || ttype_str.equals("both")) { // Trim from the end. int scan = str.length() - 1; int i = str.lastIndexOf(characters, scan); while (scan >= 0 && i != -1 && i == scan - skip + 1) { scan -= skip; i = str.lastIndexOf(characters, scan); } str = str.substring(0, Math.max(0, scan + 1)); } return TObject.stringVal(str); } public TType returnTType(VariableResolver resolver, QueryContext context) { return TType.STRING_TYPE; } } // -- private static class LTrimFunction extends AbstractFunction { public LTrimFunction(Expression[] params) { super("ltrim", params); if (parameterCount() != 1) { throw new RuntimeException( "ltrim function may only have 1 parameter."); } } public TObject evaluate(GroupResolver group, VariableResolver resolver, QueryContext context) { TObject ob = getParameter(0).evaluate(group, resolver, context); if (ob.isNull()) { return ob; } String str = ob.getObject().toString(); // Do the trim, // Trim from the start. int scan = 0; while (scan < str.length() && str.indexOf(' ', scan) == scan) { scan += 1; } str = str.substring(Math.min(scan, str.length())); return TObject.stringVal(str); } public TType returnTType(VariableResolver resolver, QueryContext context) { return TType.STRING_TYPE; } } // -- private static class RTrimFunction extends AbstractFunction { public RTrimFunction(Expression[] params) { super("rtrim", params); if (parameterCount() != 1) { throw new RuntimeException( "rtrim function may only have 1 parameter."); } } public TObject evaluate(GroupResolver group, VariableResolver resolver, QueryContext context) { TObject ob = getParameter(0).evaluate(group, resolver, context); if (ob.isNull()) { return ob; } String str = ob.getObject().toString(); // Do the trim, // Trim from the end. 
int scan = str.length() - 1; int i = str.lastIndexOf(" ", scan); while (scan >= 0 && i != -1 && i == scan - 2) { scan -= 1; i = str.lastIndexOf(" ", scan); } str = str.substring(0, Math.max(0, scan + 1)); return TObject.stringVal(str); } public TType returnTType(VariableResolver resolver, QueryContext context) { return TType.STRING_TYPE; } } // ---------- Mathematical functions ---------- private static class AbsFunction extends AbstractFunction { public AbsFunction(Expression[] params) { super("abs", params); if (parameterCount() != 1) { throw new RuntimeException("Abs function must have one argument."); } } public TObject evaluate(GroupResolver group, VariableResolver resolver, QueryContext context) { TObject ob = getParameter(0).evaluate(group, resolver, context); if (ob.isNull()) { return ob; } BigNumber num = ob.toBigNumber(); return TObject.bigNumberVal(num.abs()); } } // -- private static class SignFunction extends AbstractFunction { public SignFunction(Expression[] params) { super("sign", params); if (parameterCount() != 1) { throw new RuntimeException("Sign function must have one argument."); } } public TObject evaluate(GroupResolver group, VariableResolver resolver, QueryContext context) { TObject ob = getParameter(0).evaluate(group, resolver, context); if (ob.isNull()) { return ob; } BigNumber num = ob.toBigNumber(); return TObject.intVal(num.signum()); } } // -- private static class ModFunction extends AbstractFunction { public ModFunction(Expression[] params) { super("mod", params); if (parameterCount() != 2) { throw new RuntimeException("Mod function must have two arguments."); } } public TObject evaluate(GroupResolver group, VariableResolver resolver, QueryContext context) { TObject ob1 = getParameter(0).evaluate(group, resolver, context); TObject ob2 = getParameter(1).evaluate(group, resolver, context); if (ob1.isNull()) { return ob1; } else if (ob2.isNull()) { return ob2; } double v = ob1.toBigNumber().doubleValue(); double m = 
ob2.toBigNumber().doubleValue(); return TObject.doubleVal(v % m); } } // -- private static class RoundFunction extends AbstractFunction { public RoundFunction(Expression[] params) { super("round", params); if (parameterCount() < 1 || parameterCount() > 2) { throw new RuntimeException( "Round function must have one or two arguments."); } } public TObject evaluate(GroupResolver group, VariableResolver resolver, QueryContext context) { TObject ob1 = getParameter(0).evaluate(group, resolver, context); if (ob1.isNull()) { return ob1; } BigNumber v = ob1.toBigNumber(); int d = 0; if (parameterCount() == 2) { TObject ob2 = getParameter(1).evaluate(group, resolver, context); if (ob2.isNull()) { d = 0; } else { d = ob2.toBigNumber().intValue(); } } return TObject.bigNumberVal(v.setScale(d, BigDecimal.ROUND_HALF_UP)); } } // -- private static class PowFunction extends AbstractFunction { public PowFunction(Expression[] params) { super("pow", params); if (parameterCount() != 2) { throw new RuntimeException("Pow function must have two arguments."); } } public TObject evaluate(GroupResolver group, VariableResolver resolver, QueryContext context) { TObject ob1 = getParameter(0).evaluate(group, resolver, context); TObject ob2 = getParameter(1).evaluate(group, resolver, context); if (ob1.isNull()) { return ob1; } else if (ob2.isNull()) { return ob2; } double v = ob1.toBigNumber().doubleValue(); double w = ob2.toBigNumber().doubleValue(); return TObject.doubleVal(Math.pow(v, w)); } } // -- private static class SqrtFunction extends AbstractFunction { public SqrtFunction(Expression[] params) { super("sqrt", params); if (parameterCount() != 1) { throw new RuntimeException("Sqrt function must have one argument."); } } public TObject evaluate(GroupResolver group, VariableResolver resolver, QueryContext context) { TObject ob = getParameter(0).evaluate(group, resolver, context); if (ob.isNull()) { return ob; } return TObject.bigNumberVal(ob.toBigNumber().sqrt()); } } // -- private static 
class LeastFunction extends AbstractFunction { public LeastFunction(Expression[] params) { super("least", params); if (parameterCount() < 1) { throw new RuntimeException( "Least function must have at least 1 argument."); } } public TObject evaluate(GroupResolver group, VariableResolver resolver, QueryContext context) { TObject least = null; for (int i = 0; i < parameterCount(); ++i) { TObject ob = getParameter(i).evaluate(group, resolver, context); if (ob.isNull()) { return ob; } if (least == null || ob.compareTo(least) < 0) { least = ob; } } return least; } public TType returnTType(VariableResolver resolver, QueryContext context) { return getParameter(0).returnTType(resolver, context); } } // -- private static class GreatestFunction extends AbstractFunction { public GreatestFunction(Expression[] params) { super("greatest", params); if (parameterCount() < 1) { throw new RuntimeException( "Greatest function must have at least 1 argument."); } } public TObject evaluate(GroupResolver group, VariableResolver resolver, QueryContext context) { TObject great = null; for (int i = 0; i < parameterCount(); ++i) { TObject ob = getParameter(i).evaluate(group, resolver, context); if (ob.isNull()) { return ob; } if (great == null || ob.compareTo(great) > 0) { great = ob; } } return great; } public TType returnTType(VariableResolver resolver, QueryContext context) { return getParameter(0).returnTType(resolver, context); } } // -- private static class UniqueKeyFunction extends AbstractFunction { public UniqueKeyFunction(Expression[] params) { super("uniquekey", params); // The parameter is the name of the table you want to bring the unique // key in from. 
if (parameterCount() != 1) { throw new RuntimeException( "'uniquekey' function must have only 1 argument."); } } public TObject evaluate(GroupResolver group, VariableResolver resolver, QueryContext context) { String str = getParameter(0).evaluate(group, resolver, context).getObject().toString(); long v = context.nextSequenceValue(str); return TObject.longVal(v); } public TType returnTType(VariableResolver resolver, QueryContext context) { return TType.NUMERIC_TYPE; } } private static class NextValFunction extends AbstractFunction { public NextValFunction(Expression[] params) { super("nextval", params); // The parameter is the name of the table you want to bring the unique // key in from. if (parameterCount() != 1) { throw new RuntimeException( "'nextval' function must have only 1 argument."); } } public TObject evaluate(GroupResolver group, VariableResolver resolver, QueryContext context) { String str = getParameter(0).evaluate(group, resolver, context).getObject().toString(); long v = context.nextSequenceValue(str); return TObject.longVal(v); } public TType returnTType(VariableResolver resolver, QueryContext context) { return TType.NUMERIC_TYPE; } } private static class CurrValFunction extends AbstractFunction { public CurrValFunction(Expression[] params) { super("currval", params); // The parameter is the name of the table you want to bring the unique // key in from. 
if (parameterCount() != 1) { throw new RuntimeException( "'currval' function must have only 1 argument."); } } public TObject evaluate(GroupResolver group, VariableResolver resolver, QueryContext context) { String str = getParameter(0).evaluate(group, resolver, context).getObject().toString(); long v = context.currentSequenceValue(str); return TObject.longVal(v); } public TType returnTType(VariableResolver resolver, QueryContext context) { return TType.NUMERIC_TYPE; } } private static class SetValFunction extends AbstractFunction { public SetValFunction(Expression[] params) { super("setval", params); // The parameter is the name of the table you want to bring the unique // key in from. if (parameterCount() != 2) { throw new RuntimeException( "'setval' function must have 2 arguments."); } } public TObject evaluate(GroupResolver group, VariableResolver resolver, QueryContext context) { String str = getParameter(0).evaluate(group, resolver, context).getObject().toString(); BigNumber num = getParameter(1).evaluate(group, resolver, context).toBigNumber(); long v = num.longValue(); context.setSequenceValue(str, v); return TObject.longVal(v); } public TType returnTType(VariableResolver resolver, QueryContext context) { return TType.NUMERIC_TYPE; } } // -- private static class HexToBinaryFunction extends AbstractFunction { public HexToBinaryFunction(Expression[] params) { super("hextobinary", params); // One parameter - our hex string. 
if (parameterCount() != 1) { throw new RuntimeException( "'hextobinary' function must have only 1 argument."); } } public TObject evaluate(GroupResolver group, VariableResolver resolver, QueryContext context) { String str = getParameter(0).evaluate(group, resolver, context).getObject().toString(); int str_len = str.length(); if (str_len == 0) { return new TObject(TType.BINARY_TYPE, new ByteLongObject(new byte[0])); } // We translate the string to a byte array, byte[] buf = new byte[(str_len + 1) / 2]; int index = 0; if (buf.length * 2 != str_len) { buf[0] = (byte) Character.digit(str.charAt(0), 16); ++index; } int v = 0; for (int i = index; i < str_len; i += 2) { v = (Character.digit(str.charAt(i), 16) << 4) | (Character.digit(str.charAt(i + 1), 16)); buf[index] = (byte) (v & 0x0FF); ++index; } return new TObject(TType.BINARY_TYPE, new ByteLongObject(buf)); } public TType returnTType(VariableResolver resolver, QueryContext context) { return TType.BINARY_TYPE; } } // -- private static class BinaryToHexFunction extends AbstractFunction { final static char[] digits = { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z' }; public BinaryToHexFunction(Expression[] params) { super("binarytohex", params); // One parameter - our hex string. 
if (parameterCount() != 1) { throw new RuntimeException( "'binarytohex' function must have only 1 argument."); } } public TObject evaluate(GroupResolver group, VariableResolver resolver, QueryContext context) { TObject ob = getParameter(0).evaluate(group, resolver, context); if (ob.isNull()) { return ob; } else if (ob.getTType() instanceof TBinaryType) { StringBuffer buf = new StringBuffer(); BlobAccessor blob = (BlobAccessor) ob.getObject(); InputStream bin = blob.getInputStream(); try { int bval = bin.read(); while (bval != -1) { buf.append(digits[((bval >> 4) & 0x0F)]); buf.append(digits[(bval & 0x0F)]); bval = bin.read(); } } catch (IOException e) { e.printStackTrace(); throw new RuntimeException("IO Error: " + e.getMessage()); } // for (int i = 0; i < arr.length; ++i) { // buf.append(digits[((arr[i] >> 4) & 0x0F)]); // buf.append(digits[(arr[i] & 0x0F)]); // } return TObject.stringVal(buf.toString()); } else { throw new RuntimeException( "'binarytohex' parameter type is not a binary object."); } } public TType returnTType(VariableResolver resolver, QueryContext context) { return TType.STRING_TYPE; } } // -- static class SQLCastFunction extends AbstractFunction { private TType cast_to_type; public SQLCastFunction(Expression[] params) { super("sql_cast", params); // Two parameters - the value to cast and the type to cast to (encoded) if (parameterCount() != 2) { throw new RuntimeException( "'sql_cast' function must have only 2 arguments."); } // Get the encoded type and parse it into a TType object and cache // locally in this object. We expect that the second parameter of this // function is always constant. 
Expression exp = params[1]; if (exp.size() != 1) { throw new RuntimeException( "'sql_cast' function must have simple second parameter."); } Object vob = params[1].last(); if (vob instanceof TObject) { TObject ob = (TObject) vob; String encoded_type = ob.getObject().toString(); cast_to_type = TType.decodeString(encoded_type); } else { throw new RuntimeException( "'sql_cast' function must have simple second parameter."); } } public TObject evaluate(GroupResolver group, VariableResolver resolver, QueryContext context) { TObject ob = getParameter(0).evaluate(group, resolver, context); // If types are the same then no cast is necessary and we return this // object. if (ob.getTType().getSQLType() == cast_to_type.getSQLType()) { return ob; } // Otherwise cast the object and return the new typed object. Object casted_ob = TType.castObjectToTType(ob.getObject(), cast_to_type); return new TObject(cast_to_type, casted_ob); } public TType returnTType(VariableResolver resolver, QueryContext context) { return cast_to_type; } } // -- static class DateObFunction extends AbstractFunction { private final static TType DATE_TYPE = new TDateType(SQLTypes.DATE); /** * The date format object that handles the conversion of Date objects to a * string readable representation of the given date. *

* NOTE: Due to bad design these objects are not thread-safe. */ private final static DateFormat date_format_sho; private final static DateFormat date_format_sql; private final static DateFormat date_format_med; private final static DateFormat date_format_lon; private final static DateFormat date_format_ful; static { date_format_med = DateFormat.getDateInstance(DateFormat.MEDIUM); date_format_sho = DateFormat.getDateInstance(DateFormat.SHORT); date_format_lon = DateFormat.getDateInstance(DateFormat.LONG); date_format_ful = DateFormat.getDateInstance(DateFormat.FULL); // The SQL date format date_format_sql = new SimpleDateFormat("yyyy-MM-dd"); } private static TObject dateVal(Date d) { return new TObject(DATE_TYPE, d); } public DateObFunction(Expression[] params) { super("dateob", params); if (parameterCount() > 1) { throw new RuntimeException( "'dateob' function must have only one or zero parameters."); } } public TObject evaluate(GroupResolver group, VariableResolver resolver, QueryContext context) { // No parameters so return the current date. if (parameterCount() == 0) { return dateVal(new Date()); } TObject exp_res = getParameter(0).evaluate(group, resolver, context); // If expression resolves to 'null' then return current date if (exp_res.isNull()) { return dateVal(new Date()); } // If expression resolves to a BigDecimal, then treat as number of // seconds since midnight Jan 1st, 1970 else if (exp_res.getTType() instanceof TNumericType) { BigNumber num = (BigNumber) exp_res.getObject(); return dateVal(new Date(num.longValue())); } String date_str = exp_res.getObject().toString(); // We need to synchronize here unfortunately because the Java // DateFormat objects are not thread-safe. 
synchronized (date_format_sho) { // Try and parse date try { return dateVal(date_format_sql.parse(date_str)); } catch (ParseException e) {} try { return dateVal(date_format_sho.parse(date_str)); } catch (ParseException e) {} try { return dateVal(date_format_med.parse(date_str)); } catch (ParseException e) {} try { return dateVal(date_format_lon.parse(date_str)); } catch (ParseException e) {} try { return dateVal(date_format_ful.parse(date_str)); } catch (ParseException e) {} throw new RuntimeException( "Unable to parse date string '" + date_str + "'"); } } public TType returnTType(VariableResolver resolver, QueryContext context) { return DATE_TYPE; } } // -- static class TimeObFunction extends AbstractFunction { private final static TType TIME_TYPE = new TDateType(SQLTypes.TIME); public TimeObFunction(Expression[] params) { super("timeob", params); if (parameterCount() > 1) { throw new RuntimeException( "'timeob' function must have only one or zero parameters."); } } private TObject timeNow() { Calendar c = Calendar.getInstance(); c.setLenient(false); int hour = c.get(Calendar.HOUR_OF_DAY); int minute = c.get(Calendar.MINUTE); int second = c.get(Calendar.SECOND); int millisecond = c.get(Calendar.MILLISECOND); c.set(1970, 0, 1); return new TObject(TIME_TYPE, c.getTime()); } public TObject evaluate(GroupResolver group, VariableResolver resolver, QueryContext context) { // No parameters so return the current time. 
if (parameterCount() == 0) { return timeNow(); } TObject exp_res = getParameter(0).evaluate(group, resolver, context); // If expression resolves to 'null' then return current date if (exp_res.isNull()) { return timeNow(); } String date_str = exp_res.getObject().toString(); return new TObject(TIME_TYPE, CastHelper.toTime(date_str)); } public TType returnTType(VariableResolver resolver, QueryContext context) { return TIME_TYPE; } } // -- static class TimeStampObFunction extends AbstractFunction { private final static TType TIMESTAMP_TYPE = new TDateType(SQLTypes.TIMESTAMP); public TimeStampObFunction(Expression[] params) { super("timestampob", params); if (parameterCount() > 1) { throw new RuntimeException( "'timestampob' function must have only one or zero parameters."); } } public TObject evaluate(GroupResolver group, VariableResolver resolver, QueryContext context) { // No parameters so return the current time. if (parameterCount() == 0) { return new TObject(TIMESTAMP_TYPE, new Date()); } TObject exp_res = getParameter(0).evaluate(group, resolver, context); // If expression resolves to 'null' then return current date if (exp_res.isNull()) { return new TObject(TIMESTAMP_TYPE, new Date()); } String date_str = exp_res.getObject().toString(); return new TObject(TIMESTAMP_TYPE, CastHelper.toTimeStamp(date_str)); } public TType returnTType(VariableResolver resolver, QueryContext context) { return TIMESTAMP_TYPE; } } // -- // A function that formats an input java.sql.Date object to the format // given using the java.text.SimpleDateFormat class. 
static class DateFormatFunction extends AbstractFunction { final static Cache formatter_cache = new Cache(127, 90, 10); public DateFormatFunction(Expression[] params) { super("dateformat", params); if (parameterCount() != 2) { throw new RuntimeException( "'dateformat' function must have exactly two parameters."); } } public TObject evaluate(GroupResolver group, VariableResolver resolver, QueryContext context) { TObject datein = getParameter(0).evaluate(group, resolver, context); TObject format = getParameter(1).evaluate(group, resolver, context); // If expression resolves to 'null' then return null if (datein.isNull()) { return datein; } Date d; if (!(datein.getTType() instanceof TDateType)) { throw new RuntimeException( "Date to format must be DATE, TIME or TIMESTAMP"); } else { d = (Date) datein.getObject(); } String format_string = format.toString(); synchronized(formatter_cache) { SimpleDateFormat formatter = (SimpleDateFormat) formatter_cache.get(format_string); if (formatter == null) { formatter = new SimpleDateFormat(format_string); formatter_cache.put(format_string, formatter); } return TObject.stringVal(formatter.format(d)); } } public TType returnTType(VariableResolver resolver, QueryContext context) { return TType.STRING_TYPE; } } // -- // Casts the expression to a BigDecimal number. 
Useful in conjunction with // 'dateob' private static class ToNumberFunction extends AbstractFunction { public ToNumberFunction(Expression[] params) { super("tonumber", params); if (parameterCount() != 1) { throw new RuntimeException("TONUMBER function must have one argument."); } } public TObject evaluate(GroupResolver group, VariableResolver resolver, QueryContext context) { // Casts the first parameter to a number return getParameter(0).evaluate(group, resolver, context).castTo(TType.NUMERIC_TYPE); } } // -- // Conditional - IF(a < 0, NULL, a) private static class IfFunction extends AbstractFunction { public IfFunction(Expression[] params) { super("if", params); if (parameterCount() != 3) { throw new RuntimeException( "IF function must have exactly three arguments."); } } public TObject evaluate(GroupResolver group, VariableResolver resolver, QueryContext context) { TObject res = getParameter(0).evaluate(group, resolver, context); if (res.getTType() instanceof TBooleanType) { // Does the result equal true? if (res.compareTo(TObject.booleanVal(true)) == 0) { // Resolved to true so evaluate the first argument return getParameter(1).evaluate(group, resolver, context); } else { // Otherwise result must evaluate to NULL or false, so evaluate // the second parameter return getParameter(2).evaluate(group, resolver, context); } } // Result was not a boolean so return null return TObject.nullVal(); } public TType returnTType(VariableResolver resolver, QueryContext context) { // It's impossible to know the return type of this function until runtime // because either comparator could be returned. We could assume that // both branch expressions result in the same type of object but this // currently is not enforced. // Returns type of first argument TType t1 = getParameter(1).returnTType(resolver, context); // This is a hack for null values. If the first parameter is null // then return the type of the second parameter which hopefully isn't // also null. 
if (t1 instanceof TNullType) { return getParameter(2).returnTType(resolver, context); } return t1; } } // -- // Coalesce - COALESCE(address2, CONCAT(city, ', ', state, ' ', zip)) private static class CoalesceFunction extends AbstractFunction { public CoalesceFunction(Expression[] params) { super("coalesce", params); if (parameterCount() < 1) { throw new RuntimeException( "COALESCE function must have at least 1 parameter."); } } public TObject evaluate(GroupResolver group, VariableResolver resolver, QueryContext context) { int count = parameterCount(); for (int i = 0; i < count - 1; ++i) { TObject res = getParameter(i).evaluate(group, resolver, context); if (!res.isNull()) { return res; } } return getParameter(count - 1).evaluate(group, resolver, context); } public TType returnTType(VariableResolver resolver, QueryContext context) { // It's impossible to know the return type of this function until runtime // because either comparator could be returned. We could assume that // both branch expressions result in the same type of object but this // currently is not enforced. // Go through each argument until we find the first parameter we can // deduce the class of. int count = parameterCount(); for (int i = 0; i < count; ++i) { TType t = getParameter(i).returnTType(resolver, context); if (!(t instanceof TNullType)) { return t; } } // Can't work it out so return null type return TType.NULL_TYPE; } } // -- // Instantiates a new java object. private static class JavaObjectInstantiation extends AbstractFunction { public JavaObjectInstantiation(Expression[] params) { super("_new_JavaObject", params); if (parameterCount() < 1) { throw new RuntimeException( "_new_JavaObject function must have one argument."); } } public TObject evaluate(GroupResolver group, VariableResolver resolver, QueryContext context) { // Resolve the parameters... 
final int arg_len = parameterCount() - 1; Object[] args = new Object[arg_len]; for (int i = 0; i < args.length; ++i) { args[i] = getParameter(i + 1).evaluate(group, resolver, context).getObject(); } Object[] casted_args = new Object[arg_len]; try { String clazz = getParameter(0).evaluate(null, resolver, context).getObject().toString(); Class c = Class.forName(clazz); Constructor[] constructs = c.getConstructors(); // Search for the first constructor that we can use with the given // arguments. search_constructs: for (int i = 0; i < constructs.length; ++i) { Class[] construct_args = constructs[i].getParameterTypes(); if (construct_args.length == arg_len) { for (int n = 0; n < arg_len; ++n) { // If we are dealing with a primitive, if (construct_args[n].isPrimitive()) { String class_name = construct_args[n].getName(); // If the given argument is a number, if (args[n] instanceof Number) { Number num = (Number) args[n]; if (class_name.equals("byte")) { casted_args[n] = new Byte(num.byteValue()); } else if (class_name.equals("char")) { casted_args[n] = new Character((char) num.intValue()); } else if (class_name.equals("double")) { casted_args[n] = new Double(num.doubleValue()); } else if (class_name.equals("float")) { casted_args[n] = new Float(num.floatValue()); } else if (class_name.equals("int")) { casted_args[n] = new Integer(num.intValue()); } else if (class_name.equals("long")) { casted_args[n] = new Long(num.longValue()); } else if (class_name.equals("short")) { casted_args[n] = new Short(num.shortValue()); } else { // Can't cast the primitive type to a number so break, break search_constructs; } } // If we are a boolean, we can cast to primitive boolean else if (args[n] instanceof Boolean) { // If primitive type constructor arg is a boolean also if (class_name.equals("boolean")) { casted_args[n] = args[n]; } else { break search_constructs; } } // Otherwise we can't cast, else { break search_constructs; } } // Not a primitive type constructor arg, else { // 
PENDING: Allow string -> char conversion if (construct_args[n].isInstance(args[n])) { casted_args[n] = args[n]; } else { break search_constructs; } } } // for (int n = 0; n < arg_len; ++n) // If we get here, we have a match... Object ob = constructs[i].newInstance(casted_args); ByteLongObject serialized_ob = ObjectTranslator.serialize(ob); return new TObject(new TJavaObjectType(clazz), serialized_ob); } } throw new RuntimeException( "Unable to find a constructor for '" + clazz + "' that matches given arguments."); } catch (ClassNotFoundException e) { throw new RuntimeException("Class not found: " + e.getMessage()); } catch (InstantiationException e) { throw new RuntimeException("Instantiation Error: " + e.getMessage()); } catch (IllegalAccessException e) { throw new RuntimeException("Illegal Access Error: " + e.getMessage()); } catch (IllegalArgumentException e) { throw new RuntimeException( "Illegal Argument Error: " + e.getMessage()); } catch (InvocationTargetException e) { throw new RuntimeException( "Invocation Target Error: " + e.getMessage()); } } public TType returnTType(VariableResolver resolver, QueryContext context) { String clazz = getParameter(0).evaluate(null, resolver, context).getObject().toString(); return new TJavaObjectType(clazz); } } // Instantiates a new java object using Jim McBeath's parameter seach // algorithm. private static class JavaObjectInstantiation2 extends AbstractFunction { public JavaObjectInstantiation2(Expression[] params) { super("_new_JavaObject", params); if (parameterCount() < 1) { throw new RuntimeException( "_new_JavaObject function must have one argument."); } } public TObject evaluate(GroupResolver group, VariableResolver resolver, QueryContext context) { // Resolve the parameters... 
final int arg_len = parameterCount() - 1; TObject[] args = new TObject[arg_len]; for (int i = 0; i < args.length; ++i) { args[i] = getParameter(i + 1).evaluate(group, resolver, context); } Caster.deserializeJavaObjects(args); try { // Get the class name of the object to be constructed String clazz = getParameter(0).evaluate(null, resolver, context).getObject().toString(); Class c = Class.forName(clazz); Constructor[] constructs = c.getConstructors(); Constructor bestConstructor = Caster.findBestConstructor(constructs, args); if (bestConstructor == null) { // Didn't find a match - build a list of class names of the // args so the user knows what we were looking for. String argTypes = Caster.getArgTypesString(args); throw new RuntimeException( "Unable to find a constructor for '" + clazz + "' that matches given arguments: " + argTypes); } Object[] casted_args = Caster.castArgsToConstructor(args, bestConstructor); // Call the constructor to create the java object Object ob = bestConstructor.newInstance(casted_args); ByteLongObject serialized_ob = ObjectTranslator.serialize(ob); return new TObject(new TJavaObjectType(clazz), serialized_ob); } catch (ClassNotFoundException e) { throw new RuntimeException("Class not found: " + e.getMessage()); } catch (InstantiationException e) { throw new RuntimeException("Instantiation Error: " + e.getMessage()); } catch (IllegalAccessException e) { throw new RuntimeException("Illegal Access Error: " + e.getMessage()); } catch (IllegalArgumentException e) { throw new RuntimeException( "Illegal Argument Error: " + e.getMessage()); } catch (InvocationTargetException e) { String msg = e.getMessage(); if (msg == null) { Throwable th = e.getTargetException(); if (th != null) { msg = th.getClass().getName() + ": " + th.getMessage(); } } throw new RuntimeException("Invocation Target Error: " + msg); } } public TType returnTType(VariableResolver resolver, QueryContext context) { String clazz = getParameter(0).evaluate(null, resolver, 
context).getObject().toString(); return new TJavaObjectType(clazz); } } // -- // Used in the 'getxxxKeys' methods in DatabaseMetaData to convert the // update delete rule of a foreign key to the JDBC short enum. private static class ForeignRuleConvert extends AbstractFunction { public ForeignRuleConvert(Expression[] params) { super("i_frule_convert", params); if (parameterCount() != 1) { throw new RuntimeException( "i_frule_convert function must have one argument."); } } public TObject evaluate(GroupResolver group, VariableResolver resolver, QueryContext context) { // The parameter should be a variable reference that is resolved TObject ob = getParameter(0).evaluate(group, resolver, context); String str = null; if (!ob.isNull()) { str = ob.getObject().toString(); } int v; if (str == null || str.equals("") || str.equals("NO ACTION")) { v = java.sql.DatabaseMetaData.importedKeyNoAction; } else if (str.equals("CASCADE")) { v = java.sql.DatabaseMetaData.importedKeyCascade; } else if (str.equals("SET NULL")) { v = java.sql.DatabaseMetaData.importedKeySetNull; } else if (str.equals("SET DEFAULT")) { v = java.sql.DatabaseMetaData.importedKeySetDefault; } else if (str.equals("RESTRICT")) { v = java.sql.DatabaseMetaData.importedKeyRestrict; } else { throw new Error("Unrecognised foreign key rule: " + str); } // Return the correct enumeration return TObject.intVal(v); } } // -- // Used to form an SQL type string that describes the SQL type and any // size/scale information together with it. 
private static class SQLTypeString extends AbstractFunction { public SQLTypeString(Expression[] params) { super("i_sql_type", params); if (parameterCount() != 3) { throw new RuntimeException( "i_sql_type function must have three arguments."); } } public TObject evaluate(GroupResolver group, VariableResolver resolver, QueryContext context) { // The parameter should be a variable reference that is resolved TObject type_string = getParameter(0).evaluate(group, resolver, context); TObject type_size = getParameter(1).evaluate(group, resolver, context); TObject type_scale = getParameter(2).evaluate(group, resolver, context); StringBuffer result_str = new StringBuffer(); result_str.append(type_string.toString()); long size = -1; long scale = -1; if (!type_size.isNull()) { size = type_size.toBigNumber().longValue(); } if (!type_scale.isNull()) { scale = type_scale.toBigNumber().longValue(); } if (size != -1) { result_str.append('('); result_str.append(size); if (scale != -1) { result_str.append(','); result_str.append(scale); } result_str.append(')'); } return TObject.stringVal(new String(result_str)); } public TType returnTType(VariableResolver resolver, QueryContext context) { return TType.STRING_TYPE; } } // -- // Used to convert view data in the system view table to forms that are // human understandable. Useful function for debugging or inspecting views. private static class ViewDataConvert extends AbstractFunction { public ViewDataConvert(Expression[] params) { super("i_view_data", params); if (parameterCount() != 2) { throw new RuntimeException( "i_sql_type function must have two arguments."); } } public TObject evaluate(GroupResolver group, VariableResolver resolver, QueryContext context) { // Get the parameters. The first is a string describing the operation. // The second is the binary data to process and output the information // for. 
TObject command = getParameter(0).evaluate(group, resolver, context); TObject data = getParameter(1).evaluate(group, resolver, context); String command_str = command.getObject().toString(); ByteLongObject blob = (ByteLongObject) data.getObject(); if (command_str.equalsIgnoreCase("referenced tables")) { ViewDef view_def = ViewDef.deserializeFromBlob(blob); QueryPlanNode node = view_def.getQueryPlanNode(); ArrayList touched_tables = node.discoverTableNames(new ArrayList()); StringBuffer buf = new StringBuffer(); int sz = touched_tables.size(); for (int i = 0; i < sz; ++i) { buf.append(touched_tables.get(i)); if (i < sz - 1) { buf.append(", "); } } return TObject.stringVal(new String(buf)); } else if (command_str.equalsIgnoreCase("plan dump")) { ViewDef view_def = ViewDef.deserializeFromBlob(blob); QueryPlanNode node = view_def.getQueryPlanNode(); StringBuffer buf = new StringBuffer(); node.debugString(0, buf); return TObject.stringVal(new String(buf)); } else if (command_str.equalsIgnoreCase("query string")) { SQLQuery query = SQLQuery.deserializeFromBlob(blob); return TObject.stringVal(query.toString()); } return TObject.nullVal(); } public TType returnTType(VariableResolver resolver, QueryContext context) { return TType.STRING_TYPE; } } // -- // Given a priv_bit number (from SYS_INFO.sUSRGrant), this will return a // text representation of the privilege. 
private static class PrivilegeString extends AbstractFunction { public PrivilegeString(Expression[] params) { super("i_privilege_string", params); if (parameterCount() != 1) { throw new RuntimeException( "i_privilege_string function must have one argument."); } } public TObject evaluate(GroupResolver group, VariableResolver resolver, QueryContext context) { TObject priv_bit_ob = getParameter(0).evaluate(group, resolver, context); int priv_bit = ((BigNumber) priv_bit_ob.getObject()).intValue(); Privileges privs = new Privileges(); privs = privs.add(priv_bit); return TObject.stringVal(privs.toString()); } public TType returnTType(VariableResolver resolver, QueryContext context) { return TType.STRING_TYPE; } } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/InternalJDBCHelper.java000066400000000000000000000141501330501023400261200ustar00rootroot00000000000000/** * com.mckoi.database.InternalJDBCHelper 16 Mar 2002 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package com.mckoi.database; import java.sql.Connection; import java.sql.SQLException; import com.mckoi.database.jdbc.MConnection; import com.mckoi.database.jdbc.DatabaseCallBack; import com.mckoi.database.jdbc.DatabaseInterface; import com.mckoi.database.jdbcserver.AbstractJDBCDatabaseInterface; /** * Helper and convenience methods and classes for creating a JDBC interface * that has direct access to an open transaction of a DatabaseConnection. * This class allows us to provide JDBC access to stored procedures from * inside the engine. * * @author Tobias Downer */ class InternalJDBCHelper { /** * Returns a java.sql.Connection object that is bound to the given * DatabaseConnection object. Queries executed on the Connection alter * the currently open transaction. *

* Note: It is assumed that the DatabaseConnection is locked in exclusive * mode when a query is executed (eg. via the 'executeXXX' methods in * Statement). *

* Note: Auto-commit is DISABLED for the SQL connection and can not * be enabled. */ static Connection createJDBCConnection(User user, DatabaseConnection connection) { InternalDatabaseInterface db_interface = new InternalDatabaseInterface(user, connection); return new InternalConnection(connection, db_interface, 11, 4092000); } /** * Disposes the JDBC Connection object returned by the 'createJDBCConnection' * method. This should be called to free resources associated with the * connection object. *

* After this has completed the given Connection object is invalidated. */ static void disposeJDBCConnection(Connection jdbc_connection) throws SQLException { InternalConnection connection = (InternalConnection) jdbc_connection; // Dispose the connection. connection.internalClose(); } // ---------- Inner classes ---------- /** * A derived java.sql.Connection class from MConnection. This class disables * auto commit, and inherits case insensitivity from the parent * DatabaseConnection. *

* The decision to disable auto-commit was because this connection will * typically be used as a sub-process for executing a complete command. * Disabling auto-commit makes handling an internal connection more user * friendly. Also, toggling this flag in the DatabaseConnection in mid- * command is probably a very bad idea. */ private static class InternalConnection extends MConnection { /** * The DatabaseInterface for this connection. */ private InternalDatabaseInterface internal_db_interface; /** * Constructs the internal java.sql.Connection. */ public InternalConnection(DatabaseConnection db, InternalDatabaseInterface jdbc_interface, int cache_size, int max_size) { super("", jdbc_interface, cache_size, max_size); internal_db_interface = jdbc_interface; setCaseInsensitiveIdentifiers(db.isInCaseInsensitiveMode()); } /** * Returns the InternalDatabaseInterface that is used in this * connection. */ InternalDatabaseInterface getDBInterface() { return internal_db_interface; } /** * Overwritten from MConnection - auto-commit is disabled and can not be * enabled. */ public void setAutoCommit(boolean status) throws SQLException { if (status == true) { throw new SQLException( "Auto-commit can not be enabled for an internal connection."); } } /** * Overwritten from MConnection - auto-commit is disabled and can not be * enabled. */ public boolean getAutoCommit() throws SQLException { return false; } /** * Overwritten from MConnection - closing an internal connection is a * no-op. An InternalConnection should only close when the underlying * transaction closes. *

* To dispose an InternalConnection, use the static * 'disposeJDBCConnection' method. */ public void close() { // IDEA: Perhaps we should use this as a hint to clear some caches // and free up some memory. } } /** * An implementation of DatabaseInterface used to execute queries on the * DatabaseConnection and return results to the JDBC client. *

* This is a thin implementation of jdbcserver.AbstractJDBCDatabaseInterface. */ private static class InternalDatabaseInterface extends AbstractJDBCDatabaseInterface { /** * The internal connection to the database. */ private DatabaseConnection database; /** * Constructor. */ public InternalDatabaseInterface(User user, DatabaseConnection db) { super(db.getDatabase()); this.database = db; init(user, db); } // ---------- Implemented from DatabaseInterface ---------- public boolean login(String default_schema, String username, String password, DatabaseCallBack call_back) throws SQLException { // This should never be used for an internal connection. throw new SQLException( "'login' is not supported for InterfaceDatabaseInterface"); } public void dispose() throws SQLException { internalDispose(); } } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/InternalTableInfo.java000066400000000000000000000050741330501023400261260ustar00rootroot00000000000000/** * com.mckoi.database.InternalTableInfo 23 Mar 2002 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import java.util.ArrayList; /** * A class that acts as a container for any system tables that are generated * from information inside the database engine. 
For example, the database * statistics table is an internal system table, as well as the table that * describes all database table information, etc. *

* This object acts as a container and factory for generating such tables. *

* Note that implementations of this object should be thread-safe and * immutable so we can create static global implementations. * * @author Tobias Downer */ interface InternalTableInfo { /** * Returns the number of internal table sources that this object is * maintaining. */ int getTableCount(); /** * Finds the index in this container of the given table name, otherwise * returns -1. */ int findTableName(TableName name); /** * Returns the name of the table at the given index in this container. */ TableName getTableName(int i); /** * Returns the DataTableDef object that describes the table at the given * index in this container. */ DataTableDef getDataTableDef(int i); /** * Returns true if this container contains a table with the given name. */ boolean containsTableName(TableName name); /** * Returns a String that describes the type of the table at the given index. */ String getTableType(int i); /** * This is the factory method for generating the internal table for the * given table in this container. This should return an implementation of * MutableTableDataSource that is used to represent the internal data being * modelled. *

* This method is allowed to throw an exception for table objects that aren't * backed by a MutableTableDataSource, such as a view. */ MutableTableDataSource createInternalTable(int index); } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/JoinedTable.java000066400000000000000000000354521330501023400247510ustar00rootroot00000000000000/** * com.mckoi.database.JoinedTable 20 Sep 2002 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import com.mckoi.util.IntegerVector; import com.mckoi.util.BlockIntegerList; /** * A Table that represents the result of one or more other tables joined * together. VirtualTable and NaturallyJoinedTable are derived from this * class. * * @author Tobias Downer */ public abstract class JoinedTable extends Table { /** * The list of tables that make up the join. */ protected Table[] reference_list; /** * The schemes to describe the entity relation in the given column. */ protected SelectableScheme[] column_scheme; /** * These two arrays are lookup tables created in the constructor. They allow * for quick resolution of where a given column should be 'routed' to in * the ancestors. */ /** * Maps the column number in this table to the reference_list array to route * to. */ protected int[] column_table; /** * Gives a column filter to the given column to route correctly to the * ancestor. 
*/ protected int[] column_filter; /** * The column that we are sorted against. This is an optimization set by * the 'optimisedPostSet' method. */ private int sorted_against_column = -1; /** * The DataTableDef object that describes the columns and name of this * table. */ private DataTableDef vt_table_def; /** * Incremented when the roots are locked. * See the 'lockRoot' and 'unlockRoot' methods. * NOTE: This should only ever be 1 or 0. */ private byte roots_locked; /** * Constructs the JoinedTable with the list of tables in the parent. */ JoinedTable(Table[] tables) { super(); init(tables); } /** * Constructs the JoinedTable with a single table. */ JoinedTable(Table table) { super(); Table[] tables = new Table[1]; tables[0] = table; init(tables); } /** * Protected constructor. */ protected JoinedTable() { super(); } /** * Helper function for initializing the variables in the joined table. */ protected void init(Table[] tables) { int table_count = tables.length; reference_list = tables; final int col_count = getColumnCount(); column_scheme = new SelectableScheme[col_count]; vt_table_def = new DataTableDef(); // Generate look up tables for column_table and column_filter information column_table = new int[col_count]; column_filter = new int[col_count]; int index = 0; for (int i = 0; i < reference_list.length; ++i) { Table cur_table = reference_list[i]; DataTableDef cur_table_def = cur_table.getDataTableDef(); int ref_col_count = cur_table.getColumnCount(); // For each column for (int n = 0; n < ref_col_count; ++n) { column_filter[index] = n; column_table[index] = i; ++index; // Add this column to the data table def of this table. vt_table_def.addVirtualColumn( new DataTableColumnDef(cur_table_def.columnAt(n))); } } // Final setup the DataTableDef for this virtual table vt_table_def.setTableName(new TableName(null, "#VIRTUAL TABLE#")); vt_table_def.setImmutable(); } /** * Returns a row reference list. 
This is an IntegerVector that represents a * 'reference' to the rows in our virtual table. *

* ISSUE: We should be able to optimise these types of things out. */ private IntegerVector calculateRowReferenceList() { int size = getRowCount(); IntegerVector all_list = new IntegerVector(size); for (int i = 0; i < size; ++i) { all_list.addInt(i); } return all_list; } /** * We simply pick the first table to resolve the Database object. */ public Database getDatabase() { return reference_list[0].getDatabase(); } /** * Returns the number of columns in the table. This simply returns the * column counts in the parent table(s). */ public int getColumnCount() { int column_count_sum = 0; for (int i = 0; i < reference_list.length; ++i) { column_count_sum += reference_list[i].getColumnCount(); } return column_count_sum; } /** * Given a fully qualified variable field name, ie. 'APP.CUSTOMER.CUSTOMERID' * this will return the column number the field is at. Returns -1 if the * field does not exist in the table. */ public int findFieldName(Variable v) { int col_index = 0; for (int i = 0; i < reference_list.length; ++i) { int col = reference_list[i].findFieldName(v); if (col != -1) { return col + col_index; } col_index += reference_list[i].getColumnCount(); } return -1; } /** * Returns a fully qualified Variable object that represents the name of * the column at the given index. For example, * new Variable(new TableName("APP", "CUSTOMER"), "ID") */ public final Variable getResolvedVariable(int column) { Table parent_table = reference_list[column_table[column]]; return parent_table.getResolvedVariable(column_filter[column]); } /** * Returns the list of Table objects that represent this VirtualTable. */ protected final Table[] getReferenceTables() { return reference_list; } /** * This is an optimisation that should only be called _after_ a 'set' method * has been called. Because the 'select' operation returns a set that is * ordered by the given column, we can very easily generate a * SelectableScheme object that can handle this column. 
* So 'column' is the column in which this virtual table is naturally ordered * by. * NOTE: The internals of this method may be totally commented out and the * database will still operate correctly. However this greatly speeds up * situations when you perform multiple consequtive operations on the same * column. */ void optimisedPostSet(int column) { sorted_against_column = column; } /** * Returns a SelectableScheme for the given column in the given VirtualTable * row domain. This searches down through the tables ancestors until it * comes across a table with a SelectableScheme where the given column is * fully resolved. In most cases, this will be the root DataTable. */ SelectableScheme getSelectableSchemeFor(int column, int original_column, Table table) { // First check if the given SelectableScheme is in the column_scheme array SelectableScheme scheme = column_scheme[column]; if (scheme != null) { if (table == this) { return scheme; } else { return scheme.getSubsetScheme(table, original_column); } } // If it isn't then we need to calculate it SelectableScheme ss; // Optimization: The table may be naturally ordered by a column. If it // is we don't try to generate an ordered set. if (sorted_against_column != -1 && sorted_against_column == column) { InsertSearch isop = new InsertSearch(this, column, calculateRowReferenceList()); isop.RECORD_UID = false; ss = isop; column_scheme[column] = ss; if (table != this) { ss = ss.getSubsetScheme(table, original_column); } } else { // Otherwise we must generate the ordered set from the information in // a parent index. Table parent_table = reference_list[column_table[column]]; ss = parent_table.getSelectableSchemeFor( column_filter[column], original_column, table); if (table == this) { column_scheme[column] = ss; } } return ss; } /** * Given a set, this trickles down through the Table hierarchy resolving * the given row_set to a form that the given ancestor understands. 
* Say you give the set { 0, 1, 2, 3, 4, 5, 6 }, this function may check * down three levels and return a new 7 element set with the rows fully * resolved to the given ancestors domain. */ void setToRowTableDomain(int column, IntegerVector row_set, TableDataSource ancestor) { if (ancestor == this) { return; } else { int table_num = column_table[column]; Table parent_table = reference_list[table_num]; // Resolve the rows into the parents indices. (MANGLES row_set) resolveAllRowsForTableAt(row_set, table_num); parent_table.setToRowTableDomain(column_filter[column], row_set, ancestor); return; } } /** * Returns an object that contains fully resolved, one level only information * about the DataTable and the row indices of the data in this table. * This information can be used to construct a new VirtualTable. We need * to supply an empty RawTableInformation object. */ RawTableInformation resolveToRawTable(RawTableInformation info, IntegerVector row_set) { if (this instanceof RootTable) { info.add((RootTable) this, calculateRowReferenceList()); } else { for (int i = 0; i < reference_list.length; ++i) { IntegerVector new_row_set = new IntegerVector(row_set); // Resolve the rows into the parents indices. resolveAllRowsForTableAt(new_row_set, i); Table table = reference_list[i]; if (table instanceof RootTable) { info.add((RootTable) table, new_row_set); } else { ((JoinedTable) table).resolveToRawTable(info, new_row_set); } } } return info; } /** * Return the list of DataTable and row sets that make up the raw information * in this table. */ RawTableInformation resolveToRawTable(RawTableInformation info) { IntegerVector all_list = new IntegerVector(); int size = getRowCount(); for (int i = 0; i < size; ++i) { all_list.addInt(i); } return resolveToRawTable(info, all_list); } /** * Returns the DataTableDef object that describes the columns in this * table. For a VirtualTable, this object contains the union of * all the columns in the children in the order set. 
The name of a * virtual table is the concat of all the parent table names. The * schema is set to null. */ public DataTableDef getDataTableDef() { return vt_table_def; } /** * Returns an object that represents the information in the given cell * in the table. */ public TObject getCellContents(int column, int row) { int table_num = column_table[column]; Table parent_table = reference_list[table_num]; row = resolveRowForTableAt(row, table_num); return parent_table.getCellContents(column_filter[column], row); } /** * Returns an Enumeration of the rows in this table. * The Enumeration is a fast way of retrieving consequtive rows in the table. */ public RowEnumeration rowEnumeration() { return new SimpleRowEnumeration(getRowCount()); } /** * Adds a DataTableListener to the DataTable objects at the root of this * table tree hierarchy. If this table represents the join of a number of * tables then the DataTableListener is added to all the DataTable objects * at the root. *

* A DataTableListener is notified of all modifications to the raw entries * of the table. This listener can be used for detecting changes in VIEWs, * for triggers or for caching of common queries. */ void addDataTableListener(DataTableListener listener) { for (int i = 0; i < reference_list.length; ++i) { reference_list[i].addDataTableListener(listener); } } /** * Removes a DataTableListener from the DataTable objects at the root of * this table tree hierarchy. If this table represents the join of a * number of tables, then the DataTableListener is removed from all the * DataTable objects at the root. */ void removeDataTableListener(DataTableListener listener) { for (int i = 0; i < reference_list.length; ++i) { reference_list[i].removeDataTableListener(listener); } } /** * Locks the root table(s) of this table so that it is impossible to * overwrite the underlying rows that may appear in this table. * This is used when cells in the table need to be accessed 'outside' the * lock. So we may have late access to cells in the table. * 'lock_key' is a given key that will also unlock the root table(s). * NOTE: This is nothing to do with the 'LockingMechanism' object. */ public void lockRoot(int lock_key) { // For each table, recurse. roots_locked++; for (int i = 0; i < reference_list.length; ++i) { reference_list[i].lockRoot(lock_key); } } /** * Unlocks the root tables so that the underlying rows may * once again be used if they are not locked and have been removed. This * should be called some time after the rows have been locked. */ public void unlockRoot(int lock_key) { // For each table, recurse. roots_locked--; for (int i = 0; i < reference_list.length; ++i) { reference_list[i].unlockRoot(lock_key); } } /** * Returns true if the table has its row roots locked (via the lockRoot(int) * method. */ public boolean hasRootsLocked() { return roots_locked != 0; } /** * Prints a graph of the table hierarchy to the stream. 
*/ public void printGraph(java.io.PrintStream out, int indent) { for (int i = 0; i < indent; ++i) { out.print(' '); } out.println("JT[" + getClass()); for (int i = 0; i < reference_list.length; ++i) { reference_list[i].printGraph(out, indent + 2); } for (int i = 0; i < indent; ++i) { out.print(' '); } out.println("]"); } // ---------- Abstract methods ---------- /** * Given a row and a table index (to a parent reference table), this will * return the row index in the given parent table for the given row. */ protected abstract int resolveRowForTableAt(int row_number, int table_num); /** * Given an IntegerVector that represents a list of pointers to rows in this * table, this resolves the rows to row indexes in the given parent table. * This method changes the 'row_set' IntegerVector object. */ protected abstract void resolveAllRowsForTableAt(IntegerVector row_set, int table_num); } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/JoiningSet.java000066400000000000000000000143171330501023400246370ustar00rootroot00000000000000/** * com.mckoi.database.JoiningSet 20 Sep 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import java.util.ArrayList; /** * Used in TableSet to describe how we naturally join the tables together. 
* This is used when the TableSet has evaluated the search condition and it * is required for any straggling tables to be naturally joined. In SQL, * these joining types are specified in the FROM clause. *

* For example,

 *   FROM table_a LEFT OUTER JOIN table_b ON ( table_a.id = table_b.id ), ...
 * 

* A ',' should donate an INNER_JOIN in an SQL FROM clause. * * @author Tobias Downer */ public final class JoiningSet implements java.io.Serializable, Cloneable { static final long serialVersionUID = -380871062550922402L; /** * Statics for Join Types. */ // Donates a standard inner join (in SQL, this is ',' in the FROM clause) public final static int INNER_JOIN = 1; // Left Outer Join, public final static int LEFT_OUTER_JOIN = 2; // Right Outer Join, public final static int RIGHT_OUTER_JOIN = 3; // Full Outer Join public final static int FULL_OUTER_JOIN = 4; /** * The list of tables we are joining together a JoinPart object that * represents how the tables are joined. */ private ArrayList join_set; /** * Constructs the JoiningSet. */ public JoiningSet() { join_set = new ArrayList(); } /** * Resolves the schema of tables in this joining set. This runs through * each table in the joining set and if the schema has not been set for the * table then it attempts to resolve it against the given DatabaseConnection * object. This would typically be called in the preparation of a statement. */ public void prepare(DatabaseConnection connection) { } /** * Adds a new table into the set being joined. The table name should be the * unique name that distinguishes this table in the TableSet. */ public void addTable(TableName table_name) { join_set.add(table_name); } /** * Hack, add a joining type to the previous entry from the end. This is * an artifact of how joins are parsed. */ public void addPreviousJoin(int type, Expression on_expression) { join_set.add(join_set.size() - 1, new JoinPart(type, on_expression)); } /** * Adds a joining type to the set, and an 'on' expression. */ public void addJoin(int type, Expression on_expression) { join_set.add(new JoinPart(type, on_expression)); } /** * Adds a joining type to the set with no 'on' expression. */ public void addJoin(int type) { join_set.add(new JoinPart(type)); } /** * Returns the number of tables that are in this set. 
*/ public int getTableCount() { return (join_set.size() + 1) / 2; } /** * Returns the first table in the join set. */ public TableName getFirstTable() { return getTable(0); } /** * Returns table 'n' in the result set where table 0 is the first table in * the join set. */ public TableName getTable(int n) { return (TableName) join_set.get(n * 2); } /** * Sets the table at the given position in this joining set. */ private void setTable(int n, TableName table) { join_set.set(n * 2, table); } /** * Returns the type of join after table 'n' in the set. An example * of using this;

   *
   * String table1 = joins.getFirstTable();
   * for (int i = 0; i < joins.getTableCount() - 1; ++i) {
   *   int type = joins.getJoinType(i);
   *   String table2 = getTable(i + 1);
   *   // ... Join table1 and table2 ...
   *   table1 = table2;
   * }
   *
   * 
*/ public int getJoinType(int n) { return ((JoinPart) join_set.get((n * 2) + 1)).type; } /** * Returns the ON Expression for the type of join after table 'n' in the * set. */ public Expression getOnExpression(int n) { return ((JoinPart) join_set.get((n * 2) + 1)).on_expression; } /** * Performs a deep clone on this object. */ public Object clone() throws CloneNotSupportedException { JoiningSet v = (JoiningSet) super.clone(); int size = join_set.size(); ArrayList cloned_join_set = new ArrayList(size); v.join_set = cloned_join_set; for (int i = 0; i < size; ++i) { Object element = join_set.get(i); if (element instanceof TableName) { // immutable so leave alone } else if (element instanceof JoinPart) { element = ((JoinPart) element).clone(); } else { throw new CloneNotSupportedException(element.getClass().toString()); } cloned_join_set.add(element); } return v; } // ---------- Inner classes ---------- public static class JoinPart implements java.io.Serializable, Cloneable { static final long serialVersionUID = -1664565759669808084L; /** * The type of join. Either LEFT_OUTER_JOIN, * RIGHT_OUTER_JOIN, FULL_OUTER_JOIN, INNER_JOIN. */ int type; /** * The expression that we are joining on (eg. ON clause in SQL). If there * is no ON expression (such as in the case of natural joins) then this is * null. */ Expression on_expression; /** * Constructs the JoinPart. 
*/ public JoinPart(int type, Expression on_expression) { this.type = type; this.on_expression = on_expression; } public JoinPart(int type) { this(type, null); } public Object clone() throws CloneNotSupportedException { JoinPart v = (JoinPart) super.clone(); if (on_expression != null) { v.on_expression = (Expression) on_expression.clone(); } return v; } } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/Lock.java000066400000000000000000000106561330501023400234600ustar00rootroot00000000000000/** * com.mckoi.database.Lock 11 May 1998 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import com.mckoi.debug.*; /** * This is a lock on a table in the LockingMechanism class. A new instance * of this class is created whenever a new lock for a table is made. A Lock * may be either a READ lock or a WRITE lock. A lock is within a LockingQueue * object. *

* @author Tobias Downer */ public final class Lock { /** * These statics are used to define whether the lock is a READ or WRITE * lock. */ public static final int READ = 0; public static final int WRITE = 1; /** * This stores the type of lock. It is either set to 'READ' or 'WRITE' */ private int type; /** * The table queue this lock is 'inside'. */ private LockingQueue queue; /** * This is set to true when the 'checkAccess' method is called on this * lock. */ private boolean was_checked; /** * The DebugLogger object that we log debug message to. */ private final DebugLogger debug; /** * The Constructor. As well as setting up the state of this object, it * also puts this lock into the table queue. */ Lock(int type, LockingQueue queue, DebugLogger logger) { this.debug = logger; this.type = type; this.queue = queue; was_checked = false; queue.addLock(this); } /** * Returns the type of lock. */ int getType() { return type; } /** * Returns the type of the lock as a string. */ String getTypeAsString() { int type = getType(); if (type == READ) { return "READ"; } else { return "WRITE"; } } /** * Returns the DataTable object this lock is locking */ DataTable getTable() { return queue.getTable(); } /** * Removes this lock from the queue. This is called when lock is released * from the table queues. * NOTE: This method does not need to be synchronized because synchronization * is handled by the 'LockingMechanism.unlockTables' method. */ void release() { queue.removeLock(this); if (!was_checked) { // Prints out a warning if a lock was released from the table queue but // never had 'checkAccess' called for it. String table_name = queue.getTable().getTableName().toString(); debug.write(Lvl.ERROR, this, "Lock on table '" + getTable().getTableName() + "' was released but never checked. 
" + toString()); debug.writeException(new RuntimeException("Lock Error Dump")); } // else { // // Notify table we released read/write lock // getTable().notifyReleaseRWLock(type); // } } /** * Checks the access for this lock. This asks the queue that contains * this lock if it is currently safe to access the table. If it is unsafe * for the table to be accessed, then it blocks until it is safe. Therefore, * when this method returns, it is safe to access the table for this lock. * The 'access_type' variable contains either 'READ' or 'WRITE' and is set * to the type of access that is currently being done to the table. If * access_type == WRITE then this.type must be WRITE. If access_type == * READ then this.type may be either READ or WRITE. *

* NOTE: After the first call to this method, following calls will not * block. */ void checkAccess(int access_type) { if (access_type == WRITE && this.type != WRITE) { throw new Error( "Access error on Lock: Tried to write to a non write lock."); } if (was_checked == false) { queue.checkAccess(this); was_checked = true; // // Notify table we are read/write locked // getTable().notifyAddRWLock(type); } } public String toString() { return "[Lock] type: " + getTypeAsString() + " was_checked: " + was_checked; } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/LockHandle.java000066400000000000000000000101571330501023400245700ustar00rootroot00000000000000/** * com.mckoi.database.LockHandle 11 May 1998 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import com.mckoi.debug.*; /** * This represents a handle for a series of locks that a query has over the * tables in a database. It is returned by the 'LockingMechanism' object * after the 'lockTables' method is used. *

* @author Tobias Downer */ public final class LockHandle { /** * The array of Lock objects that are being used in this locking process. */ private Lock[] lock_list; /** * A temporary index used during initialisation of object to add locks. */ private int lock_index; /** * Set when the 'unlockAll' method is called for the first time. */ private boolean unlocked; /** * The DebugLogger object that we log debug messages to. */ private final DebugLogger debug; /** * The Constructor. Takes the number of locks that will be put into this * handle. */ LockHandle(int lock_count, DebugLogger logger) { this.debug = logger; lock_list = new Lock[lock_count]; lock_index = 0; unlocked = false; } /** * Adds a new lock to the locks for this handle. * NOTE: This method does not need to be synchronized because synchronization * is handled by the 'LockingMechanism.lockTables' method. */ void addLock(Lock lock) { lock_list[lock_index] = lock; ++lock_index; } /** * Unlocks all the locks in this handle. This removes the locks from its * table queue. * NOTE: This method does not need to be synchronized because synchronization * is handled by the 'LockingMechanism.unlockTables' method. */ void unlockAll() { if (!unlocked) { for (int i = lock_list.length - 1; i >= 0; --i) { lock_list[i].release(); } unlocked = true; } } /** * Blocks until access to the given DataTable object is safe. It blocks * using either the read or read/write privs that it has been given. * Note that this method is public and is a method that is intended to be * used outside the locking mechanism. * We also provide an 'access_type' field which is set to the type of access * that is happening for this check. This is either Lock.READ or Lock.WRITE. * NOTE: Any call to this method after the first call should be * instantanious. 
*/ public void checkAccess(DataTable table, int access_type) { for (int i = lock_list.length - 1; i >= 0; --i) { Lock l = lock_list[i]; if (l.getTable() == table) { l.checkAccess(access_type); return; } } throw new RuntimeException( "The given DataTable was not found in the lock list for this handle"); } /** * On garbage collection, this will call 'unlockAll' just in case the * program did not use the 'LockingMechanism.unlockTables' method in error. * This should ensure the database does not deadlock. This method is a * 'just in case' clause. */ public void finalize() { if (!unlocked) { unlockAll(); debug.write(Lvl.ERROR, this, "Finalize released a table lock - " + "This indicates that there is a serious error. Locks should " + "only have a very short life span. The 'unlockAll' method should " + "have been called before finalization. " + toString()); } } public String toString() { StringBuffer str = new StringBuffer("LockHandle: "); for (int i = 0; i < lock_list.length; ++i) { str.append(lock_list[i].toString()); } return new String(str); } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/LockingMechanism.java000066400000000000000000000255031330501023400260000ustar00rootroot00000000000000/** * com.mckoi.database.LockingMechanism 09 May 1998 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package com.mckoi.database; import com.mckoi.debug.*; import java.util.HashMap; /** * This class represents a model for locking the tables in a database during * any sequence of concurrent read/write accesses. *

* Every table in the database has an 'access_queue' that is generated the * first time the table is accessed. When a read or write request happens, * the thread and the type of access is put onto the top of the queue. When * the read/write access to the table has completed, the access is removed * from the queue. *

* An access to the table may be 'blocked' until other threads have completed * their access of the table. *

* A table that has a 'read lock' can not be altered until the table object * is released. A table that has a 'write lock' may not be read until the * table object is released. *

* The general rules are: * a) A read request can go ahead if there are no write request infront of * this request in the access queue. * b) A write request can go ahead if the write request is at the front of * the access queue. *

* This class requires some for-sight to which tables will be read/written * to. We must pass all tables being read/written in a single stage. This * implies a 2 stage process, the 1st determining which tables are being * accessed and the 2nd performing the actual operations. *

* Some operations such as creating and dropping and modifying the security * tables may require that no threads interfere with the database state while * the operation is occuring. This is handled through an 'Excluside Mode'. * When an object calls the locking mechanism to switch into exclusive mode, it * blocks until all access to the database are complete, then continues, * blocking all other threads until the exclusive mode is cancelled. *

* The locking system, in simple terms, ensures that any multiple read * operations will happen concurrently, however write operations block until * all operations are complete. *

* SYNCHRONIZATION: This method implements some important concurrent models * for ensuring that queries can never be corrupted. *

* @author Tobias Downer */ public final class LockingMechanism { /** * Class statics. These are used in the 'setMode' method to request either * shared or exclusive access to the database. */ public final static int SHARED_MODE = 1; public final static int EXCLUSIVE_MODE = 2; /** * This Hashtable is a mapping from a 'DataTable' to the 'LockingQueue' * object that is available for it. */ private HashMap queues_map = new HashMap(); /** * This boolean is set as soon as a Thread requests to go into 'exclusive * mode'. */ private boolean in_exclusive_mode = false; /** * This contains the number of Threads that have requested to go into * 'shared mode'. It is incremented each time 'setMode(SHARED_MODE)' is * called. */ private int shared_mode = 0; /** * The DebugLogger object that we log debug messages to. */ private final DebugLogger debug; /** * Constructor. */ public LockingMechanism(DebugLogger logger) { this.debug = logger; } /** * This is a helper function for returning the LockingQueue object for the * DataTable object. If there has not previously been a queue instantiated * for the table, it creates a new one and adds it to the Hashtable. *

* ISSUE: Not synchronized because we guarenteed to be called from a * synchronized method right? */ private LockingQueue getQueueFor(DataTable table) { LockingQueue queue = (LockingQueue) queues_map.get(table); // If queue not in hashtable then create a new one and put it into mapping if (queue == null) { queue = new LockingQueue(table); queues_map.put(table, queue); } return queue; } /** * Resets this object so it may be reused. This will release all internal * DataTable queues that are being kept. */ public void reset() { synchronized (this) { // Check we are in exclusive mode, if (!isInExclusiveMode()) { // This is currently just a warning but should be upgraded to a // full error. debug.writeException(new RuntimeException("Should not clear a " + "LockingMechanism that's not in exclusive mode.")); } queues_map.clear(); } } /** * This method locks the given tables for either reading or writing. It * puts the access locks in a queue for the given tables. This 'reserves' * the rights for this thread to access the table in that way. This * reservation can be used by the system to decide table accessability. *

* NOTE: ** IMPORTANT ** We must ensure that a single Thread can not create * multiple table locks. Otherwise it will cause situations where deadlock * can result. * NOTE: ** IMPORTANT ** We must ensure that once a lock has occured, it * is unlocked at a later time _no matter what happens_. Otherwise there * will be situations where deadlock can result. * NOTE: A LockHandle should not be given to another Thread. *

* SYNCHRONIZATION: This method is synchronized to ensure multiple additions * to the locking queues can happen without interference. */ public LockHandle lockTables(DataTable[] t_write, DataTable[] t_read) { // Set up the local constants. final int lock_count = t_read.length + t_write.length; final LockHandle handle = new LockHandle(lock_count, debug); synchronized (this) { Lock lock; LockingQueue queue; int queue_index; // Add read and write locks to cache and to the handle. for (int i = t_write.length - 1; i >= 0; --i) { DataTable to_write_lock = t_write[i]; queue = getQueueFor(to_write_lock); // slightly confusing: this will add lock to given table queue lock = new Lock(Lock.WRITE, queue, debug); handle.addLock(lock); debug.write(Lvl.INFORMATION, this, "[LockingMechanism] Locking for WRITE: " + to_write_lock.getTableName()); } for (int i = t_read.length - 1; i >= 0; --i) { DataTable to_read_lock = t_read[i]; queue = getQueueFor(to_read_lock); // slightly confusing: this will add lock to given table queue lock = new Lock(Lock.READ, queue, debug); handle.addLock(lock); debug.write(Lvl.INFORMATION, this, "[LockingMechanism] Locking for READ: " + to_read_lock.getTableName()); } } debug.write(Lvl.INFORMATION, this, "Locked Tables"); return handle; } /** * Unlocks the tables that were previously locked by the 'lockTables' method. * It is required that this method is called after the table references made * by a query are released (set to null or forgotten). This usually means * _after_ the result set has been written to the client. * SYNCHRONIZATION: This method is synchronized so concurrent unlocking * can not corrupt the queues. */ public void unlockTables(LockHandle handle) { synchronized (this) { handle.unlockAll(); } debug.write(Lvl.INFORMATION, this, "UnLocked Tables"); } /** * Returns true if we are locked into exclusive mode. 
*/ public synchronized boolean isInExclusiveMode() { return in_exclusive_mode; } /** * This method _must_ be called before a threads initial access to a Database * object. It registers whether the preceding database accesses will be in * an 'exclusive mode' or a 'shared mode'. In shared mode, any number of * threads are able to access the database. In exclusive, the current thread * may be the only one that may access the database. * On requesting exclusive mode, it blocks until exclusive mode is available. * On requesting shared mode, it blocks only if currently in exclusive mode. * NOTE: 'exclusive mode' should be used only in system maintenance type * operations such as creating and dropping tables from the database. */ public synchronized void setMode(int mode) { // If currently in exclusive mode, block until not. while (in_exclusive_mode == true) { try { // System.out.println("Waiting because in exclusive lock."); wait(); // System.out.println("Finish: Waiting because in exclusive lock."); } catch (InterruptedException e) {} } if (mode == EXCLUSIVE_MODE) { // Set this thread to exclusive mode, and wait until all shared modes // have completed. in_exclusive_mode = true; while (shared_mode > 0) { try { // System.out.println("Waiting on exclusive lock: " + shared_mode); wait(); // System.out.println("Finish: Waiting on exclusive lock: " + shared_mode); } catch (InterruptedException e) {} } debug.write(Lvl.INFORMATION, this, "Locked into ** EXCLUSIVE MODE **"); } else if (mode == SHARED_MODE) { // Increase the threads counter that are in shared mode. ++shared_mode; debug.write(Lvl.INFORMATION, this, "Locked into SHARED MODE"); } else { throw new Error("Invalid mode"); } } /** * This must be called when the calls to a Database object have finished. * It 'finishes' the mode that the locking mechanism was set into by the * call to the 'setMode' method. * NOTE: ** IMPORTANT ** This method __MUST__ be guarenteed to be called some * time after the 'setMode' method. 
Otherwise deadlock. */ public synchronized void finishMode(int mode) { if (mode == EXCLUSIVE_MODE) { in_exclusive_mode = false; notifyAll(); debug.write(Lvl.INFORMATION, this, "UnLocked from ** EXCLUSIVE MODE **"); } else if (mode == SHARED_MODE) { --shared_mode; if (shared_mode == 0 && in_exclusive_mode) { notifyAll(); } else if (shared_mode < 0) { shared_mode = 0; notifyAll(); throw new RuntimeException("Too many 'finishMode(SHARED_MODE)' calls"); } debug.write(Lvl.INFORMATION, this, "UnLocked from SHARED MODE"); } else { throw new Error("Invalid mode"); } } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/LockingQueue.java000066400000000000000000000123471330501023400251620ustar00rootroot00000000000000/** * com.mckoi.database.LockingQueue 11 May 1998 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import com.mckoi.debug.*; import java.util.ArrayList; /** * This class is used in the 'LockingMechanism' class. It maintains a queue * of threads that have locked the table this queue refers to. A lock means * the table is either pending to be accessed, or the data in the table is * being used. *

* A write lock in the queue stops any concurrently running threads from * accessing the tables. A read lock can go ahead only if there is no write * lock in the queue below it. *

* The rules are simple, and allow for reading of tables to happen concurrently * and writing to happen sequentually. Once a table is pending being written * to, it must be guarenteed that no thread can read the table while the write * is happening. *

* @author Tobias Downer */ final class LockingQueue { /** * The DataTable this queue is 'protecting' */ private DataTable parent_table; /** * This is the queue that stores the table locks. */ private ArrayList queue; /** * The Constructor. */ LockingQueue(DataTable table) { parent_table = table; queue = new ArrayList(); } /** * Returns the DataTable object the queue is 'attached' to. */ DataTable getTable() { return parent_table; } /** * Adds a lock to the queue. * NOTE: This method is thread safe since it is only called from the * LockingMechanism synchronized methods. * SYNCHRONIZED: This has to be synchronized because we don't want new locks * being added while a 'checkAccess' is happening. */ synchronized void addLock(Lock lock) { queue.add(lock); } /** * Removes a lock from the queue. This also does a 'notifyAll()' to kick any * threads that might be blocking in the 'checkAccess' method. * SYNCHRONIZED: This has to be synchronized because we don't want locks to * be removed while a 'checkAccess' is happening. */ synchronized void removeLock(Lock lock) { queue.remove(lock); // Notify the table that we have released a lock from it. lock.getTable().notifyReleaseRWLock(lock.getType()); // System.out.println("Removing lock: " + lock); notifyAll(); } /** * Looks at the queue and _blocks_ if the access to the table by the means * specified in the lock is allowed or not. * The rules for determining this are as follows: *

* 1) If the lock is a READ lock and there is a WRITE lock 'infront' of * this lock on the queue then block. * 2) If the lock is a WRITE lock and the lock isn't at the front of the * queue then block. * 3) Retry when a lock is released from the queue. */ void checkAccess(Lock lock) { boolean blocked; int index, i; Lock test_lock; synchronized (this) { // Error checking. The queue must contain the lock. if (!queue.contains(lock)) { throw new Error("Queue does not contain the given lock"); } // If 'READ' if (lock.getType() == Lock.READ) { do { blocked = false; index = queue.indexOf(lock); for (i = index - 1; i >= 0 && blocked == false; --i) { test_lock = (Lock) queue.get(i); if (test_lock.getType() == Lock.WRITE) { blocked = true; } } if (blocked == true) { getTable().Debug().write(Lvl.INFORMATION, this, "Blocking on read."); // System.out.println("READ BLOCK: " + queue); try { wait(); } catch (InterruptedException ignore) {} } } while (blocked == true); } // Else must be 'WRITE' else { do { blocked = false; index = queue.indexOf(lock); if (index != 0) { blocked = true; getTable().Debug().write(Lvl.INFORMATION, this, "Blocking on write."); // System.out.println("WRITE BLOCK: " + queue); try { wait(); } catch (InterruptedException ignore) {} } } while (blocked == true); } // Notify the Lock table that we've got a lock on it. lock.getTable().notifyAddRWLock(lock.getType()); } /* synchronized (this) */ } public synchronized String toString() { StringBuffer str = new StringBuffer("[LockingQueue]: ("); for (int i = 0; i < queue.size(); ++i) { str.append(queue.get(i)); str.append(", "); } str.append(")"); return new String(str); } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/MasterTableDataSource.java000066400000000000000000002061711330501023400267450ustar00rootroot00000000000000/** * com.mckoi.database.MasterTableDataSource 19 Nov 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import java.util.ArrayList; import java.io.*; import com.mckoi.util.IntegerListInterface; import com.mckoi.util.IntegerIterator; import com.mckoi.util.IntegerVector; import com.mckoi.util.ByteArrayUtil; import com.mckoi.util.UserTerminal; import com.mckoi.util.Cache; import com.mckoi.debug.*; /** * A master table data source provides facilities for read/writing and * maintaining low level data in a table. It provides primitive table * operations such as retrieving a cell from a table, accessing the table's * DataTableDef, accessing indexes, and providing views of transactional * versions of the data. *

* Logically, a master table data source contains a dynamic number of rows and * a fixed number of columns. Each row has an associated state - either * DELETED, UNCOMMITTED, COMMITTED_ADDED or COMMITTED_REMOVED. A DELETED * row is a row that can be reused by a new row added to the table. *

* When a new row is added to the table, it is marked as UNCOMMITTED. It is * later tagged as COMMITTED_ADDED when the transaction that caused the row * addition is committed. If a row commits a row removal, the row is tagged * as COMMITTED_REMOVED and later the row garbage collector marks the row as * DELETED when there are no remaining references to the row. *

* A master table also maintains a list of indexes for the table. *

* How the master table logical structure is translated to a form that is * stored persistantly is implementation specific. This allows us flexibility * with different types of storage schemes. * * @author Tobias Downer */ abstract class MasterTableDataSource { // ---------- System information ---------- /** * The global TransactionSystem object that points to the global system * that this table source belongs to. */ private TransactionSystem system; /** * The StoreSystem implementation that represents the data persistence * layer. */ private StoreSystem store_system; // ---------- State information ---------- /** * An integer that uniquely identifies this data source within the * conglomerate. */ protected int table_id; /** * True if this table source is closed. */ protected boolean is_closed; // ---------- Root locking ---------- /** * The number of root locks this table data source has on it. *

* While a MasterTableDataSource has at least 1 root lock, it may not * reclaim deleted space in the data store. A root lock means that data * is still being pointed to in this file (even possibly committed deleted * data). */ private int root_lock; // ---------- Persistant data ---------- /** * A DataTableDef object that describes the table topology. This includes * the name and columns of the table. */ protected DataTableDef table_def; /** * A DataIndexSetDef object that describes the indexes on the table. */ protected DataIndexSetDef index_def; /** * A cached TableName for this data source. */ private TableName cached_table_name; /** * A multi-version representation of the table indices kept for this table * including the row list and the scheme indices. This contains the * transaction journals. */ protected MultiVersionTableIndices table_indices; /** * The list of RIDList objects for each column in this table. This is * a sorting optimization. */ protected RIDList[] column_rid_list; // ---------- Cached information ---------- /** * Set to false to disable cell caching. */ protected boolean DATA_CELL_CACHING = true; /** * A reference to the DataCellCache object. */ protected final DataCellCache cache; /** * The number of columns in this table. This is a cached optimization. */ protected int column_count; // --------- Parent information ---------- /** * The list of all open transactions managed by the parent conglomerate. * This is a thread safe object, and is updated whenever new transactions * are created, or transactions are closed. */ private OpenTransactionList open_transactions; // ---------- Row garbage collection ---------- /** * Manages scanning and deleting of rows marked as deleted within this * data source. */ protected MasterTableGarbageCollector garbage_collector; // ---------- Blob management ---------- /** * An abstracted reference to a BlobStore for managing blob references and * blob data. 
*/ protected BlobStoreInterface blob_store_interface; // ---------- Stat keys ---------- /** * The keys we use for Database.stats() for information for this table. */ protected String root_lock_key; protected String total_hits_key; protected String file_hits_key; protected String delete_hits_key; protected String insert_hits_key; /** * Constructs the MasterTableDataSource. The argument is a reference * to an object that manages the list of open transactions in the * conglomerate. This object uses this information to determine how journal * entries are to be merged with the master indices. */ MasterTableDataSource(TransactionSystem system, StoreSystem store_system, OpenTransactionList open_transactions, BlobStoreInterface blob_store_interface) { this.system = system; this.store_system = store_system; this.open_transactions = open_transactions; this.blob_store_interface = blob_store_interface; this.garbage_collector = new MasterTableGarbageCollector(this); this.cache = system.getDataCellCache(); is_closed = true; if (DATA_CELL_CACHING) { DATA_CELL_CACHING = (cache != null); } } /** * Returns the TransactionSystem for this table. */ public final TransactionSystem getSystem() { return system; } /** * Returns the DebugLogger object that can be used to log debug messages. */ public final DebugLogger Debug() { return getSystem().Debug(); } /** * Returns the TableName of this table source. */ public TableName getTableName() { return getDataTableDef().getTableName(); } /** * Returns the name of this table source. */ public String getName() { return getDataTableDef().getName(); } /** * Returns the schema name of this table source. */ public String getSchema() { return getDataTableDef().getSchema(); } /** * Returns a cached TableName for this data source. 
*/ synchronized TableName cachedTableName() { if (cached_table_name != null) { return cached_table_name; } cached_table_name = getTableName(); return cached_table_name; } /** * Updates the master records from the journal logs up to the given * 'commit_id'. This could be a fairly expensive operation if there are * a lot of modifications because each change could require a lookup * of records in the data source. *

* NOTE: It's extremely important that when this is called, there are no * transaction open that are using the merged journal. If there is, then * a transaction may be able to see changes in a table that were made * after the transaction started. *

* After this method is called, it's best to update the index file * with a call to 'synchronizeIndexFiles' */ synchronized void mergeJournalChanges(long commit_id) { boolean all_merged = table_indices.mergeJournalChanges(commit_id); // If all journal entries merged then schedule deleted row collection. if (all_merged && !isReadOnly()) { checkForCleanup(); } } /** * Returns a list of all MasterTableJournal objects that have been * successfully committed against this table that have an 'commit_id' that * is greater or equal to the given. *

* This is part of the conglomerate commit check phase and will be on a * commit_lock. */ synchronized MasterTableJournal[] findAllJournalsSince(long commit_id) { return table_indices.findAllJournalsSince(commit_id); } // ---------- Getters ---------- /** * Returns table_id - the unique identifier for this data source. */ int getTableID() { return table_id; } /** * Returns the DataTableDef object that represents the topology of this * table data source (name, columns, etc). Note that this information * can't be changed during the lifetime of a data source. */ DataTableDef getDataTableDef() { return table_def; } /** * Returns the DataIndexSetDef object that represents the indexes on this * table. */ DataIndexSetDef getDataIndexSetDef() { return index_def; } // ---------- Convenient statics ---------- /** * Creates a unique table name to give a file. This could be changed to suit * a particular OS's style of filesystem namespace. Or it could return some * arbitarily unique number. However, for debugging purposes it's often * a good idea to return a name that a user can recognise. *

* The 'table_id' is a guarenteed unique number between all tables. */ protected static String makeTableFileName(TransactionSystem system, int table_id, TableName table_name) { // NOTE: We may want to change this for different file systems. // For example DOS is not able to handle more than 8 characters // and is case insensitive. String tid = Integer.toString(table_id); int pad = 3 - tid.length(); StringBuffer buf = new StringBuffer(); for (int i = 0; i < pad; ++i) { buf.append('0'); } String str = table_name.toString().replace('.', '_'); // Go through each character and remove each non a-z,A-Z,0-9,_ character. // This ensure there are no strange characters in the file name that the // underlying OS may not like. StringBuffer osified_name = new StringBuffer(); int count = 0; for (int i = 0; i < str.length() || count > 64; ++i) { char c = str.charAt(i); if ((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9') || c == '_') { osified_name.append(c); ++count; } } return new String(buf) + tid + new String(osified_name); } // ---------- Abstract methods ---------- /** * Returns a string that uniquely identifies this table within the * conglomerate context. For example, the filename of the table. This * string can be used to open and initialize the table also. */ abstract String getSourceIdent(); /** * Sets the record type for the given record in the table and returns the * previous state of the record. This is used to change the state of a * row in the table. */ abstract int writeRecordType(int row_index, int row_state) throws IOException; /** * Reads the record state for the given record in the table. */ abstract int readRecordType(int row_index) throws IOException; /** * Returns true if the record with the given index is deleted from the table. * A deleted row can not be read. */ abstract boolean recordDeleted(int row_index) throws IOException; /** * Returns the raw count or rows in the table, including uncommited, * committed and deleted rows. 
This is basically the maximum number of rows * we can iterate through. */ abstract int rawRowCount() throws IOException; /** * Removes the row at the given index so that any resources associated with * the row may be immediately available to be recycled. */ abstract void internalDeleteRow(int row_index) throws IOException; /** * Creates and returns an IndexSet object that is used to create indices * for this table source. The IndexSet represents a snapshot of the * table and the given point in time. *

* NOTE: Not synchronized because we synchronize in the IndexStore object. */ abstract IndexSet createIndexSet(); /** * Commits changes made to an IndexSet returned by the 'createIndexSet' * method. This method also disposes the IndexSet so it is no longer * valid. */ abstract void commitIndexSet(IndexSet index_set); /** * Adds a new row to this table and returns an index that is used to * reference this row by the 'getCellContents' method. *

* Note that this method will not effect the master index or column schemes. * This is a low level mechanism for adding unreferenced data into a * conglomerate. The data is referenced by committing the change where it * eventually migrates into the master index and schemes. */ abstract int internalAddRow(RowData data) throws IOException; /** * Returns the cell contents of the given cell in the table. It is the * responsibility of the implemented method to perform caching as it deems * fit. Some representations may not require such extensive caching as * others. */ abstract TObject internalGetCellContents(int column, int row); /** * Atomically returns the current 'unique_id' value for this table. */ abstract long currentUniqueID(); /** * Atomically returns the next 'unique_id' value from this table. */ abstract long nextUniqueID(); /** * Sets the unique id for this store. This must only be used under * extraordinary circumstances, such as restoring from a backup, or * converting from one file to another. */ abstract void setUniqueID(long value); /** * Disposes of all in-memory resources associated with this table and * invalidates this object. If 'pending_drop' is true then the table is * to be disposed pending a call to 'drop'. If 'pending_drop' is true then * any persistant resources that are allocated may be freed. */ abstract void dispose(boolean pending_drop) throws IOException; /** * Disposes and drops this table. If the dispose failed for any reason, * it returns false, otherwise true. If the drop failed, it should be * retried at a later time. */ abstract boolean drop() throws IOException; /** * Called by the 'shutdown hook' on the conglomerate. This method should * block until the table can by put into a safe mode and then prevent any * further access to the object after it returns. It must operate very * quickly. */ abstract void shutdownHookCleanup(); /** * Returns true if a compact table is necessary. 
By default, we return * true however it is recommended this method is overwritten and the table * tested. */ boolean isWorthCompacting() { return true; } /** * Creates a SelectableScheme object for the given column in this table. * This reads the index from the index set (if there is one) then wraps * it around the selectable schema as appropriate. *

* NOTE: This needs to be deprecated in support of composite indexes. */ synchronized SelectableScheme createSelectableSchemeForColumn( IndexSet index_set, TableDataSource table, int column) { // What's the type of scheme for this column? DataTableColumnDef column_def = getDataTableDef().columnAt(column); // If the column isn't indexable then return a BlindSearch object if (!column_def.isIndexableType()) { return new BlindSearch(table, column); } String scheme_type = column_def.getIndexScheme(); if (scheme_type.equals("InsertSearch")) { // Search the TableIndexDef for this column DataIndexSetDef index_set_def = getDataIndexSetDef(); int index_i = index_set_def.findIndexForColumns( new String[] { column_def.getName() }); return createSelectableSchemeForIndex(index_set, table, index_i); } else if (scheme_type.equals("BlindSearch")) { return new BlindSearch(table, column); } else { throw new Error("Unknown scheme type"); } } /** * Creates a SelectableScheme object for the given index in the index set def * in this table. * This reads the index from the index set (if there is one) then wraps * it around the selectable schema as appropriate. */ synchronized SelectableScheme createSelectableSchemeForIndex( IndexSet index_set, TableDataSource table, int index_i) { // Get the IndexDef object DataIndexDef index_def = getDataIndexSetDef().indexAt(index_i); if (index_def.getType().equals("BLIST")) { String[] cols = index_def.getColumnNames(); DataTableDef table_def = getDataTableDef(); if (cols.length == 1) { // If a single column int col_index = table_def.findColumnName(cols[0]); // Get the index from the index set and set up the new InsertSearch // scheme. 
IntegerListInterface index_list = index_set.getIndex(index_def.getPointer()); InsertSearch iis = new InsertSearch(table, col_index, index_list); return iis; } else { throw new RuntimeException( "Multi-column indexes not supported at this time."); } } else { throw new RuntimeException("Unrecognised type."); } } /** * Creates a minimal TableDataSource object that represents this * MasterTableDataSource. It does not implement the 'getColumnScheme' * method. */ protected TableDataSource minimalTableDataSource( final IntegerListInterface master_index) { // Make a TableDataSource that represents the master table over this // index. return new TableDataSource() { public TransactionSystem getSystem() { return system; } public DataTableDef getDataTableDef() { return MasterTableDataSource.this.getDataTableDef(); } public int getRowCount() { // NOTE: Returns the number of rows in the master index before journal // entries have been made. return master_index.size(); } public RowEnumeration rowEnumeration() { // NOTE: Returns iterator across master index before journal entry // changes. // Get an iterator across the row list. final IntegerIterator iterator = master_index.iterator(); // Wrap it around a RowEnumeration object. return new RowEnumeration() { public boolean hasMoreRows() { return iterator.hasNext(); } public int nextRowIndex() { return iterator.next(); } }; } public SelectableScheme getColumnScheme(int column) { throw new Error("Not implemented."); } public TObject getCellContents(int column, int row) { return MasterTableDataSource.this.getCellContents(column, row); } }; } /** * Builds a complete index set on the data in this table. This must only be * called when either, a) we are under a commit lock, or b) there is a * guarentee that no concurrect access to the indexing information can happen * (such as when we are creating the table). *

* NOTE: We assume that the index information for this table is blank before * this method is called. */ synchronized void buildIndexes() throws IOException { IndexSet index_set = createIndexSet(); DataIndexSetDef index_set_def = getDataIndexSetDef(); final int row_count = rawRowCount(); // Master index is always on index position 0 IntegerListInterface master_index = index_set.getIndex(0); // First, update the master index for (int row_index = 0; row_index < row_count; ++row_index) { // If this row isn't deleted, set the index information for it, if (!recordDeleted(row_index)) { // First add to master index boolean inserted = master_index.uniqueInsertSort(row_index); if (!inserted) { throw new RuntimeException( "Assertion failed: Master index entry was duplicated."); } } } // Commit the master index commitIndexSet(index_set); // Now go ahead and build each index in this table int index_count = index_set_def.indexCount(); for (int i = 0; i < index_count; ++i) { buildIndex(i); } } /** * Builds the given index number (from the DataIndexSetDef). This must only * be called when either, a) we are under a commit lock, or b) there is a * guarentee that no concurrect access to the indexing information can happen * (such as when we are creating the table). *

* NOTE: We assume that the index number in this table is blank before this * method is called. */ synchronized void buildIndex(final int index_number) throws IOException { DataIndexSetDef index_set_def = getDataIndexSetDef(); IndexSet index_set = createIndexSet(); // Master index is always on index position 0 IntegerListInterface master_index = index_set.getIndex(0); // A minimal TableDataSource for constructing the indexes TableDataSource min_table_source = minimalTableDataSource(master_index); // Set up schemes for the index, SelectableScheme scheme = createSelectableSchemeForIndex(index_set, min_table_source, index_number); // Rebuild the entire index int row_count = rawRowCount(); for (int row_index = 0; row_index < row_count; ++row_index) { // If this row isn't deleted, set the index information for it, if (!recordDeleted(row_index)) { scheme.insert(row_index); } } // Commit the index commitIndexSet(index_set); } /** * Adds a new transaction modification to this master table source. This * information represents the information that was added/removed in the * table in this transaction. The IndexSet object represents the changed * index information to commit to this table. *

* It's guarenteed that 'commit_id' additions will be sequential. */ synchronized void commitTransactionChange(long commit_id, MasterTableJournal change, IndexSet index_set) { // ASSERT: Can't do this if source is read only. if (isReadOnly()) { throw new Error("Can't commit transaction journal, table is read only."); } change.setCommitID(commit_id); try { // Add this journal to the multi version table indices log table_indices.addTransactionJournal(change); // Write the modified index set to the index store // (Updates the index file) commitIndexSet(index_set); // Update the state of the committed added data to the file system. // (Updates data to the allocation file) // // ISSUE: This can add up to a lot of changes to the allocation file and // the Java runtime could potentially be terminated in the middle of // the update. If an interruption happens the allocation information // may be incorrectly flagged. The type of corruption this would // result in would be; // + From an 'update' the updated record may disappear. // + From a 'delete' the deleted record may not delete. // + From an 'insert' the inserted record may not insert. // // Note, the possibility of this type of corruption occuring has been // minimized as best as possible given the current architecture. // Also note that is not possible for a table file to become corrupted // beyond recovery from this issue. int size = change.entries(); for (int i = 0; i < size; ++i) { byte b = change.getCommand(i); int row_index = change.getRowIndex(i); // Was a row added or removed? if (MasterTableJournal.isAddCommand(b)) { // Record commit added int old_type = writeRecordType(row_index, 0x010); // Check the record was in an uncommitted state before we changed // it. 
if ((old_type & 0x0F0) != 0) { writeRecordType(row_index, old_type & 0x0F0); throw new Error("Record " + row_index + " of table " + this + " was not in an uncommitted state!"); } } else if (MasterTableJournal.isRemoveCommand(b)) { // Record commit removed int old_type = writeRecordType(row_index, 0x020); // Check the record was in an added state before we removed it. if ((old_type & 0x0F0) != 0x010) { writeRecordType(row_index, old_type & 0x0F0); // System.out.println(change); throw new Error("Record " + row_index + " of table " + this + " was not in an added state!"); } // Notify collector that this row has been marked as deleted. garbage_collector.markRowAsDeleted(row_index); } } } catch (IOException e) { Debug().writeException(e); throw new Error("IO Error: " + e.getMessage()); } } /** * Rolls back a transaction change in this table source. Any rows added * to the table will be uncommited rows (type_key = 0). Those rows must be * marked as committed deleted. */ synchronized void rollbackTransactionChange(MasterTableJournal change) { // ASSERT: Can't do this is source is read only. if (isReadOnly()) { throw new Error( "Can't rollback transaction journal, table is read only."); } // Any rows added in the journal are marked as committed deleted and the // journal is then discarded. try { // Mark all rows in the data_store as appropriate to the changes. int size = change.entries(); for (int i = 0; i < size; ++i) { byte b = change.getCommand(i); int row_index = change.getRowIndex(i); // Make row as added or removed. if (MasterTableJournal.isAddCommand(b)) { // Record commit removed (we are rolling back remember). // int old_type = data_store.writeRecordType(row_index + 1, 0x020); int old_type = writeRecordType(row_index, 0x020); // Check the record was in an uncommitted state before we changed // it. 
if ((old_type & 0x0F0) != 0) { // data_store.writeRecordType(row_index + 1, old_type & 0x0F0); writeRecordType(row_index, old_type & 0x0F0); throw new Error("Record " + row_index + " was not in an " + "uncommitted state!"); } // Notify collector that this row has been marked as deleted. garbage_collector.markRowAsDeleted(row_index); } else if (MasterTableJournal.isRemoveCommand(b)) { // Any journal entries marked as TABLE_REMOVE are ignored because // we are rolling back. This means the row is not logically changed. } } // The journal entry is discarded, the indices do not need to be updated // to reflect this rollback. } catch (IOException e) { Debug().writeException(e); throw new Error("IO Error: " + e.getMessage()); } } /** * Returns a MutableTableDataSource object that represents this data source * at the time the given transaction started. Any modifications to the * returned table are logged in the table journal. *

* This is a key method in this object because it allows us to get a data * source that represents the data in the table before any modifications * may have been committed. */ MutableTableDataSource createTableDataSourceAtCommit( SimpleTransaction transaction) { return createTableDataSourceAtCommit(transaction, new MasterTableJournal(getTableID())); } /** * Returns a MutableTableDataSource object that represents this data source * at the time the given transaction started, and also also makes any * modifications that are described by the journal in the table. *

* This method is useful for merging the changes made by a transaction into * a view of the table. */ MutableTableDataSource createTableDataSourceAtCommit( SimpleTransaction transaction, MasterTableJournal journal) { return new MMutableTableDataSource(transaction, journal); } // ---------- File IO level table modification ---------- /** * Sets up the DataIndexSetDef object from the information set in this object. * This will only setup a default IndexSetDef on the information in the * DataTableDef. */ protected synchronized void setupDataIndexSetDef() { // Create the initial DataIndexSetDef object. index_def = new DataIndexSetDef(table_def.getTableName()); for (int i = 0; i < table_def.columnCount(); ++i) { DataTableColumnDef col_def = table_def.columnAt(i); if (col_def.isIndexableType() && col_def.getIndexScheme().equals("InsertSearch")) { index_def.addDataIndexDef(new DataIndexDef("ANON-COLUMN:" + i, new String[] { col_def.getName() }, i + 1, "BLIST", false)); } } } /** * Sets up the DataTableDef. This would typically only ever be called from * the 'create' method. */ protected synchronized void setupDataTableDef(DataTableDef table_def) { // Check table_id isn't too large. if ((table_id & 0x0F0000000) != 0) { throw new Error("'table_id' exceeds maximum possible keys."); } this.table_def = table_def; // The name of the table to create, TableName table_name = table_def.getTableName(); // Create table indices table_indices = new MultiVersionTableIndices(getSystem(), table_name, table_def.columnCount()); // The column rid list cache column_rid_list = new RIDList[table_def.columnCount()]; // Setup the DataIndexSetDef setupDataIndexSetDef(); } /** * Loads the internal variables. */ protected synchronized void loadInternal() { // Set up the stat keys. String table_name = table_def.getName(); String schema_name = table_def.getSchema(); String n = table_name; if (schema_name.length() > 0) { n = schema_name + "." 
+ table_name; } root_lock_key = "MasterTableDataSource.RootLocks." + n; total_hits_key = "MasterTableDataSource.Hits.Total." + n; file_hits_key = "MasterTableDataSource.Hits.File." + n; delete_hits_key = "MasterTableDataSource.Hits.Delete." + n; insert_hits_key = "MasterTableDataSource.Hits.Insert." + n; column_count = table_def.columnCount(); is_closed = false; } /** * Returns true if this table source is closed. */ synchronized boolean isClosed() { return is_closed; } /** * Returns true if the source is read only. */ boolean isReadOnly() { return system.readOnlyAccess(); } /** * Returns the StoreSystem object used to manage stores in the persistence * system. */ protected StoreSystem storeSystem() { return store_system; } /** * Adds a new row to this table and returns an index that is used to * reference this row by the 'getCellContents' method. *

* Note that this method will not effect the master index or column schemes. * This is a low level mechanism for adding unreferenced data into a * conglomerate. The data is referenced by committing the change where it * eventually migrates into the master index and schemes. */ int addRow(RowData data) throws IOException { int row_number; synchronized(this) { row_number = internalAddRow(data); } // synchronized // Update stats getSystem().stats().increment(insert_hits_key); // Return the record index of the new data in the table return row_number; } /** * Actually deletes the row from the table. This is a permanent removal of * the row from the table. After this method is called, the row can not * be retrieved again. This is generally only used by the row garbage * collector. *

* There is no checking in this method. */ private synchronized void doHardRowRemove(int row_index) throws IOException { // If we have a rid_list for any of the columns, then update the indexing // there, for (int i = 0; i < column_count; ++i) { RIDList rid_list = column_rid_list[i]; if (rid_list != null) { rid_list.removeRID(row_index); } } // Internally delete the row, internalDeleteRow(row_index); // Update stats system.stats().increment(delete_hits_key); } /** * Permanently removes a row from this table. This must only be used when * it is determined that a transaction does not reference this row, and * that an open result set does not reference this row. This will remove * the row permanently from the underlying file representation. Calls to * 'getCellContents(col, row)' where row is deleted will be undefined after * this method is called. *

* Note that the removed row must not be contained within the master index, * or be referenced by the index schemes, or be referenced in the * transaction modification list. */ synchronized void hardRemoveRow(final int record_index) throws IOException { // ASSERTION: We are not under a root lock. if (!isRootLocked()) { // int type_key = data_store.readRecordType(record_index + 1); int type_key = readRecordType(record_index); // Check this record is marked as committed removed. if ((type_key & 0x0F0) == 0x020) { doHardRowRemove(record_index); } else { throw new Error( "Row isn't marked as committed removed: " + record_index); } } else { throw new Error("Assertion failed: " + "Can't remove row, table is under a root lock."); } } /** * Checks the given record index, and if it's possible to reclaim it then * it does so here. Rows are only removed if they are marked as committed * removed. */ synchronized boolean hardCheckAndReclaimRow(final int record_index) throws IOException { // ASSERTION: We are not under a root lock. if (!isRootLocked()) { // Row already deleted? if (!recordDeleted(record_index)) { int type_key = readRecordType(record_index); // Check this record is marked as committed removed. if ((type_key & 0x0F0) == 0x020) { // System.out.println("[" + getName() + "] " + // "Hard Removing: " + record_index); doHardRowRemove(record_index); return true; } } return false; } else { throw new Error("Assertion failed: " + "Can't remove row, table is under a root lock."); } } /** * Returns the record type of the given record index. Returns a type that * is compatible with RawDiagnosticTable record type. 
*/ synchronized int recordTypeInfo(int record_index) throws IOException { // ++record_index; if (recordDeleted(record_index)) { return RawDiagnosticTable.DELETED; } int type_key = readRecordType(record_index) & 0x0F0; if (type_key == 0) { return RawDiagnosticTable.UNCOMMITTED; } else if (type_key == 0x010) { return RawDiagnosticTable.COMMITTED_ADDED; } else if (type_key == 0x020) { return RawDiagnosticTable.COMMITTED_REMOVED; } return RawDiagnosticTable.RECORD_STATE_ERROR; } /** * This is called by the 'open' method. It performs a scan of the records * and marks any rows that are uncommitted as deleted. It also checks * that the row is not within the master index. */ protected synchronized void doOpeningScan() throws IOException { long in_time = System.currentTimeMillis(); // ASSERTION: No root locks and no pending transaction changes, // VERY important we assert there's no pending transactions. if (isRootLocked() || hasTransactionChangesPending()) { // This shouldn't happen if we are calling from 'open'. throw new RuntimeException( "Odd, we are root locked or have pending journal changes."); } // This is pointless if we are in read only mode. if (!isReadOnly()) { // A journal of index changes during this scan... MasterTableJournal journal = new MasterTableJournal(); // Get the master index of rows in this table IndexSet index_set = createIndexSet(); IntegerListInterface master_index = index_set.getIndex(0); // NOTE: We assume the index information is correct and that the // allocation information is potentially bad. int row_count = rawRowCount(); for (int i = 0 ; i < row_count; ++i) { // Is this record marked as deleted? if (!recordDeleted(i)) { // Get the type flags for this record. int type = recordTypeInfo(i); // Check if this record is marked as committed removed, or is an // uncommitted record. if (type == RawDiagnosticTable.COMMITTED_REMOVED || type == RawDiagnosticTable.UNCOMMITTED) { // Check it's not in the master index... 
if (!master_index.contains(i)) { // Delete it. doHardRowRemove(i); } else { Debug().write(Lvl.ERROR, this, "Inconsistant: Row is indexed but marked as " + "removed or uncommitted."); Debug().write(Lvl.ERROR, this, "Row: " + i + " Type: " + type + " Table: " + getTableName()); // Mark the row as committed added because it is in the index. writeRecordType(i, 0x010); } } else { // Must be committed added. Check it's indexed. if (!master_index.contains(i)) { // Not indexed, so data is inconsistant. Debug().write(Lvl.ERROR, this, "Inconsistant: Row committed added but not in master index."); Debug().write(Lvl.ERROR, this, "Row: " + i + " Type: " + type + " Table: " + getTableName()); // Mark the row as committed removed because it is not in the // index. writeRecordType(i, 0x020); } } } else { // if deleted // Check this record isn't in the master index. if (master_index.contains(i)) { // It's in the master index which is wrong! We should remake the // indices. Debug().write(Lvl.ERROR, this, "Inconsistant: Row is removed but in index."); Debug().write(Lvl.ERROR, this, "Row: " + i + " Table: " + getTableName()); // Mark the row as committed added because it is in the index. writeRecordType(i, 0x010); } } } // for (int i = 0 ; i < row_count; ++i) // Dispose the index set index_set.dispose(); } long bench_time = System.currentTimeMillis() - in_time; if (Debug().isInterestedIn(Lvl.INFORMATION)) { Debug().write(Lvl.INFORMATION, this, "Opening scan for " + toString() + " (" + getTableName() + ") took " + bench_time + "ms."); } } /** * Returns an implementation of RawDiagnosticTable that we can use to * diagnose problems with the data in this source. */ RawDiagnosticTable getRawDiagnosticTable() { return new MRawDiagnosticTable(); } /** * Returns the cell contents of the given cell in the table. This will * look up the cell in the file if it can't be found in the cell cache. This * method is undefined if row has been removed or was not returned by * the 'addRow' method. 
   */
  TObject getCellContents(int column, int row) {
    // Negative rows can never have been returned by 'addRow'.
    if (row < 0) {
      throw new Error("'row' is < 0");
    }
    return internalGetCellContents(column, row);
  }

  /**
   * Grabs a root lock on this table.
   * <p>
   * While a MasterTableDataSource has at least 1 root lock, it may not
   * reclaim deleted space in the data store.  A root lock means that data
   * is still being pointed to in this file (even possibly committed deleted
   * data).
   */
  synchronized void addRootLock() {
    system.stats().increment(root_lock_key);
    ++root_lock;
  }

  /**
   * Removes a root lock from this table.
   * <p>
   * While a MasterTableDataSource has at least 1 root lock, it may not
   * reclaim deleted space in the data store.  A root lock means that data
   * is still being pointed to in this file (even possibly committed deleted
   * data).
   */
  synchronized void removeRootLock() {
    if (!is_closed) {
      // NOTE(review): the stats counter is decremented before the underflow
      //   check below, so an erroneous extra remove still alters the
      //   statistic -- confirm whether this is intentional.
      system.stats().decrement(root_lock_key);
      if (root_lock == 0) {
        throw new Error("Too many root locks removed!");
      }
      --root_lock;
      // If the last lock is removed, schedule a possible collection.
      if (root_lock == 0) {
        checkForCleanup();
      }
    }
  }

  /**
   * Returns true if the table is currently under a root lock (has 1 or more
   * root locks on it).
   */
  synchronized boolean isRootLocked() {
    return root_lock > 0;
  }

  /**
   * Clears all root locks on the table.  Should only be used during cleanup
   * of the table and will by definition invalidate the table.
   */
  protected synchronized void clearAllRootLocks() {
    root_lock = 0;
  }

  /**
   * Checks to determine if it is safe to clean up any resources in the
   * table, and if it is safe to do so, the space is reclaimed.
   */
  abstract void checkForCleanup();

  /**
   * Delegates to the transaction index container's 'transactionChangeString'.
   */
  synchronized String transactionChangeString() {
    return table_indices.transactionChangeString();
  }

  /**
   * Returns true if this table has any journal modifications that have not
   * yet been incorporated into master index.
   */
  synchronized boolean hasTransactionChangesPending() {
    return table_indices.hasTransactionChangesPending();
  }

  // ---------- Inner classes ----------

  /**
   * A RawDiagnosticTable implementation that provides direct access to the
   * root data of this table source bypassing any indexing schemes.  This
   * interface allows for the inspection and repair of data files.
*/ private final class MRawDiagnosticTable implements RawDiagnosticTable { // ---------- Implemented from RawDiagnosticTable ----------- public int physicalRecordCount() { try { return rawRowCount(); } catch (IOException e) { throw new Error(e.getMessage()); } } public DataTableDef getDataTableDef() { return MasterTableDataSource.this.getDataTableDef(); } public int recordState(int record_index) { try { return recordTypeInfo(record_index); } catch (IOException e) { throw new Error(e.getMessage()); } } public int recordSize(int record_index) { return -1; } public TObject getCellContents(int column, int record_index) { return MasterTableDataSource.this.getCellContents(column, record_index); } public String recordMiscInformation(int record_index) { return null; } } /** * A MutableTableDataSource object as returned by the * 'createTableDataSourceAtCommit' method. *

   * NOTE: This object is NOT thread-safe and it is assumed any use of this
   * object will be thread exclusive.  This is okay because multiple
   * instances of this object can be created on the same MasterTableDataSource
   * if multi-thread access to a MasterTableDataSource is desirable.
   */
  private final class MMutableTableDataSource
                                          implements MutableTableDataSource {

    /**
     * The Transaction object that this MutableTableDataSource was
     * generated from.  This reference should be used only to query
     * database constraint information.
     */
    private SimpleTransaction transaction;

    /**
     * True if the transaction is read-only.
     */
    private boolean tran_read_only;

    /**
     * The name of this table.
     */
    private TableName table_name;

    /**
     * The 'recovery point' to which the row index in this table source has
     * rebuilt to (a journal entry count).
     */
    private int row_list_rebuild;

    /**
     * The index that represents the rows that are within this
     * table data source within this transaction.
     */
    private IntegerListInterface row_list;

    /**
     * The 'recovery point' to which the schemes in this table source have
     * rebuilt to, one entry per column.
     */
    private int[] scheme_rebuilds;

    /**
     * The IndexSet for this mutable table source.
     */
    private IndexSet index_set;

    /**
     * The SelectableScheme array that represents the schemes for the
     * columns within this transaction.
     */
    private SelectableScheme[] column_schemes;

    /**
     * A journal of changes to this source since it was created.
     */
    private MasterTableJournal table_journal;

    /**
     * The journal entry count at the last time changes were checked for
     * referential integrity violations.
     */
    private int last_entry_ri_check;

    /**
     * Constructs the data source.
*/ public MMutableTableDataSource(SimpleTransaction transaction, MasterTableJournal journal) { this.transaction = transaction; this.index_set = transaction.getIndexSetForTable(MasterTableDataSource.this); int col_count = getDataTableDef().columnCount(); this.table_name = getDataTableDef().getTableName(); this.tran_read_only = transaction.isReadOnly(); row_list_rebuild = 0; scheme_rebuilds = new int[col_count]; column_schemes = new SelectableScheme[col_count]; table_journal = journal; last_entry_ri_check = table_journal.entries(); } /** * Executes an update referential action. If the update action is * "NO ACTION", and the constraint is INITIALLY_IMMEDIATE, and the new key * doesn't exist in the referral table, an exception is thrown. */ private void executeUpdateReferentialAction( Transaction.ColumnGroupReference constraint, TObject[] original_key, TObject[] new_key, QueryContext context) { final String update_rule = constraint.update_rule; if (update_rule.equals("NO ACTION") && constraint.deferred != Transaction.INITIALLY_IMMEDIATE) { // Constraint check is deferred return; } // So either update rule is not NO ACTION, or if it is we are initially // immediate. MutableTableDataSource key_table = transaction.getTable(constraint.key_table_name); DataTableDef table_def = key_table.getDataTableDef(); int[] key_cols = TableDataConglomerate.findColumnIndices( table_def, constraint.key_columns); IntegerVector key_entries = TableDataConglomerate.findKeys(key_table, key_cols, original_key); // Are there keys effected? 
if (key_entries.size() > 0) { if (update_rule.equals("NO ACTION")) { // Throw an exception; throw new DatabaseConstraintViolationException( DatabaseConstraintViolationException.FOREIGN_KEY_VIOLATION, TableDataConglomerate.deferredString(constraint.deferred) + " foreign key constraint violation on update (" + constraint.name + ") Columns = " + constraint.key_table_name.toString() + "( " + TableDataConglomerate.stringColumnList(constraint.key_columns) + " ) -> " + constraint.ref_table_name.toString() + "( " + TableDataConglomerate.stringColumnList(constraint.ref_columns) + " )"); } else { // Perform a referential action on each updated key int sz = key_entries.size(); for (int i = 0; i < sz; ++i) { int row_index = key_entries.intAt(i); RowData row_data = new RowData(key_table); row_data.setFromRow(row_index); if (update_rule.equals("CASCADE")) { // Update the keys for (int n = 0; n < key_cols.length; ++n) { row_data.setColumnData(key_cols[n], new_key[n]); } key_table.updateRow(row_index, row_data); } else if (update_rule.equals("SET NULL")) { for (int n = 0; n < key_cols.length; ++n) { row_data.setColumnToNull(key_cols[n]); } key_table.updateRow(row_index, row_data); } else if (update_rule.equals("SET DEFAULT")) { for (int n = 0; n < key_cols.length; ++n) { row_data.setColumnToDefault(key_cols[n], context); } key_table.updateRow(row_index, row_data); } else { throw new RuntimeException( "Do not understand referential action: " + update_rule); } } // Check referential integrity of modified table, key_table.constraintIntegrityCheck(); } } } /** * Executes a delete referential action. If the delete action is * "NO ACTION", and the constraint is INITIALLY_IMMEDIATE, and the new key * doesn't exist in the referral table, an exception is thrown. 
*/ private void executeDeleteReferentialAction( Transaction.ColumnGroupReference constraint, TObject[] original_key, QueryContext context) { final String delete_rule = constraint.delete_rule; if (delete_rule.equals("NO ACTION") && constraint.deferred != Transaction.INITIALLY_IMMEDIATE) { // Constraint check is deferred return; } // So either delete rule is not NO ACTION, or if it is we are initially // immediate. MutableTableDataSource key_table = transaction.getTable(constraint.key_table_name); DataTableDef table_def = key_table.getDataTableDef(); int[] key_cols = TableDataConglomerate.findColumnIndices( table_def, constraint.key_columns); IntegerVector key_entries = TableDataConglomerate.findKeys(key_table, key_cols, original_key); // Are there keys effected? if (key_entries.size() > 0) { if (delete_rule.equals("NO ACTION")) { // Throw an exception; throw new DatabaseConstraintViolationException( DatabaseConstraintViolationException.FOREIGN_KEY_VIOLATION, TableDataConglomerate.deferredString(constraint.deferred) + " foreign key constraint violation on delete (" + constraint.name + ") Columns = " + constraint.key_table_name.toString() + "( " + TableDataConglomerate.stringColumnList(constraint.key_columns) + " ) -> " + constraint.ref_table_name.toString() + "( " + TableDataConglomerate.stringColumnList(constraint.ref_columns) + " )"); } else { // Perform a referential action on each updated key int sz = key_entries.size(); for (int i = 0; i < sz; ++i) { int row_index = key_entries.intAt(i); RowData row_data = new RowData(key_table); row_data.setFromRow(row_index); if (delete_rule.equals("CASCADE")) { // Cascade the removal of the referenced rows key_table.removeRow(row_index); } else if (delete_rule.equals("SET NULL")) { for (int n = 0; n < key_cols.length; ++n) { row_data.setColumnToNull(key_cols[n]); } key_table.updateRow(row_index, row_data); } else if (delete_rule.equals("SET DEFAULT")) { for (int n = 0; n < key_cols.length; ++n) { 
row_data.setColumnToDefault(key_cols[n], context); } key_table.updateRow(row_index, row_data); } else { throw new RuntimeException( "Do not understand referential action: " + delete_rule); } } // Check referential integrity of modified table, key_table.constraintIntegrityCheck(); } } } /** * Returns the entire row list for this table. This will request this * information from the master source. */ private IntegerListInterface getRowIndexList() { if (row_list == null) { row_list = index_set.getIndex(0); } return row_list; } /** * Ensures that the row list is as current as the latest journal change. * We can be assured that when this is called, no journal changes will * occur concurrently. However we still need to synchronize because * multiple reads are valid. */ private void ensureRowIndexListCurrent() { int rebuild_index = row_list_rebuild; int journal_count = table_journal.entries(); while (rebuild_index < journal_count) { byte command = table_journal.getCommand(rebuild_index); int row_index = table_journal.getRowIndex(rebuild_index); if (MasterTableJournal.isAddCommand(command)) { // Add to 'row_list'. boolean b = getRowIndexList().uniqueInsertSort(row_index); if (b == false) { throw new Error( "Row index already used in this table (" + row_index + ")"); } } else if (MasterTableJournal.isRemoveCommand(command)) { // Remove from 'row_list' boolean b = getRowIndexList().removeSort(row_index); if (b == false) { throw new Error("Row index removed that wasn't in this table!"); } } else { throw new Error("Unrecognised journal command."); } ++rebuild_index; } // It's now current (row_list_rebuild == journal_count); row_list_rebuild = rebuild_index; } /** * Ensures that the scheme column index is as current as the latest * journal change. 
*/ private void ensureColumnSchemeCurrent(int column) { SelectableScheme scheme = column_schemes[column]; // NOTE: We should be assured that no write operations can occur over // this section of code because writes are exclusive operations // within a transaction. // Are there journal entries pending on this scheme since? int rebuild_index = scheme_rebuilds[column]; int journal_count = table_journal.entries(); while (rebuild_index < journal_count) { byte command = table_journal.getCommand(rebuild_index); int row_index = table_journal.getRowIndex(rebuild_index); if (MasterTableJournal.isAddCommand(command)) { scheme.insert(row_index); } else if (MasterTableJournal.isRemoveCommand(command)) { scheme.remove(row_index); } else { throw new Error("Unrecognised journal command."); } ++rebuild_index; } scheme_rebuilds[column] = rebuild_index; } // ---------- Implemented from MutableTableDataSource ---------- public TransactionSystem getSystem() { return MasterTableDataSource.this.getSystem(); } public DataTableDef getDataTableDef() { return MasterTableDataSource.this.getDataTableDef(); } public int getRowCount() { // Ensure the row list is up to date. ensureRowIndexListCurrent(); return getRowIndexList().size(); } public RowEnumeration rowEnumeration() { // Ensure the row list is up to date. ensureRowIndexListCurrent(); // Get an iterator across the row list. final IntegerIterator iterator = getRowIndexList().iterator(); // Wrap it around a RowEnumeration object. return new RowEnumeration() { public boolean hasMoreRows() { return iterator.hasNext(); } public int nextRowIndex() { return iterator.next(); } }; } public TObject getCellContents(int column, int row) { return MasterTableDataSource.this.getCellContents(column, row); } // NOTE: Returns an immutable version of the scheme... public SelectableScheme getColumnScheme(int column) { SelectableScheme scheme = column_schemes[column]; // Cache the scheme in this object. 
if (scheme == null) { scheme = createSelectableSchemeForColumn(index_set, this, column); column_schemes[column] = scheme; } // Update the underlying scheme to the most current version. ensureColumnSchemeCurrent(column); return scheme; } // ---------- Table Modification ---------- public int addRow(RowData row_data) { // Check the transaction isn't read only. if (tran_read_only) { throw new RuntimeException("Transaction is read only."); } // Check this isn't a read only source if (isReadOnly()) { throw new Error("Can not add row - table is read only."); } // Add to the master. int row_index; try { row_index = MasterTableDataSource.this.addRow(row_data); } catch (IOException e) { Debug().writeException(e); throw new Error("IO Error: " + e.getMessage()); } // Note this doesn't need to be synchronized because we are exclusive on // this table. // Add this change to the table journal. table_journal.addEntry(MasterTableJournal.TABLE_ADD, row_index); return row_index; } public void removeRow(int row_index) { // Check the transaction isn't read only. if (tran_read_only) { throw new RuntimeException("Transaction is read only."); } // Check this isn't a read only source if (isReadOnly()) { throw new Error("Can not remove row - table is read only."); } // NOTE: This must NOT call 'removeRow' in MasterTableDataSource. // We do not want to delete a row permanently from the underlying // file because the transaction using this data source may yet decide // to roll back the change and not delete the row. // Note this doesn't need to be synchronized because we are exclusive on // this table. // Add this change to the table journal. table_journal.addEntry(MasterTableJournal.TABLE_REMOVE, row_index); } public int updateRow(int row_index, RowData row_data) { // Check the transaction isn't read only. 
if (tran_read_only) { throw new RuntimeException("Transaction is read only."); } // Check this isn't a read only source if (isReadOnly()) { throw new Error("Can not update row - table is read only."); } // Note this doesn't need to be synchronized because we are exclusive on // this table. // Add this change to the table journal. table_journal.addEntry(MasterTableJournal.TABLE_UPDATE_REMOVE, row_index); // Add to the master. int new_row_index; try { new_row_index = MasterTableDataSource.this.addRow(row_data); } catch (IOException e) { Debug().writeException(e); throw new Error("IO Error: " + e.getMessage()); } // Note this doesn't need to be synchronized because we are exclusive on // this table. // Add this change to the table journal. table_journal.addEntry(MasterTableJournal.TABLE_UPDATE_ADD, new_row_index); return new_row_index; } public void flushIndexChanges() { ensureRowIndexListCurrent(); // This will flush all of the column schemes for (int i = 0; i < column_schemes.length; ++i) { getColumnScheme(i); } } public void constraintIntegrityCheck() { try { // Early exit condition if (last_entry_ri_check == table_journal.entries()) { return; } // This table name DataTableDef table_def = getDataTableDef(); TableName table_name = table_def.getTableName(); QueryContext context = new SystemQueryContext(transaction, table_name.getSchema()); // Are there any added, deleted or updated entries in the journal since // we last checked? 
IntegerVector rows_updated = new IntegerVector(); IntegerVector rows_deleted = new IntegerVector(); IntegerVector rows_added = new IntegerVector(); int size = table_journal.entries(); for (int i = last_entry_ri_check; i < size; ++i) { byte tc = table_journal.getCommand(i); int row_index = table_journal.getRowIndex(i); if (tc == MasterTableJournal.TABLE_REMOVE || tc == MasterTableJournal.TABLE_UPDATE_REMOVE) { rows_deleted.addInt(row_index); // If this is in the rows_added list, remove it from rows_added int ra_i = rows_added.indexOf(row_index); if (ra_i != -1) { rows_added.removeIntAt(ra_i); } } else if (tc == MasterTableJournal.TABLE_ADD || tc == MasterTableJournal.TABLE_UPDATE_ADD) { rows_added.addInt(row_index); } if (tc == MasterTableJournal.TABLE_UPDATE_REMOVE) { rows_updated.addInt(row_index); } else if (tc == MasterTableJournal.TABLE_UPDATE_ADD) { rows_updated.addInt(row_index); } } // Were there any updates or deletes? if (rows_deleted.size() > 0) { // Get all references on this table Transaction.ColumnGroupReference[] foreign_constraints = Transaction.queryTableImportedForeignKeyReferences(transaction, table_name); // For each foreign constraint for (int n = 0; n < foreign_constraints.length; ++n) { Transaction.ColumnGroupReference constraint = foreign_constraints[n]; // For each deleted/updated record in the table, for (int i = 0; i < rows_deleted.size(); ++i) { int row_index = rows_deleted.intAt(i); // What was the key before it was updated/deleted int[] cols = TableDataConglomerate.findColumnIndices( table_def, constraint.ref_columns); TObject[] original_key = new TObject[cols.length]; int null_count = 0; for (int p = 0; p < cols.length; ++p) { original_key[p] = getCellContents(cols[p], row_index); if (original_key[p].isNull()) { ++null_count; } } // Check the original key isn't null if (null_count != cols.length) { // Is is an update? 
int update_index = rows_updated.indexOf(row_index); if (update_index != -1) { // Yes, this is an update int row_index_add = rows_updated.intAt(update_index + 1); // It must be an update, so first see if the change caused any // of the keys to change. boolean key_changed = false; TObject[] key_updated_to = new TObject[cols.length]; for (int p = 0; p < cols.length; ++p) { key_updated_to[p] = getCellContents(cols[p], row_index_add); if (original_key[p].compareTo(key_updated_to[p]) != 0) { key_changed = true; } } if (key_changed) { // Allow the delete, and execute the action, // What did the key update to? executeUpdateReferentialAction(constraint, original_key, key_updated_to, context); } // If the key didn't change, we don't need to do anything. } else { // No, so it must be a delete, // This will look at the referencee table and if it contains // the key, work out what to do with it. executeDeleteReferentialAction(constraint, original_key, context); } } // If the key isn't null } // for each deleted rows } // for each foreign key reference to this table } // Were there any rows added (that weren't deleted)? if (rows_added.size() > 0) { int[] row_indices = rows_added.toIntArray(); // Check for any field constraint violations in the added rows TableDataConglomerate.checkFieldConstraintViolations( transaction, this, row_indices); // Check this table, adding the given row_index, immediate TableDataConglomerate.checkAddConstraintViolations( transaction, this, row_indices, Transaction.INITIALLY_IMMEDIATE); } } catch (DatabaseConstraintViolationException e) { // If a constraint violation, roll back the changes since the last // check. 
int rollback_point = table_journal.entries() - last_entry_ri_check; if (row_list_rebuild <= rollback_point) { table_journal.rollbackEntries(rollback_point); } else { System.out.println( "WARNING: rebuild_pointer is after rollback point so we can't " + "rollback to the point before the constraint violation."); } throw e; } finally { // Make sure we update the 'last_entry_ri_check' variable last_entry_ri_check = table_journal.entries(); } } public MasterTableJournal getJournal() { return table_journal; } public void dispose() { // Dispose and invalidate the schemes // This is really a safety measure to ensure the schemes can't be // used outside the scope of the lifetime of this object. for (int i = 0; i < column_schemes.length; ++i) { SelectableScheme scheme = column_schemes[i]; if (scheme != null) { scheme.dispose(); column_schemes[i] = null; } } row_list = null; table_journal = null; scheme_rebuilds = null; index_set = null; transaction = null; } public void addRootLock() { MasterTableDataSource.this.addRootLock(); } public void removeRootLock() { MasterTableDataSource.this.removeRootLock(); } } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/MasterTableGarbageCollector.java000066400000000000000000000152331330501023400301070ustar00rootroot00000000000000/** * com.mckoi.database.MasterTableGarbageCollector 28 Nov 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
 *
 */

package com.mckoi.database;

import com.mckoi.util.BlockIntegerList;
import com.mckoi.debug.*;
import java.io.IOException;

/**
 * A row garbage collector for a master table data source that manages
 * garbage collection over a MasterTableDataSource object.  Each time a row
 * is committed deleted from a master table, this object is notified.  When
 * the master table has no root locks on it, then the garbage collector
 * can kick in and mark all deleted rows as reclaimable.
 *
 * @author Tobias Downer
 */

final class MasterTableGarbageCollector {

  /**
   * The MasterTableDataSource that this collector is managing.
   */
  private MasterTableDataSource data_source;

  /**
   * If this is true, then a full sweep of the table is due to reclaim all
   * deleted rows from the table.
   */
  private boolean full_sweep_due;

  /**
   * The list of all rows from the master table that we have been notified
   * of being deleted.
   * <p>
   * NOTE: This list shouldn't get too large.  If it does, we should clear it
   * and toggle the 'full_sweep_due' variable to true.
   */
  private BlockIntegerList deleted_rows;

  /**
   * The time when the last successful garbage collection event occurred.
   */
  private long last_garbage_success_event;
  // The time of the last attempted collection, or -1 if none is pending.
  private long last_garbage_try_event;

  /**
   * Constructs the garbage collector.
   */
  MasterTableGarbageCollector(MasterTableDataSource data_source) {
    this.data_source = data_source;
    full_sweep_due = false;
    deleted_rows = new BlockIntegerList();
    last_garbage_success_event = System.currentTimeMillis();
    last_garbage_try_event = -1;
  }

  /**
   * Returns the DebugLogger object that we can use to log debug messages.
   */
  public final DebugLogger Debug() {
    return data_source.Debug();
  }

  /**
   * Called by the MasterTableDataSource to notify the collector that a row
   * has been marked as committed deleted.
   * <p>

* SYNCHRONIZATION: We must be synchronized over 'data_source' when this * is called. (This is guarenteed if called from MasterTableDataSource). */ void markRowAsDeleted(int row_index) { if (full_sweep_due == false) { boolean b = deleted_rows.uniqueInsertSort(row_index); if (b == false) { throw new Error("Row marked twice for deletion."); } } } /** * Called by the MasterTableDataSoruce to notify the collector to do a full * sweep and remove of records in the table at the next scheduled collection. *

* SYNCHRONIZATION: We must be synchronized over 'data_source' when this * is called. (This is guarenteed if called from MasterTableDataSource). */ void markFullSweep() { full_sweep_due = true; if (deleted_rows.size() > 0) { deleted_rows = new BlockIntegerList(); } } /** * Performs the actual garbage collection event. This is called by the * CollectionEvent object. Note that it synchronizes over the master table * data source object. *

   * If 'force' is true, then the collection event is forced even if there are
   * root locks or transaction changes pending.  It is only recommended that
   * force is true when the table is shut down.
   */
  void performCollectionEvent(boolean force) {

    try {
      int check_count = 0;
      int delete_count = 0;

      // Synchronize over the master data table source so no other threads
      // can interfere when we collect this information.
      synchronized (data_source) {

        // A closed source has nothing to reclaim.
        if (data_source.isClosed()) {
          return;
        }

        // If root is locked, or has transaction changes pending, then we
        // can't delete any rows marked as deleted because they could be
        // referenced by transactions or result sets.
        if (force ||
            (!data_source.isRootLocked() &&
             !data_source.hasTransactionChangesPending())) {

          last_garbage_success_event = System.currentTimeMillis();
          last_garbage_try_event = -1;

          // Are we due a full sweep?
          if (full_sweep_due) {
            // Walk every raw row and reclaim those marked committed removed.
            int raw_row_count = data_source.rawRowCount();
            for (int i = 0; i < raw_row_count; ++i) {
              // Synchronized in data_source.
              boolean b = data_source.hardCheckAndReclaimRow(i);
              if (b) {
                ++delete_count;
              }
              ++check_count;
            }
            full_sweep_due = false;
          }
          else {
            // Are there any rows marked as deleted?
            int size = deleted_rows.size();
            if (size > 0) {
              // Go remove all rows marked as deleted.
              for (int i = 0; i < size; ++i) {
                int row_index = deleted_rows.get(i);
                // Synchronized in data_source.
                data_source.hardRemoveRow(row_index);
                ++delete_count;
                ++check_count;
              }
            }
            deleted_rows = new BlockIntegerList();
          }

          if (check_count > 0) {
            if (Debug().isInterestedIn(Lvl.INFORMATION)) {
              Debug().write(Lvl.INFORMATION, this,
                  "Row GC: [" + data_source.getName() +
                  "] check_count=" + check_count +
                  " delete count=" + delete_count);
              Debug().write(Lvl.INFORMATION, this,
                  "GC row sweep deleted " + delete_count + " rows.");
            }
          }

        } // if not roots locked and not transactions pending

      } // synchronized
    }
    catch (IOException e) {
      Debug().writeException(e);
    }

  }

  // ---------- Inner classes ----------

  /**
   * The garbage collection event. 
This is an event run from the database * dispatcher thread that performs the garbage collection of committed * deleted rows on the data source. This can not delete rows from a table * that has its roots locked. */ private class CollectionEvent implements Runnable { public void run() { performCollectionEvent(false); } } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/MasterTableJournal.java000066400000000000000000000260551330501023400263260ustar00rootroot00000000000000/** * com.mckoi.database.MasterTableJournal 19 Nov 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import com.mckoi.util.IntegerVector; import java.io.*; /** * A journal of changes that occured to a table in a data conglomerate during * a transaction. * * @author Tobias Downer */ final class MasterTableJournal { /** * Journal commands. */ final static byte TABLE_ADD = 1; // Add a row to a table. // (params: table_id, row_index) final static byte TABLE_REMOVE = 2; // Remove a row from a table. // (params: table_id, row_index) final static byte TABLE_UPDATE_ADD = 5; // Add a row from an update. final static byte TABLE_UPDATE_REMOVE = 6; // Remove a row from an update. /** * The commit id given to this change when it is committed. This is only * set when the journal is a committed change to the database. */ private long commit_id; /** * The master table id. 
*/ private int table_id; /** * The number of entries in this journal. */ private int journal_entries; /** * A byte[] array that represents the set of commands a transaction * performed on this table. */ private byte[] command_journal; /** * An IntegerVector that is filled with parameters from the command journal. * For example, a 'TABLE_ADD' journal log will have as parameters the * row_index that was added to this table. */ private IntegerVector command_parameters; /** * Constructs the master table journal. */ MasterTableJournal(int table_id) { this.table_id = table_id; command_journal = new byte[16]; command_parameters = new IntegerVector(32); } MasterTableJournal() { this(-1); } /** * Sets the 'commit_id'. This is only set when this change becomes a * committed change to the database. */ void setCommitID(long commit_id) { this.commit_id = commit_id; } /** * Returns true if the given command is an addition command. */ static boolean isAddCommand(byte command) { return ((command & 0x03) == TABLE_ADD); } /** * Returns true if the given command is a removal command. */ static boolean isRemoveCommand(byte command) { return ((command & 0x03) == TABLE_REMOVE); } /** * Adds a command to the journal. */ private void addCommand(byte command) { if (journal_entries >= command_journal.length) { // Resize command array. int grow_size = Math.min(4000, journal_entries); grow_size = Math.max(4, grow_size); byte[] new_command_journal = new byte[journal_entries + grow_size]; System.arraycopy(command_journal, 0, new_command_journal, 0, journal_entries); command_journal = new_command_journal; } command_journal[journal_entries] = command; ++journal_entries; } /** * Adds a parameter to the journal command parameters. */ private void addParameter(int param) { command_parameters.addInt(param); } /** * Removes the top n entries from the journal. 
*/ private void removeTopEntries(int n) { journal_entries = journal_entries - n; command_parameters.crop(0, command_parameters.size() - n); } /** * Adds a new command to this journal. */ void addEntry(byte command, int row_index) { addCommand(command); addParameter(row_index); } // ---------- Getters ---------- // These methods assume the journal has been setup and no more entries // will be made. /** * Returns the commit_id that has been set for this journal. */ long getCommitID() { return commit_id; } /** * Returns the table id of the master table this journal is for. */ int getTableID() { return table_id; } /** * Returns the total number of journal entries. */ int entries() { return journal_entries; } /** * Returns the command of the nth entry in the journal. */ byte getCommand(int n) { return command_journal[n]; } /** * Returns the row index of the nth entry in the journal. */ int getRowIndex(int n) { return command_parameters.intAt(n); } /** * Returns a normalized list of all rows that were added in this journal, * but not including those rows also removed. For example, if rows * 1, 2, and 3 were added and 2 was removed, this will return a list of * 1 and 3. */ int[] normalizedAddedRows() { IntegerVector list = new IntegerVector(); int size = entries(); for (int i = 0; i < size; ++i) { byte tc = getCommand(i); if (tc == TABLE_ADD || tc == TABLE_UPDATE_ADD) { int row_index = getRowIndex(i); // If row added, add to list list.addInt(row_index); } else if (tc == TABLE_REMOVE || tc == TABLE_UPDATE_REMOVE) { // If row removed, if the row is already in the list // it's removed from the list, otherwise we leave as is. int row_index = getRowIndex(i); int found_at = list.indexOf(row_index); if (found_at != -1) { list.removeIntAt(found_at); } } else { throw new Error("Unknown command in journal."); } } return list.toIntArray(); } /** * Returns a normalized list of all rows that were removed from this * journal. 
*/ int[] normalizedRemovedRows() { IntegerVector list = new IntegerVector(); int size = entries(); for (int i = 0; i < size; ++i) { byte tc = getCommand(i); if (tc == TABLE_REMOVE || tc == TABLE_UPDATE_REMOVE) { // If removed add to the list. int row_index = getRowIndex(i); list.addInt(row_index); } } return list.toIntArray(); } /** * Returns three lists - a list of all rows that were inserted, a list of all * rows that were deleted, and a list of all updates. All the lists are * ordered by the order of the command. The update list contains two * entries per 'update', the row that was removed and the row that was * added with the updated info. *

* This method is useful for collecting all modification information on the * table. */ IntegerVector[] allChangeInformation() { IntegerVector[] lists = new IntegerVector[3]; for (int i = 0; i < 3; ++i) { lists[i] = new IntegerVector(); } int size = entries(); for (int i = 0; i < size; ++i) { byte tc = getCommand(i); int row_index = getRowIndex(i); if (tc == TABLE_ADD) { lists[0].addInt(row_index); } else if (tc == TABLE_REMOVE) { lists[1].addInt(row_index); } else if (tc == TABLE_UPDATE_ADD || tc == TABLE_UPDATE_REMOVE) { lists[2].addInt(row_index); } else { throw new RuntimeException("Don't understand journal command."); } } return lists; } /** * Rolls back the last n entries of this journal. This method takes into * account the transient nature of rows (all added rows in the journal are * exclusively referenced by this journal). The algorithm works as follows; * any rows added are deleted, and rows deleted (that weren't added) are * removed from the journal. */ void rollbackEntries(int n) { if (n > journal_entries) { throw new RuntimeException( "Trying to roll back more journal entries than are in the journal."); } IntegerVector to_add = new IntegerVector(); // Find all entries and added new rows to the table int size = entries(); for (int i = size - n; i < size; ++i) { byte tc = getCommand(i); if (tc == TABLE_ADD || tc == TABLE_UPDATE_ADD) { to_add.addInt(getRowIndex(i)); } } // Delete the top entries removeTopEntries(n); // Mark all added entries to deleted. for (int i = 0; i < to_add.size(); ++i) { addEntry(TABLE_ADD, to_add.intAt(i)); addEntry(TABLE_REMOVE, to_add.intAt(i)); } } // ---------- Testing methods ---------- /** * Throws a transaction clash exception if it detects a clash between * journal entries. It assumes that this journal is the journal that is * attempting to be compatible with the given journal. A journal clashes * when they both contain a row that is deleted. 
*/ void testCommitClash(DataTableDef table_def, MasterTableJournal journal) throws TransactionException { // Very nasty search here... // int cost = entries() * journal.entries(); // System.out.print(" CLASH COST = " + cost + " "); for (int i = 0; i < entries(); ++i) { byte tc = getCommand(i); if (isRemoveCommand(tc)) { // command - row remove int row_index = getRowIndex(i); // System.out.println("* " + row_index); for (int n = 0; n < journal.entries(); ++n) { // System.out.print(" " + journal.getRowIndex(n)); if (isRemoveCommand(journal.getCommand(n)) && journal.getRowIndex(n) == row_index) { throw new TransactionException( TransactionException.ROW_REMOVE_CLASH, "Concurrent Serializable Transaction Conflict(1): " + "Current row remove clash ( row: " + row_index + ", table: " + table_def.getTableName() + " )"); } } // System.out.println(); } } } // ---------- Stream serialization methods ---------- /** * Reads the journal entries from the given DataInputStream to this object. *

* This method is only around because we might need it to convert a * 0.91 era database that stored index data as journals in the file system. */ void readFrom(DataInputStream din) throws IOException { commit_id = din.readInt(); table_id = din.readInt(); journal_entries = din.readInt(); command_journal = new byte[journal_entries]; din.readFully(command_journal, 0, journal_entries); int size = din.readInt(); for (int i = 0; i < size; ++i) { command_parameters.addInt(din.readInt()); } } /** * Debugging. */ public String toString() { StringBuffer buf = new StringBuffer(); buf.append("[MasterTableJournal] ["); buf.append(commit_id); buf.append("] ("); for (int i = 0; i < entries(); ++i) { byte c = getCommand(i); int row_index = getRowIndex(i); buf.append("("); buf.append(c); buf.append(")"); buf.append(row_index); buf.append(" "); } buf.append(")"); return new String(buf); } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/MasterTableListener.java000066400000000000000000000047231330501023400264770ustar00rootroot00000000000000/** * com.mckoi.database.MasterTableListener 28 Aug 2002 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; /** * An interface that is notified of add/remove events on a * MasterTableDataSource. 
The purpose of this interface is so that a high level * function can listen for changes to the underlying table and cache high * level representations of the rows as appropriate. * * @author Tobias Downer */ interface MasterTableListener { /** * Notifies of a new row addition to the underlying representation. Note * that this addition doesn't necessarily mean that the change is a committed * change. There is no way to tell if a change is committed or not. *

* SYNCHRONIZATION ISSUE: Note that extreme care should be taken with
   * deadlock issues with this method.  This is a call-back from
   * MasterTableDataSource when its monitor is in a synchronized state.  This
   * means there is potential for deadlock if care is not taken.  Listeners of
   * this event should not try and inspect the state of the database.
   */
  void rowAdded(int row_number, RowData row_data);

  /**
   * Notifies that a row has been permanently removed from the underlying
   * representation.  This means the row has been committed removed and the
   * table row garbage collector has decided it is eligible to be recycled.
   *

* Normally the garbage collector thread will notify of this event. *

* SYNCHRONIZATION ISSUE: Note that extreme care should be taken with * deadlock issues with this method. This is a call-back from * MasterTableDataSource when its monikor is in a synchronized state. This * means there is potential for deadlock if care is not taken. Listeners of * this should event should not try and inspect the state of the database. */ void rowRemoved(int row_number); } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/MultiVersionTableIndices.java000066400000000000000000000150721330501023400274740ustar00rootroot00000000000000/** * com.mckoi.database.MultiVersionTableIndices 05 Dec 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import java.util.ArrayList; import com.mckoi.debug.*; /** * This class manages a set of indices for a table over different versions. * The indices include the list of rows (required), and any index data * (optional). This object manages table indexes at multiple revision levels. * When indexes are requested, what is returned is an isolated version of the * current indexes. Index changes automatically create a new version and * each version of the index found is isolated from any concurrent changes. *

* This class is not thread safe, but it assumes thread safety by the * callee. It is not safe for multi-threaded access. * * @author Tobias Downer */ final class MultiVersionTableIndices { /** * The name of the table. */ private final TableName table_name; /** * The number of columns in the referenced table. */ private final int column_count; /** * The system object. */ private final TransactionSystem system; /** * A list of MasterTableJournal objects that represent the changes * that have occurred to this master index after various transactions * have been committed. *

* This list can be used to build the indices and a table row enumerator for * snapshots of the table at various transaction check points. */ private ArrayList transaction_mod_list; // ---------- Stat keys ---------- private String journal_count_stat_key; /** * Constructs this object with the given number of column. */ MultiVersionTableIndices(TransactionSystem system, TableName table_name, int column_count) { this.system = system; this.table_name = table_name; this.column_count = column_count; transaction_mod_list = new ArrayList(); journal_count_stat_key = "MultiVersionTableIndices.journal_entries." + table_name; } private long TS_merge_count = 0; private long TS_merge_size = 0; /** * Returns the DebugLogger object used to log debug messages. */ public final DebugLogger Debug() { return system.Debug(); } /** * Updates the master records from the journal logs up to the given * 'commit_id'. This could be a fairly expensive operation if there are * a lot of modifications because each change could require a lookup * of records in the data source. *

* NOTE: It's extremely important that when this is called, there are no * transactions open that are using the merged journal. If there is, then * a transaction may be able to see changes in a table that were made * after the transaction started. *

* Returns true if all journal changes were merged. */ boolean mergeJournalChanges(long commit_id) { // Average size of pending transactions when this method is called... ++TS_merge_count; TS_merge_size += transaction_mod_list.size(); if ((TS_merge_count % 32) == 0) { system.stats().set( (int) ((TS_merge_size * 1000000L) / TS_merge_count), "MultiVersionTableIndices.average_journal_merge_mul_1000000"); // DatabaseSystem.stats().set( // TS_merge_size / TS_merge_count, // "MultiVersionTableIndices.average_journal_merge"); // DatabaseSystem.stats().set( // TS_merge_size, // "MultiVersionTableIndices.TS_merge_size"); // DatabaseSystem.stats().set( // TS_merge_count, // "MultiVersionTableIndices.TS_merge_count"); } int merge_count = 0; int size = transaction_mod_list.size(); while (transaction_mod_list.size() > 0) { MasterTableJournal journal = (MasterTableJournal) transaction_mod_list.get(0); if (commit_id > journal.getCommitID()) { ++merge_count; if (Debug().isInterestedIn(Lvl.INFORMATION)) { Debug().write(Lvl.INFORMATION, this, "Merging '" + table_name + "' journal: " + journal); } // Remove the top journal entry from the list. transaction_mod_list.remove(0); system.stats().decrement(journal_count_stat_key); } else { // If (commit_id <= journal.getCommitID()) return false; } } return true; } /** * Returns a list of all MasterTableJournal objects that have been * successfully committed against this table that have an 'commit_id' that * is greater or equal to the given. *

* This is part of the conglomerate commit check phase and will be on a * commit_lock. */ MasterTableJournal[] findAllJournalsSince(long commit_id) { ArrayList all_since = new ArrayList(); int size = transaction_mod_list.size(); for (int i = 0; i < size; ++i) { MasterTableJournal journal = (MasterTableJournal) transaction_mod_list.get(i); long journal_commit_id = journal.getCommitID(); // All journals that are greater or equal to the given commit id if (journal_commit_id >= commit_id) { all_since.add(journal); } } return (MasterTableJournal[]) all_since.toArray(new MasterTableJournal[all_since.size()]); } /** * Adds a transaction journal to the list of modifications on the indices * kept here. */ void addTransactionJournal(MasterTableJournal change) { transaction_mod_list.add(change); system.stats().increment(journal_count_stat_key); } /** * Returns true if this table has any journal modifications that have not * yet been incorporated into master index. */ boolean hasTransactionChangesPending() { // System.out.println(transaction_mod_list); return transaction_mod_list.size() > 0; } /** * Returns a string describing the transactions pending on this table. */ String transactionChangeString() { return transaction_mod_list.toString(); } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/MutableTableDataSource.java000066400000000000000000000120341330501023400270740ustar00rootroot00000000000000/** * com.mckoi.database.MutableTableDataSource 19 Nov 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; /** * A mutable data source that allows for the addition and removal of rows. * * @author Tobias Downer */ public interface MutableTableDataSource extends TableDataSource { /** * Adds a row to the source. This will add a permanent record into the * the underlying data structure. It will also update the indexing * schemes as appropriate, and also add the row into the set returned by * the 'rowEnumeration' iterator. *

* It returns a row index that is used to reference this data in future * queries. Throws an exception if the row additional was not possible * because of IO reasons. */ int addRow(RowData row_data); /** * Completely removes a row from the source. This will permanently remove * the record from the underlying data structure. It also updates the * indexing schemes and removes the row index from the set returned by * the 'rowEnumeration' iterator. *

* Throws an exception if the row index does not reference a valid row within * the context of this data source. */ void removeRow(int row_index); /** * Updates a row in the source. This will make a permanent change to the * underlying data structure. It will update the indexing schemes as * appropriate, and also add the row into the set returned by the * 'rowEnumeration' iterator. *

* It returns a row index for the new updated records. Throws an exception * if the row update was not possible because of IO reasons or the row * index not being a valid reference to a record in this data source. */ int updateRow(int row_index, RowData row_data); /** * Flushes all changes made on this MutableTableDataSource to the backing * index scheme (IndexSet). This is used during the commit phase of this * objects lifetime. The transaction control mechanism has found that there * are no clashes and now we need to commit the current table view to the * conglomerate. Because this object may not update index information * immediately, we call this to flush all the changes to the table to the * backing index set. *

* When this method returns, the backing IndexSet of this view will be * completely up to date. */ void flushIndexChanges(); /** * Performs all constraint integrity checks and actions to any modifications * based on any changes that happened to the table since that last call to * this method. It is important that is called after any call to 'addRow', * 'removeRow' or 'updateRow'. *

* Any constraints that are marked as INITIALLY_IMMEDIATE are checked when * this is called, otherwise the constraint is checked at commit time. *

* Any referential actions are performed when this method is called. If a * referential action causes a modification to another table, this method * is recursively called on the table modified. *

* If a referential integrity constraint is violated and a referential action * is unable to maintain the integrity of the database, any changes made to * the table are reverted. */ void constraintIntegrityCheck(); /** * Returns a journal that details the changes to this data source since it * was created. This method may return a 'null' object to denote that no * logging is being done. If this returns a MasterTableJournal, then all * 'addRow' and 'removeRow' method calls and their relative order will be * described in this journal. */ MasterTableJournal getJournal(); /** * Disposes this table data source. After this method is called, most use * of this object is undefined, except for the 'getCellContent' and * 'compareCellContent' methods which are valid provided the source is * under a root lock. */ void dispose(); /** * Puts this source under a 'root lock'. A root lock means the root row * structure of this object must not change. A root lock is obtained on * a table when a ResultSet keeps hold of an object outside the life of * the transaction that created the table. It is important that the order * of the rows stays constant (committed deleted rows are not really * deleted and reused, etc) while a table holds at least 1 root lock. */ void addRootLock(); /** * Removes a root lock from this source. */ void removeRootLock(); } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/NaturallyJoinedTable.java000066400000000000000000000103301330501023400266310ustar00rootroot00000000000000/** * com.mckoi.database.NaturallyJoinedTable 18 Sep 2002 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import com.mckoi.util.IntegerVector; /** * A table that is the cartesian product of two tables. This provides * better memory-use and efficiency than a materialized table backed by a * VirtualTable. * * @author Tobias Downer */ public final class NaturallyJoinedTable extends JoinedTable { /** * The row counts of the left and right tables. */ private final int left_row_count, right_row_count; /** * The lookup row set for the left and right tables. Basically, these point * to each row in either the left or right tables. */ private final IntegerVector left_set, right_set; private final boolean left_is_simple_enum, right_is_simple_enum; /** * Constructs the table. */ public NaturallyJoinedTable(Table left, Table right) { super.init(new Table[] { left, right }); left_row_count = left.getRowCount(); right_row_count = right.getRowCount(); // Build lookup tables for the rows in the parent tables if necessary // (usually it's not necessary). // If the left or right tables are simple enumerations, we can optimize // our access procedure, left_is_simple_enum = (left.rowEnumeration() instanceof SimpleRowEnumeration); right_is_simple_enum = (right.rowEnumeration() instanceof SimpleRowEnumeration); if (!left_is_simple_enum) { left_set = createLookupRowList(left); } else { left_set = null; } if (!right_is_simple_enum) { right_set = createLookupRowList(right); } else { right_set = null; } } /** * Creates a lookup list for rows in the given table. 
*/ private static IntegerVector createLookupRowList(Table t) { IntegerVector ivec = new IntegerVector(); RowEnumeration en = t.rowEnumeration(); while (en.hasMoreRows()) { int row_index = en.nextRowIndex(); ivec.addInt(row_index); } return ivec; } /** * Given a row index between 0 and left table row count, this will return a * row index into the left table's row domain. */ private int getLeftRowIndex(int row_index) { if (left_is_simple_enum) { return row_index; } return left_set.intAt(row_index); } /** * Given a row index between 0 and right table row count, this will return a * row index into the right table's row domain. */ private int getRightRowIndex(int row_index) { if (right_is_simple_enum) { return row_index; } return right_set.intAt(row_index); } // ---------- Implemented from JoinedTable ---------- public int getRowCount() { // Natural join row count is (left table row count * right table row count) return left_row_count * right_row_count; } protected int resolveRowForTableAt(int row_number, int table_num) { if (table_num == 0) { return getLeftRowIndex(row_number / right_row_count); } else { return getRightRowIndex(row_number % right_row_count); } } protected void resolveAllRowsForTableAt( IntegerVector row_set, int table_num) { boolean pick_right_table = (table_num == 1); for (int n = row_set.size() - 1; n >= 0; --n) { int aa = row_set.intAt(n); // Reverse map row index to parent domain int parent_row; if (pick_right_table) { parent_row = getRightRowIndex(aa % right_row_count); } else { parent_row = getLeftRowIndex(aa / right_row_count); } row_set.setIntAt(parent_row, n); } } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/OpenTransactionList.java000066400000000000000000000132201330501023400265210ustar00rootroot00000000000000/** * com.mckoi.database.OpenTransactionList 26 Nov 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import java.util.ArrayList; import com.mckoi.debug.Lvl; /** * The list of all currently open transactions. This is a thread safe * object that is shared between a TableDataConglomerate and its children * MasterDataTableSource objects. It is used for maintaining a list of * transactions that are currently open in the system. It also provides * various utility methods around the list. *

* This class is thread safe and can safely be accessed by multiple threads. * This is so threads accessing table source information as well as * conglomerate 'commit' stages can safely access this object. * * @author Tobias Downer */ final class OpenTransactionList { /** * True to enable transaction tracking. */ private static final boolean TRACKING = false; /** * The system that this transaction list is part of. */ private TransactionSystem system; /** * The list of open transactions. * (Transaction). */ private ArrayList open_transactions; /** * A list of Error objects created when the transaction is added to the open * transactions list. */ private ArrayList open_transaction_stacks; /** * The minimum commit id of the current list. */ private long minimum_commit_id; /** * The maximum commit id of the current list. */ private long maximum_commit_id; /** * Creates the list. */ OpenTransactionList(TransactionSystem system) { this.system = system; open_transactions = new ArrayList(); if (TRACKING) { open_transaction_stacks = new ArrayList(); } minimum_commit_id = Long.MAX_VALUE; maximum_commit_id = 0; } /** * Adds a new open transaction to the list. Transactions must be added * in order of commit_id. */ synchronized void addTransaction(Transaction transaction) { long current_commit_id = transaction.getCommitID(); if (current_commit_id >= maximum_commit_id) { open_transactions.add(transaction); if (TRACKING) { open_transaction_stacks.add(new Error()); } system.stats().increment("OpenTransactionList.count"); maximum_commit_id = current_commit_id; } else { throw new Error( "Added a transaction with a lower than maximum commit_id"); } } /** * Removes an open transaction from the list. */ synchronized void removeTransaction(Transaction transaction) { int size = open_transactions.size(); int i = open_transactions.indexOf(transaction); if (i == 0) { // First in list. if (i == size - 1) { // And last. 
minimum_commit_id = Long.MAX_VALUE;  // BUG FIX: was Integer.MAX_VALUE.
        // The field is a long and the empty-list sentinel used by the
        // constructor and by minimumCommitID() is Long.MAX_VALUE; resetting
        // to Integer.MAX_VALUE here would wrongly cap the minimum once the
        // commit id counter passes 2^31 - 1.
        maximum_commit_id = 0;
      }
      else {
        minimum_commit_id =
                  ((Transaction) open_transactions.get(i + 1)).getCommitID();
      }
    }
    else if (i == open_transactions.size() - 1) {
      // Last in list.
      maximum_commit_id =
                  ((Transaction) open_transactions.get(i - 1)).getCommitID();
    }
    else if (i == -1) {
      throw new Error("Unable to find transaction in the list.");
    }
    open_transactions.remove(i);
    if (TRACKING) {
      open_transaction_stacks.remove(i);
    }
    system.stats().decrement("OpenTransactionList.count");

    if (TRACKING) {
      system.Debug().write(Lvl.MESSAGE, this, "Stacks:");
      for (int n = 0; n < open_transaction_stacks.size(); ++n) {
        system.Debug().writeException(Lvl.MESSAGE,
                                      (Error) open_transaction_stacks.get(n));
      }
    }

  }

  /**
   * Returns the number of transactions that are open on the conglomerate.
   */
  synchronized int count() {
    return open_transactions.size();
  }

  /**
   * Returns the minimum commit id not including the given transaction object.
   * Returns Long.MAX_VALUE if there are no open transactions in the list
   * (not including the given transaction).
   */
  synchronized long minimumCommitID(Transaction transaction) {

    long minimum_commit_id = Long.MAX_VALUE;

    if (open_transactions.size() > 0) {
      // If the bottom transaction is this transaction, then go to the
      // next up from the bottom (we don't count this transaction as the
      // minimum commit_id).
Transaction test_transaction = (Transaction)open_transactions.get(0); if (test_transaction != transaction) { minimum_commit_id = test_transaction.getCommitID(); } else if (open_transactions.size() > 1) { minimum_commit_id = ((Transaction) open_transactions.get(1)).getCommitID(); } } return minimum_commit_id; } public synchronized String toString() { StringBuffer buf = new StringBuffer(); buf.append("[ OpenTransactionList: "); for (int i = 0; i < open_transactions.size(); ++i) { Transaction t = (Transaction) open_transactions.get(i); buf.append(t.getCommitID()); buf.append(", "); } buf.append(" ]"); return new String(buf); } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/Operator.java000066400000000000000000000637541330501023400243720ustar00rootroot00000000000000/** * com.mckoi.database.Operator 11 Jul 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import java.util.HashMap; import java.util.ArrayList; /** * An operator for an expression. * * @author Tobias Downer */ public abstract class Operator implements java.io.Serializable { static final long serialVersionUID = 516615288995154064L; // ---------- Statics ---------- /** * The ANY and ALL enumerator. */ public static final int NONE = 0, ANY = 1, ALL = 2; // ---------- Member ---------- /** * A string that represents this operator. 
*/ private String op; /** * If this is a set operator such as ANY or ALL then this is set with the * flag type. */ private int set_type; /** * The precedence of this operator. */ private int precedence; /** * Constructs the Operator. */ protected Operator(String op) { this(op, 0, NONE); } protected Operator(String op, int precedence) { this(op, precedence, NONE); } protected Operator(String op, int precedence, int set_type) { if (set_type != NONE && set_type != ANY && set_type != ALL) { throw new Error("Invalid set_type."); } this.op = op; this.precedence = precedence; this.set_type = set_type; } /** * Returns true if this operator is equal to the operator string. */ public boolean is(String given_op) { return given_op.equals(op); } public abstract TObject eval(TObject ob1, TObject ob2, GroupResolver group, VariableResolver resolver, QueryContext context); public int precedence() { return precedence; } public boolean isCondition() { return (equals(eq_op) || equals(neq_op) || equals(g_op) || equals(l_op) || equals(geq_op) || equals(leq_op) || equals(is_op) || equals(isn_op)); } public boolean isMathematical() { return (equals(add_op) || equals(sub_op) || equals(mul_op) || equals(div_op) || equals(concat_op)); } public boolean isPattern() { return (equals(like_op) || equals(nlike_op) || equals(regex_op)); } public boolean isLogical() { return (equals(and_op) || equals(or_op)); } public boolean isNot() { return equals(not_op); } public boolean isSubQuery() { return (set_type != NONE || equals(in_op) || equals(nin_op)); } /** * Returns an Operator that is the reverse of this Operator. This is used * for reversing a conditional expression. eg. 9 > id becomes id < 9. 
*/ public Operator reverse() { if (equals(eq_op) || equals(neq_op) || equals(is_op) || equals(isn_op)) { return this; } else if (equals(g_op)) { return l_op; } else if (equals(l_op)) { return g_op; } else if (equals(geq_op)) { return leq_op; } else if (equals(leq_op)) { return geq_op; } throw new Error("Can't reverse a non conditional operator."); } /** * Returns true if this operator is not inversible. */ public boolean isNotInversible() { // The REGEX op, and mathematical operators are not inversible. return equals(regex_op) || isMathematical(); } /** * Returns the inverse operator of this operator. For example, = becomes <>, * > becomes <=, AND becomes OR. */ public Operator inverse() { if (isSubQuery()) { int inv_type; if (isSubQueryForm(ANY)) { inv_type = ALL; } else if (isSubQueryForm(ALL)) { inv_type = ANY; } else { throw new RuntimeException("Can not handle sub-query form."); } Operator inv_op = Operator.get(op).inverse(); return inv_op.getSubQueryForm(inv_type); } else if (equals(eq_op)) { return neq_op; } else if (equals(neq_op)) { return eq_op; } else if (equals(g_op)) { return leq_op; } else if (equals(l_op)) { return geq_op; } else if (equals(geq_op)) { return l_op; } else if (equals(leq_op)) { return g_op; } else if (equals(and_op)) { return or_op; } else if (equals(or_op)) { return and_op; } else if (equals(like_op)) { return nlike_op; } else if (equals(nlike_op)) { return like_op; } else if (equals(is_op)) { return isn_op; } else if (equals(isn_op)) { return is_op; } else { throw new Error("Can't inverse operator '" + op + "'"); } } /** * Given a parameter of either NONE, ANY, ALL or SINGLE, this returns true * if this operator is of the given type. */ public boolean isSubQueryForm(int type) { return type == set_type; } /** * Returns the sub query representation of this operator. */ int getSubQueryFormRepresentation() { return set_type; } /** * Returns the ANY or ALL form of this operator. 
*/ public Operator getSubQueryForm(int type) { Operator result_op = null; if (type == ANY) { result_op = (Operator) any_map.get(op); } else if (type == ALL) { result_op = (Operator) all_map.get(op); } else if (type == NONE) { result_op = get(op); } if (result_op == null) { throw new Error("Couldn't change the form of operator '" + op + "'."); } return result_op; } /** * Same as above only it handles the type as a string. */ public Operator getSubQueryForm(String type_str) { String s = type_str.toUpperCase(); if (s.equals("SINGLE") || s.equals("ANY") || s.equals("SOME")) { return getSubQueryForm(ANY); } else if (s.equals("ALL")) { return getSubQueryForm(ALL); } throw new Error("Do not understand subquery type '" + type_str + "'"); } /** * The type of object this Operator evaluates to. */ public TType returnTType() { if (equals(concat_op)) { return TType.STRING_TYPE; } else if (isMathematical()) { return TType.NUMERIC_TYPE; } else { return TType.BOOLEAN_TYPE; } } /** * Returns the string value of this operator. */ String stringRepresentation() { return op; } public String toString() { StringBuffer buf = new StringBuffer(); buf.append(op); if (set_type == ANY) { buf.append(" ANY"); } else if (set_type == ALL) { buf.append(" ALL"); } return new String(buf); } public boolean equals(Object ob) { if (this == ob) return true; Operator oob = (Operator) ob; return op.equals(oob.op) && set_type == oob.set_type; } /** * Returns an Operator with the given string. 
*/ public static Operator get(String op) { if (op.equals("+")) { return add_op; } else if (op.equals("-")) { return sub_op; } else if (op.equals("*")) { return mul_op; } else if (op.equals("/")) { return div_op; } else if (op.equals("||")) { return concat_op; } else if (op.equals("=") | op.equals("==")) { return eq_op; } else if (op.equals("<>") | op.equals("!=")) { return neq_op; } else if (op.equals(">")) { return g_op; } else if (op.equals("<")) { return l_op; } else if (op.equals(">=")) { return geq_op; } else if (op.equals("<=")) { return leq_op; } else if (op.equals("(")) { return par1_op; } else if (op.equals(")")) { return par2_op; } // Operators that are words, convert to lower case... op = op.toLowerCase(); if (op.equals("is")) { return is_op; } else if (op.equals("is not")) { return isn_op; } else if (op.equals("like")) { return like_op; } else if (op.equals("not like")) { return nlike_op; } else if (op.equals("regex")) { return regex_op; } else if (op.equals("in")) { return in_op; } else if (op.equals("not in")) { return nin_op; } else if (op.equals("not")) { return not_op; } else if (op.equals("and")) { return and_op; } else if (op.equals("or")) { return or_op; } throw new Error("Unrecognised operator type: " + op); } // ---------- Convenience methods ---------- /** * Returns true if the given TObject is a boolean and is true. If the * TObject is not a boolean value or is null or is false, then it returns * false. 
*/ private static boolean isTrue(TObject bool) { return (!bool.isNull() && bool.getTType() instanceof TBooleanType && bool.getObject().equals(Boolean.TRUE)); } // ---------- The different types of operator's we can have ---------- private final static AddOperator add_op = new AddOperator(); private final static SubtractOperator sub_op = new SubtractOperator(); private final static MultiplyOperator mul_op = new MultiplyOperator(); private final static DivideOperator div_op = new DivideOperator(); private final static ConcatOperator concat_op = new ConcatOperator(); private final static EqualOperator eq_op = new EqualOperator(); private final static NotEqualOperator neq_op = new NotEqualOperator(); private final static GreaterOperator g_op = new GreaterOperator(); private final static LesserOperator l_op = new LesserOperator(); private final static GreaterEqualOperator geq_op = new GreaterEqualOperator(); private final static LesserEqualOperator leq_op = new LesserEqualOperator(); private final static IsOperator is_op = new IsOperator(); private final static IsNotOperator isn_op = new IsNotOperator(); private final static PatternMatchTrueOperator like_op = new PatternMatchTrueOperator(); private final static PatternMatchFalseOperator nlike_op = new PatternMatchFalseOperator(); private final static RegexOperator regex_op = new RegexOperator(); private final static Operator in_op; private final static Operator nin_op; private final static Operator not_op = new SimpleOperator("not", 3); private final static AndOperator and_op = new AndOperator(); private final static OrOperator or_op = new OrOperator(); private final static ParenOperator par1_op = new ParenOperator("("); private final static ParenOperator par2_op = new ParenOperator(")"); // Maps from operator to 'any' operator private final static HashMap any_map = new HashMap(); // Maps from operator to 'all' operator. 
private final static HashMap all_map = new HashMap(); static { // Populate the static ANY and ALL mapping any_map.put("=", new AnyOperator("=")); any_map.put("<>", new AnyOperator("<>")); any_map.put(">", new AnyOperator(">")); any_map.put(">=", new AnyOperator(">=")); any_map.put("<", new AnyOperator("<")); any_map.put("<=", new AnyOperator("<=")); all_map.put("=", new AllOperator("=")); all_map.put("<>", new AllOperator("<>")); all_map.put(">", new AllOperator(">")); all_map.put(">=", new AllOperator(">=")); all_map.put("<", new AllOperator("<")); all_map.put("<=", new AllOperator("<=")); // The IN and NOT IN operator are '= ANY' and '<> ALL' respectively. in_op = (Operator) any_map.get("="); nin_op = (Operator) all_map.get("<>"); } static class AddOperator extends Operator { static final long serialVersionUID = 6995379384325694391L; public AddOperator() { super("+", 10); } public TObject eval(TObject ob1, TObject ob2, GroupResolver group, VariableResolver resolver, QueryContext context) { return ob1.operatorAdd(ob2); } }; static class SubtractOperator extends Operator { static final long serialVersionUID = 3035882496296296786L; public SubtractOperator() { super("-", 15); } public TObject eval(TObject ob1, TObject ob2, GroupResolver group, VariableResolver resolver, QueryContext context) { return ob1.operatorSubtract(ob2); } }; static class MultiplyOperator extends Operator { static final long serialVersionUID = 8191233936463163847L; public MultiplyOperator() { super("*", 20); } public TObject eval(TObject ob1, TObject ob2, GroupResolver group, VariableResolver resolver, QueryContext context) { return ob1.operatorMultiply(ob2); } }; static class DivideOperator extends Operator { static final long serialVersionUID = -2695205152105036247L; public DivideOperator() { super("/", 20); } public TObject eval(TObject ob1, TObject ob2, GroupResolver group, VariableResolver resolver, QueryContext context) { return ob1.operatorDivide(ob2); } }; static class ConcatOperator 
extends Operator { public ConcatOperator() { super("||", 10); } public TObject eval(TObject ob1, TObject ob2, GroupResolver group, VariableResolver resolver, QueryContext context) { return ob1.operatorConcat(ob2); } }; static class EqualOperator extends Operator { static final long serialVersionUID = -5022271093834866261L; public EqualOperator() { super("=", 4); } public TObject eval(TObject ob1, TObject ob2, GroupResolver group, VariableResolver resolver, QueryContext context) { return ob1.operatorEquals(ob2); } } static class NotEqualOperator extends Operator { static final long serialVersionUID = 5868174826733282297L; public NotEqualOperator() { super("<>", 4); } public TObject eval(TObject ob1, TObject ob2, GroupResolver group, VariableResolver resolver, QueryContext context) { return ob1.operatorNotEquals(ob2); } } static class GreaterOperator extends Operator { static final long serialVersionUID = -6870425685250387549L; public GreaterOperator() { super(">", 4); } public TObject eval(TObject ob1, TObject ob2, GroupResolver group, VariableResolver resolver, QueryContext context) { return ob1.operatorGreater(ob2); } } static class LesserOperator extends Operator { static final long serialVersionUID = 2962736161551360032L; public LesserOperator() { super("<", 4); } public TObject eval(TObject ob1, TObject ob2, GroupResolver group, VariableResolver resolver, QueryContext context) { return ob1.operatorLess(ob2); } } static class GreaterEqualOperator extends Operator { static final long serialVersionUID = 6040843932499067476L; public GreaterEqualOperator() { super(">=", 4); } public TObject eval(TObject ob1, TObject ob2, GroupResolver group, VariableResolver resolver, QueryContext context) { return ob1.operatorGreaterEquals(ob2); } } static class LesserEqualOperator extends Operator { static final long serialVersionUID = 4298966494510169621L; public LesserEqualOperator() { super("<=", 4); } public TObject eval(TObject ob1, TObject ob2, GroupResolver group, 
VariableResolver resolver, QueryContext context) { return ob1.operatorLessEquals(ob2); } } static class IsOperator extends Operator { static final long serialVersionUID = -5537856102106541908L; public IsOperator() { super("is", 4); } public TObject eval(TObject ob1, TObject ob2, GroupResolver group, VariableResolver resolver, QueryContext context) { return ob1.operatorIs(ob2); } } static class IsNotOperator extends Operator { static final long serialVersionUID = 1224184162192790982L; public IsNotOperator() { super("is not", 4); } public TObject eval(TObject ob1, TObject ob2, GroupResolver group, VariableResolver resolver, QueryContext context) { return ob1.operatorIs(ob2).operatorNot(); } } static class AnyOperator extends Operator { static final long serialVersionUID = 6421321961221271735L; public AnyOperator(String op) { super(op, 8, ANY); } public TObject eval(TObject ob1, TObject ob2, GroupResolver group, VariableResolver resolver, QueryContext context) { if (ob2.getTType() instanceof TQueryPlanType) { // The sub-query plan QueryPlanNode plan = (QueryPlanNode) ob2.getObject(); // Discover the correlated variables for this plan. 
ArrayList list = plan.discoverCorrelatedVariables(1, new ArrayList()); if (list.size() > 0) { // Set the correlated variables from the VariableResolver for (int i = 0; i < list.size(); ++i) { ((CorrelatedVariable) list.get(i)).setFromResolver(resolver); } // Clear the cache in the context context.clearCache(); } // Evaluate the plan, Table t = plan.evaluate(context); // The ANY operation Operator rev_plain_op = getSubQueryForm(NONE).reverse(); if (t.columnMatchesValue(0, rev_plain_op, ob1)) { return TObject.BOOLEAN_TRUE; } return TObject.BOOLEAN_FALSE; } else if (ob2.getTType() instanceof TArrayType) { Operator plain_op = getSubQueryForm(NONE); Expression[] exp_list = (Expression[]) ob2.getObject(); // Assume there are no matches TObject ret_val = TObject.BOOLEAN_FALSE; for (int i = 0; i < exp_list.length; ++i) { TObject exp_item = exp_list[i].evaluate(group, resolver, context); // If null value, return null if there isn't otherwise a match found. if (exp_item.isNull()) { ret_val = TObject.BOOLEAN_NULL; } // If there is a match, the ANY set test is true else if (isTrue(plain_op.eval(ob1, exp_item, null, null, null))) { return TObject.BOOLEAN_TRUE; } } // No matches, so return either false or NULL. If there are no matches // and no nulls, return false. If there are no matches and there are // nulls present, return null. return ret_val; } else { throw new Error("Unknown RHS of ANY."); } } } static class AllOperator extends Operator { static final long serialVersionUID = -4605268759294925687L; public AllOperator(String op) { super(op, 8, ALL); } public TObject eval(TObject ob1, TObject ob2, GroupResolver group, VariableResolver resolver, QueryContext context) { if (ob2.getTType() instanceof TQueryPlanType) { // The sub-query plan QueryPlanNode plan = (QueryPlanNode) ob2.getObject(); // Discover the correlated variables for this plan. 
ArrayList list = plan.discoverCorrelatedVariables(1, new ArrayList()); if (list.size() > 0) { // Set the correlated variables from the VariableResolver for (int i = 0; i < list.size(); ++i) { ((CorrelatedVariable) list.get(i)).setFromResolver(resolver); } // Clear the cache in the context context.clearCache(); } // Evaluate the plan, Table t = plan.evaluate(context); Operator rev_plain_op = getSubQueryForm(NONE).reverse(); if (t.allColumnMatchesValue(0, rev_plain_op, ob1)) { return TObject.BOOLEAN_TRUE; } return TObject.BOOLEAN_FALSE; } else if (ob2.getTType() instanceof TArrayType) { Operator plain_op = getSubQueryForm(NONE); Expression[] exp_list = (Expression[]) ob2.getObject(); // Assume true unless otherwise found to be false or NULL. TObject ret_val = TObject.BOOLEAN_TRUE; for (int i = 0; i < exp_list.length; ++i) { TObject exp_item = exp_list[i].evaluate(group, resolver, context); // If there is a null item, we return null if not otherwise found to // be false. if (exp_item.isNull()) { ret_val = TObject.BOOLEAN_NULL; } // If it doesn't match return false else if (!isTrue(plain_op.eval(ob1, exp_item, null, null, null))) { return TObject.BOOLEAN_FALSE; } } // Otherwise return true or null. If all match and no NULLs return // true. If all match and there are NULLs then return NULL. 
return ret_val; } else { throw new Error("Unknown RHS of ALL."); } } } static class RegexOperator extends Operator { static final long serialVersionUID = 8062751421429261272L; public RegexOperator() { super("regex", 8); } public TObject eval(TObject ob1, TObject ob2, GroupResolver group, VariableResolver resolver, QueryContext context) { if (ob1.isNull()) { return ob1; } if (ob2.isNull()) { return ob2; } String val = ob1.castTo(TType.STRING_TYPE).toStringValue(); String pattern = ob2.castTo(TType.STRING_TYPE).toStringValue(); return TObject.booleanVal(PatternSearch.regexMatch( context.getSystem(), pattern, val)); } } static class PatternMatchTrueOperator extends Operator { static final long serialVersionUID = 3038856811053114238L; public PatternMatchTrueOperator() { super("like", 8); } public TObject eval(TObject ob1, TObject ob2, GroupResolver group, VariableResolver resolver, QueryContext context) { if (ob1.isNull()) { return ob1; } if (ob2.isNull()) { return ob2; } String val = ob1.castTo(TType.STRING_TYPE).toStringValue(); String pattern = ob2.castTo(TType.STRING_TYPE).toStringValue(); TObject result = TObject.booleanVal( PatternSearch.fullPatternMatch(pattern, val, '\\')); return result; } } static class PatternMatchFalseOperator extends Operator { static final long serialVersionUID = 7271394661743778291L; public PatternMatchFalseOperator() { super("not like", 8); } public TObject eval(TObject ob1, TObject ob2, GroupResolver group, VariableResolver resolver, QueryContext context) { if (ob1.isNull()) { return ob1; } if (ob2.isNull()) { return ob2; } String val = ob1.castTo(TType.STRING_TYPE).toStringValue(); String pattern = ob2.castTo(TType.STRING_TYPE).toStringValue(); return TObject.booleanVal( !PatternSearch.fullPatternMatch(pattern, val, '\\')); } } // and/or have lowest precedence static class AndOperator extends Operator { static final long serialVersionUID = -6044610739300316190L; public AndOperator() { super("and", 2); } public TObject eval(TObject 
ob1, TObject ob2, GroupResolver group, VariableResolver resolver, QueryContext context) { Boolean b1 = ob1.toBoolean(); Boolean b2 = ob2.toBoolean(); // If either ob1 or ob2 are null if (b1 == null) { if (b2 != null) { if (b2.equals(Boolean.FALSE)) { return TObject.BOOLEAN_FALSE; } } return TObject.BOOLEAN_NULL; } else if (b2 == null) { if (b1.equals(Boolean.FALSE)) { return TObject.BOOLEAN_FALSE; } return TObject.BOOLEAN_NULL; } // If both true. return TObject.booleanVal(b1.equals(Boolean.TRUE) && b2.equals(Boolean.TRUE)); } } static class OrOperator extends Operator { static final long serialVersionUID = 6505549460035023998L; public OrOperator() { super("or", 1); } public TObject eval(TObject ob1, TObject ob2, GroupResolver group, VariableResolver resolver, QueryContext context) { Boolean b1 = ob1.toBoolean(); Boolean b2 = ob2.toBoolean(); // If either ob1 or ob2 are null if (b1 == null) { if (b2 != null) { if (b2.equals(Boolean.TRUE)) { return TObject.BOOLEAN_TRUE; } } return TObject.BOOLEAN_NULL; } else if (b2 == null) { if (b1.equals(Boolean.TRUE)) { return TObject.BOOLEAN_TRUE; } return TObject.BOOLEAN_NULL; } // If both true. 
return TObject.booleanVal(b1.equals(Boolean.TRUE) || b2.equals(Boolean.TRUE)); } } static class ParenOperator extends Operator { static final long serialVersionUID = -5720902399037456435L; public ParenOperator(String paren) { super(paren); } public TObject eval(TObject ob1, TObject ob2, GroupResolver group, VariableResolver resolver, QueryContext context) { throw new Error("Parenthese should never be evaluated!"); } } static class SimpleOperator extends Operator { static final long serialVersionUID = 1136249637094226133L; public SimpleOperator(String str) { super(str); } public SimpleOperator(String str, int prec) { super(str, prec); } public TObject eval(TObject ob1, TObject ob2, GroupResolver group, VariableResolver resolver, QueryContext context) { throw new Error("SimpleOperator should never be evaluated!"); } } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/OuterTable.java000066400000000000000000000125431330501023400246330ustar00rootroot00000000000000/** * com.mckoi.database.OuterTable 21 Sep 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import java.util.ArrayList; import com.mckoi.util.IntegerVector; /** * A Table class for forming OUTER type results. This takes as its constructor * the base table (with no outer NULL fields) that is what the result is based * on. 
It is then possible to merge in tables that are ancestors * * @author Tobias Downer */ class OuterTable extends VirtualTable implements RootTable { /** * The merged rows. */ public IntegerVector[] outer_rows; /** * The row count of the outer rows. */ private int outer_row_count; /** * Constructs the OuterTable given the base table. */ public OuterTable(Table input_table) { super(); RawTableInformation base_table = input_table.resolveToRawTable(new RawTableInformation()); Table[] tables = base_table.getTables(); IntegerVector[] rows = base_table.getRows(); outer_rows = new IntegerVector[rows.length]; // Set up the VirtualTable with this base table information, init(tables); set(tables, rows); } /** * Merges the given table in with this table. */ public void mergeIn(Table outside_table) { RawTableInformation raw_table_info = outside_table.resolveToRawTable(new RawTableInformation()); // Get the base information, Table[] base_tables = getReferenceTables(); IntegerVector[] base_rows = getReferenceRows(); // The tables and rows being merged in. Table[] tables = raw_table_info.getTables(); IntegerVector[] rows = raw_table_info.getRows(); // The number of rows being merged in. outer_row_count = rows[0].size(); for (int i = 0; i < base_tables.length; ++i) { Table btable = base_tables[i]; int index = -1; for (int n = 0; n < tables.length && index == -1; ++n) { if (btable == tables[n]) { index = n; } } // If the table wasn't found, then set 'NULL' to this base_table if (index == -1) { outer_rows[i] = null; } else { IntegerVector list = new IntegerVector(outer_row_count); outer_rows[i] = list; // Merge in the rows from the input table, IntegerVector to_merge = rows[index]; if (to_merge.size() != outer_row_count) { throw new Error("Wrong size for rows being merged in."); } list.append(to_merge); } } } // ---------- Implemented from DefaultDataTable ---------- /** * Returns the modified row count. 
*/ public int getRowCount() { return super.getRowCount() + outer_row_count; } /** * Returns a SelectableScheme for the given column in the given VirtualTable * row domain. This searches down through the tables ancestors until it * comes across a table with a SelectableScheme where the given column is * fully resolved. In most cases, this will be the root DataTable. *

* For an OuterTable, this must also include any rows with an index of -1 * which indicates they are NULL. NULL rows are put at the top of the * index list. */ SelectableScheme getSelectableSchemeFor(int column, int original_column, Table table) { if (column_scheme[column] == null) { // EFFICIENCY: We implement this with a blind search... SelectableScheme scheme = new BlindSearch(this, column); column_scheme[column] = scheme.getSubsetScheme(this, column); } if (table == this) { return column_scheme[column]; } else { return column_scheme[column].getSubsetScheme(table, original_column); } } /** * Returns an object that represents the information in the given cell * in the table. */ public TObject getCellContents(int column, int row) { int table_num = column_table[column]; Table parent_table = reference_list[table_num]; if (row >= outer_row_count) { row = row_list[table_num].intAt(row - outer_row_count); return parent_table.getCellContents(column_filter[column], row); } else { if (outer_rows[table_num] == null) { // Special case, handling outer entries (NULL) return new TObject(getColumnDefAt(column).getTType(), null); } else { row = outer_rows[table_num].intAt(row); return parent_table.getCellContents(column_filter[column], row); } } } // ---------- Implemented from RootTable ---------- /** * This function is used to check that two tables are identical. This * is used in operations like 'union' that need to determine that the * roots are infact of the same type. */ public boolean typeEquals(RootTable table) { return (this == table); } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/ParameterSubstitution.java000066400000000000000000000033671330501023400271460ustar00rootroot00000000000000/** * com.mckoi.database.ParameterSubstitution 09 Sep 2001 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; /** * An object that represents a constant value that is to be lately binded to * a constant value in an Expression. This is used when we have ? style * prepared statement values. This object is used as a marker in the * elements of a expression. * * @author Tobias Downer */ public class ParameterSubstitution implements java.io.Serializable { static final long serialVersionUID = -740886588230246432L; /** * The numerical number of this parameter substitution. The first * substitution is '0', the second is '1', etc. */ private int parameter_id; /** * Creates the substitution. */ public ParameterSubstitution(int parameter_id) { this.parameter_id = parameter_id; } /** * Returns the number of this parameter id. */ public int getID() { return parameter_id; } /** * Equality test. */ public boolean equals(Object ob) { ParameterSubstitution sub = (ParameterSubstitution) ob; return this.parameter_id == sub.parameter_id; } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/PatternSearch.java000066400000000000000000000442021330501023400253250ustar00rootroot00000000000000/** * com.mckoi.database.PatternSearch 05 Sep 1998 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import com.mckoi.database.global.Types; import com.mckoi.util.IntegerVector; import com.mckoi.util.BlockIntegerList; import com.mckoi.util.IntegerIterator; /** * This is a static class that performs the operations to do a pattern search * on a given column of a table. The pattern syntax is very simple and follows * that of the SQL standard. *

* It works as follows: * The '%' character represents any sequence of characters. * The '_' character represents some character. *

* Therefore, the pattern search 'Toby%' will find all rows that start with * the string 'Toby' and end with any sequence of characters. The pattern * 'T% Downer%' will find all names starting with T and containing 'Downer' * somewhere in the end. The pattern '_at' will find all three letter words * ending with 'at'. *

* NOTE: A 'ab%' type search is faster than a '%bc' type search. If the start * of the search pattern is unknown then the entire contents of the column * need to be accessed. * * @author Tobias Downer */ public final class PatternSearch { /** * Statics for the tokens. */ private final static char ZERO_OR_MORE_CHARS = '%'; private final static char ONE_CHAR = '_'; public static boolean testSearch(String pattern, String expression, boolean result) { System.out.print("Pattern: "); System.out.println("'" + pattern + "'"); System.out.print("Expression: "); System.out.println("'" + expression + "'"); boolean tested_as = fullPatternMatch(pattern, expression, '\\'); System.out.print("Result: "); System.out.print(tested_as); if (tested_as != result) { System.out.println(" *** FAILED, Expected: " + result + " ***"); } else { System.out.println(); } return tested_as; } public static void main(String[] args) { // Testing the SQL expression parser. testSearch("", "abc", false); testSearch("%", "abc", true); testSearch("a%", "abc", true); testSearch("ab%", "abc", true); testSearch("abc%", "abc", true); testSearch("abcd%", "abc", false); testSearch("abcd", "abc", false); testSearch("abc", "abc", true); testSearch("ab", "abc", false); testSearch("a", "abc", false); testSearch("a_", "abc", false); testSearch("ab_", "abc", true); testSearch("abc_", "abc", false); testSearch("a_c", "abc", true); testSearch("a%bc", "abc", true); testSearch("a%c", "abc", true); testSearch("%c", "abc", true); testSearch("a_bc", "abc", false); testSearch("__c", "abc", true); testSearch("a__", "abc", true); testSearch("a\\_\\_", "a__", true); testSearch("a\\__", "a__", true); testSearch("a\\__", "a_b", true); testSearch("\\___", "_ab", true); testSearch("\\_\\__", "_ab", false); testSearch("\\_\\__", "__b", true); testSearch("\\%ab", "%ab", true); testSearch("ab\\%", "ab%", true); testSearch("cab\\%", "cab", false); testSearch("cab%", "cab", true); } /** * Returns true if the given character is a wild card 
(unknown). */ private static boolean isWildCard(char ch) { return (ch == ONE_CHAR || ch == ZERO_OR_MORE_CHARS); } /** * Matches a pattern against a string and returns true if it matches or * false otherwise. This matches patterns that do not necessarily start * with a wild card unlike the 'patternMatch' method. */ public static boolean fullPatternMatch(String pattern, final String str, char escape_char) { StringBuffer start = new StringBuffer(); String rezt = null; int len = pattern.length(); int i = 0; boolean last_escape_char = false; for (; i < len && rezt == null; ++i) { char c = pattern.charAt(i); if (last_escape_char) { last_escape_char = false; start.append(c); } else if (c == escape_char) { last_escape_char = true; } else if (isWildCard(c)) { rezt = pattern.substring(i); } else { start.append(c); } } if (rezt == null) { rezt = ""; } String st = new String(start); // System.out.println("--"); // System.out.println(str); // System.out.println(st); if (str.startsWith(st)) { String str_rezt = str.substring(st.length()); // (i) if (rezt.length() > 0) { return patternMatch(rezt, str_rezt, escape_char); } else { return str_rezt.length() == 0; } } else { return false; } } /** * This is the pattern match recurrsive method. It recurses on each wildcard * expression in the pattern which makes for slightly better efficiency than * a character recurse algorithm. However, patterns such as "_%_a" will * result in many recursive calls. *

* Returns true if the pattern matches. *

* NOTE: That "_%_" will be less efficient than "__%" and will produce the * same result. * NOTE: It requires that a wild card character is the first character in * the expression. * ISSUE: Pattern optimiser, we should optimise wild cards of type "%__" to * "__%", or "%__%_%_%" to "____%". Optimised forms are identical in * result and more efficient. This optimisation could be performed by the * client during parsing of the LIKE statement. * HACKING ISSUE: Badly formed wild cards may result in hogging of server * side resources. */ public static boolean patternMatch(String pattern, String expression, char escape_char) { // Look at first character in pattern, if it's a ONE_CHAR wildcard then // check expression and pattern match until next wild card. if (pattern.charAt(0) == ONE_CHAR) { // Else step through each character in pattern and see if it matches up // with the expression until a wild card is found or the end is reached. // When the end of the pattern is reached, 'finished' is set to true. int i = 1; boolean finished = (i >= pattern.length() || expression.length() < 1); boolean last_was_escape = false; int checked = 0; while (!finished) { char c = pattern.charAt(i); if (!last_was_escape && c == escape_char) { last_was_escape = true; if (i >= expression.length()) { return false; } ++i; } else if (last_was_escape || !isWildCard(c)) { last_was_escape = false; // If expression and pattern character doesn't match or end of // expression reached, search has failed. 
if (i >= expression.length() || c != expression.charAt(i)) { return false; } ++i; ++checked; } else { // found a wildcard, so recurse on this wildcard return patternMatch(pattern.substring(i), expression.substring(i), escape_char); } finished = (i >= pattern.length()); } // The pattern length minus any escaped characters int real_pattern_length = 0; int sz = pattern.length(); for (int n = 0; n < sz; ++n) { if (pattern.charAt(n) != escape_char) { ++real_pattern_length; } else { ++n; } } // If pattern and expression lengths match then we have walked through // the expression and found a match, otherwise no match. return real_pattern_length == expression.length(); } // Therefore we are doing a ZERO_OR_MORE_CHARS wildcard check. // If the pattern is '%' (ie. pattern.length() == 1 because it's only 1 // character in length (the '%' character)) then it doesn't matter what the // expression is, we have found a match. if (pattern.length() == 1) { return true; } // Look at following character in pattern, and extract all the characters // before the next wild card. StringBuffer next_string = new StringBuffer(); int i = 1; boolean finished = (i >= pattern.length()); boolean last_was_escape = false; while (!finished) { char next_char = pattern.charAt(i); if (!last_was_escape && next_char == escape_char) { last_was_escape = true; ++i; if (i >= pattern.length()) { finished = true; } } else if (last_was_escape || !isWildCard(next_char)) { last_was_escape = false; next_string.append(next_char); ++i; if (i >= pattern.length()) { finished = true; } } else { finished = true; } } String find_string = new String(next_string); // Special case optimisation if we have found the end of the pattern, all // we need to do is check if 'find_string' is on the end of the expression. // eg. pattern = "%er", will have a 'find_string' of "er" and it is saying // 'does the expression end with 'er''. 
if (i >= pattern.length()) { return (expression.endsWith(find_string)); } // Otherwise we must have finished with another wild card. // Try and find 'next_string' in the expression. If its found then // recurse over the next pattern. int find_str_length = find_string.length(); int str_index = expression.indexOf(find_string, 0); while (str_index != -1) { boolean matched = patternMatch( pattern.substring(1 + find_str_length), expression.substring(str_index + find_str_length), escape_char); if (matched) { return true; } str_index = expression.indexOf(find_string, str_index + 1); } return false; } /** * This is the search method. It requires a table to search, a column of the * table, and a pattern. It returns the rows in the table that match the * pattern if any. Pattern searching only works successfully on columns that * are of type Types.DB_STRING. * This works by first reducing the search to all cells that contain the * first section of text. ie. pattern = "Toby% ___ner" will first reduce * search to all rows between "Toby" and "Tobz". This makes for better * efficiency. */ static IntegerVector search(Table table, int column, String pattern) { return search(table, column, pattern, '\\'); } /** * This is the search method. It requires a table to search, a column of the * table, and a pattern. It returns the rows in the table that match the * pattern if any. Pattern searching only works successfully on columns that * are of type Types.DB_STRING. * This works by first reducing the search to all cells that contain the * first section of text. ie. pattern = "Toby% ___ner" will first reduce * search to all rows between "Toby" and "Tobz". This makes for better * efficiency. */ static IntegerVector search(Table table, int column, String pattern, char escape_char) { // Get the type for the column TType col_type = table.getDataTableDef().columnAt(column).getTType(); // If the column type is not a string type then report an error. 
if (!(col_type instanceof TStringType)) { throw new Error("Unable to perform a pattern search " + "on a non-String type column."); } TStringType col_string_type = (TStringType) col_type; // ---------- Pre Search ---------- // First perform a 'pre-search' on the head of the pattern. Note that // there may be no head in which case the entire column is searched which // has more potential to be expensive than if there is a head. StringBuffer pre_pattern = new StringBuffer(); int i = 0; boolean finished = i >= pattern.length(); boolean last_is_escape = false; while (!finished) { char c = pattern.charAt(i); if (last_is_escape) { last_is_escape = true; pre_pattern.append(c); } else if (c == escape_char) { last_is_escape = true; } else if (!isWildCard(c)) { pre_pattern.append(c); ++i; if (i >= pattern.length()) { finished = true; } } else { finished = true; } } // This is set with the remaining search. String post_pattern; // This is our initial search row set. In the second stage, rows are // eliminated from this vector. IntegerVector search_case; if (i >= pattern.length()) { // If the pattern has no 'wildcards' then just perform an EQUALS // operation on the column and return the results. TObject cell = new TObject(col_type, pattern); return table.selectRows(column, Operator.get("="), cell); // RETURN } else if (pre_pattern.length() == 0 || col_string_type.getLocale() != null) { // No pre-pattern easy search :-(. This is either because there is no // pre pattern (it starts with a wild-card) or the locale of the string // is non-lexicographical. In either case, we need to select all from // the column and brute force the search space. search_case = table.selectAll(column); post_pattern = pattern; } else { // Criteria met: There is a pre_pattern, and the column locale is // lexicographical. // Great, we can do an upper and lower bound search on our pre-search // set. eg. 
search between 'Geoff' and 'Geofg' or 'Geoff ' and // 'Geoff\33' String lower_bounds = new String(pre_pattern); int next_char = pre_pattern.charAt(i - 1) + 1; pre_pattern.setCharAt(i - 1, (char) next_char); String upper_bounds = new String(pre_pattern); post_pattern = pattern.substring(i); TObject cell_lower = new TObject(col_type, lower_bounds); TObject cell_upper = new TObject(col_type, upper_bounds); // Select rows between these two points. search_case = table.selectRows(column, cell_lower, cell_upper); } // ---------- Post search ---------- // [This optimization assumes that (NULL like '%' = true) which is incorrect] // // EFFICIENCY: This is a special case efficiency case. // // If 'post_pattern' is '%' then we have already found all the records in // // our pattern. // // if (post_pattern.equals("%")) { // return search_case; // } int pre_index = i; // Now eliminate from our 'search_case' any cells that don't match our // search pattern. // Note that by this point 'post_pattern' will start with a wild card. // This follows the specification for the 'patternMatch' method. // EFFICIENCY: This is a brute force iterative search. Perhaps there is // a faster way of handling this? BlockIntegerList i_list = new BlockIntegerList(search_case); IntegerIterator iterator = i_list.iterator(0, i_list.size() - 1); while (iterator.hasNext()) { // Get the expression (the contents of the cell at the given column, row) boolean pattern_matches = false; TObject cell = table.getCellContents(column, iterator.next()); // Null values doesn't match with anything if (!cell.isNull()) { String expression = cell.getObject().toString(); // We must remove the head of the string, which has already been // found from the pre-search section. expression = expression.substring(pre_index); pattern_matches = patternMatch(post_pattern, expression, escape_char); } if (!pattern_matches) { // If pattern does not match then remove this row from the search. 
iterator.remove(); } } return new IntegerVector(i_list); } // ---------- Matching against a regular expression ---------- /** * Matches a string against a regular expression pattern. We use the regex * library as specified in the DatabaseSystem configuration. */ static boolean regexMatch(TransactionSystem system, String pattern, String value) { // If the first character is a '/' then we assume it's a Perl style regular // expression (eg. "/.*[0-9]+\/$/i") if (pattern.startsWith("/")) { int end = pattern.lastIndexOf('/'); String pat = pattern.substring(1, end); String ops = pattern.substring(end + 1); return system.getRegexLibrary().regexMatch(pat, ops, value); } else { // Otherwise it's a regular expression with no operators return system.getRegexLibrary().regexMatch(pattern, "", value); } } /** * Matches a column of a table against a constant regular expression * pattern. We use the regex library as specified in the DatabaseSystem * configuration. */ static IntegerVector regexSearch(Table table, int column, String pattern) { // If the first character is a '/' then we assume it's a Perl style regular // expression (eg. "/.*[0-9]+\/$/i") if (pattern.startsWith("/")) { int end = pattern.lastIndexOf('/'); String pat = pattern.substring(1, end); String ops = pattern.substring(end + 1); return table.getDatabase().getSystem().getRegexLibrary().regexSearch( table, column, pat, ops); } else { // Otherwise it's a regular expression with no operators return table.getDatabase().getSystem().getRegexLibrary().regexSearch( table, column, pattern, ""); } } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/Privileges.java000066400000000000000000000204341330501023400246740ustar00rootroot00000000000000/** * com.mckoi.database.Privileges 23 Aug 2001 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import com.mckoi.util.StringListBucket; import java.util.StringTokenizer; import java.util.Collections; import java.util.ArrayList; import java.sql.*; /** * A set of privileges to grant a user for an object. * * @author Tobias Downer */ public class Privileges { /** * The number of bits available to set. */ final static int BIT_COUNT = 11; /** * The bit mask. There are currently 11 used bits, so this has all 11 bits * set. */ final static int BIT_MASK = (1 << BIT_COUNT) - 1; // ---------- Statics ---------- /** * The priv to allow full access to the database object. If this is used, * it should be the only privilege added. */ public final static int ALL = BIT_MASK; /** * The priv to SELECT from a database object. */ public final static int SELECT = 0x01; /** * The priv to DELETE from a database object. */ public final static int DELETE = 0x02; /** * The priv to UPDATE a database object. */ public final static int UPDATE = 0x04; /** * The priv to INSERT to a database object. */ public final static int INSERT = 0x08; /** * The priv to REFERENCE a database object. */ public final static int REFERENCES = 0x010; /** * The priv to see statistics on a database object. */ public final static int USAGE = 0x020; /** * The priv to compact a database object. */ public final static int COMPACT = 0x040; /** * The priv to create objects (only applicable for SCHEMA grant objects). 
*/ public final static int CREATE = 0x080; /** * The priv to alter objects (only applicable for SCHEMA grant objects). */ public final static int ALTER = 0x0100; /** * The priv to drop objects (only applicable for SCHEMA grant objects). */ public final static int DROP = 0x0200; /** * The priv to view objects in a schema (only applicable for SCHEMA grant * objects). */ public final static int LIST = 0x0400; // ---------- Members ---------- /** * The priv bit map. */ private int privs; /** * Constructor. */ private Privileges(int privs) { this.privs = privs & BIT_MASK; } public Privileges() { this(0); } /** * Adds a privilege and returns a new Privileges object with the new priv * set. */ public Privileges add(int priv) { return new Privileges(privs | priv); } /** * Removes a privilege with a column list parameter. */ public Privileges remove(int priv) { int and_priv = (privs & priv); return new Privileges(privs ^ and_priv); } /** * Removes the given privileges from this privileges object and returns the * new privileges object. */ public Privileges remove(Privileges privs) { return remove(privs.privs); } /** * Returns true if this privileges permits the given priv. */ public boolean permits(int priv) { return (privs & priv) != 0; } /** * Merges privs from the given privilege object with this set of privs. * This performs an OR on all the attributes in the set. If the entry * does not exist in this set then it is added. */ public Privileges merge(Privileges in_privs) { return add(in_privs.privs); } /** * Returns true if this Privileges object contains no priv entries. */ public boolean isEmpty() { return privs == 0; } /** * Returns a String that represents the given priv bit. 
*/ static String formatPriv(int priv) { if ((priv & SELECT) != 0) { return "SELECT"; } else if ((priv & DELETE) != 0) { return "DELETE"; } else if ((priv & UPDATE) != 0) { return "UPDATE"; } else if ((priv & INSERT) != 0) { return "INSERT"; } else if ((priv & REFERENCES) != 0) { return "REFERENCES"; } else if ((priv & USAGE) != 0) { return "USAGE"; } else if ((priv & COMPACT) != 0) { return "COMPACT"; } else if ((priv & CREATE) != 0) { return "CREATE"; } else if ((priv & ALTER) != 0) { return "ALTER"; } else if ((priv & DROP) != 0) { return "DROP"; } else if ((priv & LIST) != 0) { return "LIST"; } else { throw new Error("Not priv bit set."); } } /** * Given a string, returns the priv bit for it. */ public static int parseString(String priv) { if (priv.equals("SELECT")) { return SELECT; } else if (priv.equals("DELETE")) { return DELETE; } else if (priv.equals("UPDATE")) { return UPDATE; } else if (priv.equals("INSERT")) { return INSERT; } else if (priv.equals("REFERENCES")) { return REFERENCES; } else if (priv.equals("USAGE")) { return USAGE; } else if (priv.equals("COMPACT")) { return COMPACT; } else if (priv.equals("CREATE")) { return CREATE; } else if (priv.equals("ALTER")) { return ALTER; } else if (priv.equals("DROP")) { return DROP; } else if (priv.equals("LIST")) { return LIST; } else { throw new Error("Priv not recognised."); } } /** * Returns this Privileges object as an encoded int bit array. */ public int toInt() { return privs; } /** * Converts this privilege to an encoded string. 
*/ public String toEncodedString() { StringBuffer buf = new StringBuffer(); buf.append("||"); int priv_bit = 1; for (int i = 0; i < 11; ++i) { if ((privs & priv_bit) != 0) { buf.append(formatPriv(priv_bit)); buf.append("||"); } priv_bit = priv_bit << 1; } return new String(buf); } public String toString() { StringBuffer buf = new StringBuffer(); int priv_bit = 1; for (int i = 0; i < 11; ++i) { if ((privs & priv_bit) != 0) { buf.append(formatPriv(priv_bit)); buf.append(' '); } priv_bit = priv_bit << 1; } return new String(buf); } public boolean equals(Object ob) { return privs == ((Privileges) ob).privs; } // ---------- More statics ---------- /** * No privileges. */ public final static Privileges EMPTY_PRIVS; /** * Enable all privs for the object. */ public final static Privileges TABLE_ALL_PRIVS; /** * Read privs for the object. */ public final static Privileges TABLE_READ_PRIVS; /** * All access privs for a schema object. */ public final static Privileges SCHEMA_ALL_PRIVS; /** * Read access privs for a schema object. */ public final static Privileges SCHEMA_READ_PRIVS; /** * All access (execute/update/delete/etc) privs for a procedure object. */ public final static Privileges PROCEDURE_ALL_PRIVS; /** * Execute access privs for a procedure object. 
*/
  public final static Privileges PROCEDURE_EXECUTE_PRIVS;

  static {
    Privileges p;

    // No privileges at all.
    EMPTY_PRIVS = new Privileges();

    // Full table access: query and modify data, plus usage and compact.
    p = EMPTY_PRIVS;
    p = p.add(SELECT);
    p = p.add(DELETE);
    p = p.add(UPDATE);
    p = p.add(INSERT);
    p = p.add(REFERENCES);
    p = p.add(USAGE);
    p = p.add(COMPACT);
    TABLE_ALL_PRIVS = p;

    // Read-only table access.
    p = EMPTY_PRIVS;
    p = p.add(SELECT);
    p = p.add(USAGE);
    TABLE_READ_PRIVS = p;

    // Full schema access: create/alter/drop/list objects.
    p = EMPTY_PRIVS;
    p = p.add(CREATE);
    p = p.add(ALTER);
    p = p.add(DROP);
    p = p.add(LIST);
    SCHEMA_ALL_PRIVS = p;

    // Read-only schema access.
    p = EMPTY_PRIVS;
    p = p.add(LIST);
    SCHEMA_READ_PRIVS = p;

    // Full procedure access.
    p = EMPTY_PRIVS;
    p = p.add(SELECT);
    p = p.add(DELETE);
    p = p.add(UPDATE);
    p = p.add(INSERT);
    PROCEDURE_ALL_PRIVS = p;

    // Execute-only procedure access.
    p = EMPTY_PRIVS;
    p = p.add(SELECT);
    PROCEDURE_EXECUTE_PRIVS = p;
  }

}
mckoisqldb-1.0.6/src/main/java/com/mckoi/database/ProcedureConnection.java000066400000000000000000000044101330501023400265300ustar00rootroot00000000000000/**
 * com.mckoi.database.ProcedureConnection 06 Mar 2003
 *
 * Mckoi SQL Database ( http://www.mckoi.com/database )
 * Copyright (C) 2000-2018 Diehl and Associates, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package com.mckoi.database;

import java.sql.Connection;

/**
 * An interface for accessing a database connection inside a stored procedure.
 *
 * @author Tobias Downer
 */

public interface ProcedureConnection {

  /**
   * Returns a JDBC Connection implementation for executing queries on this
   * connection.
The Connection has auto-commit turned off, and it
   * disables the ability for the connection to 'commit' changes to the
   * database.
   * <p>
   * This method is intended to provide the procedure developer with a
   * convenient and consistent way to query and manipulate the database from
   * the body of a stored procedure method.
   * <p>
   * The java.sql.Connection object returned here may invalidate when the
   * procedure invocation call ends so the returned object must not be cached
   * to be used again later.
   * <p>
   * The returned java.sql.Connection object is NOT thread safe and should
   * only be used by a single thread.  Accessing this connection from multiple
   * threads will result in undefined behaviour.
   * <p>
   * The Connection object returned here has the same privs as the user who
   * owns the stored procedure.
   */
  Connection getJDBCConnection();

  /**
   * Returns the Database object for this database providing access to various
   * general database features including backing up replication and
   * configuration.  Some procedures may not be allowed access to this object
   * in which case a ProcedureException is thrown notifying of the security
   * violation.
   */
  Database getDatabase();

}
mckoisqldb-1.0.6/src/main/java/com/mckoi/database/ProcedureException.java000066400000000000000000000021461330501023400263720ustar00rootroot00000000000000/**
 * com.mckoi.database.ProcedureException 06 Mar 2003
 *
 * Mckoi SQL Database ( http://www.mckoi.com/database )
 * Copyright (C) 2000-2018 Diehl and Associates, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package com.mckoi.database;

/**
 * An exception that is generated from a stored procedure when some erroneous
 * condition occurs.  This error is typically returned back to the client.
 *
 * @author Tobias Downer
 */

public class ProcedureException extends RuntimeException {

  /**
   * Construct the exception.
*/
  public ProcedureException(String str) {
    super(str);
  }

}
mckoisqldb-1.0.6/src/main/java/com/mckoi/database/ProcedureManager.java000066400000000000000000000760761330501023400260170ustar00rootroot00000000000000/**
 * com.mckoi.database.ProcedureManager 27 Feb 2003
 *
 * Mckoi SQL Database ( http://www.mckoi.com/database )
 * Copyright (C) 2000-2018 Diehl and Associates, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package com.mckoi.database;

import java.lang.reflect.*;
import com.mckoi.database.global.BlobAccessor;
import com.mckoi.database.global.StringAccessor;
import com.mckoi.util.BigNumber;
import java.io.IOException;
import java.io.InputStream;
import java.util.StringTokenizer;
import java.util.ArrayList;

/**
 * A DatabaseConnection procedure manager.  This controls adding, updating,
 * deleting and querying/calling stored procedures.
 *
 * @author Tobias Downer
 */

public class ProcedureManager {

  /**
   * The DatabaseConnection.
   */
  private DatabaseConnection connection;

  /**
   * The context.
   */
  private DatabaseQueryContext context;

  /**
   * Constructs the ProcedureManager for a DatabaseConnection.
   */
  ProcedureManager(DatabaseConnection connection) {
    this.connection = connection;
    this.context = new DatabaseQueryContext(connection);
  }

  /**
   * Given the SYS_FUNCTION table, this returns a new table that contains the
   * entry with the given procedure name, or an empty result if nothing found.
   * Generates an error if more than 1 entry found.
   */
  private Table findProcedureEntry(DataTable table,
                                   ProcedureName procedure_name) {

    Operator EQUALS = Operator.get("=");

    // Column 0 is the schema, column 1 is the procedure name.
    Variable schemav = table.getResolvedVariable(0);
    Variable namev = table.getResolvedVariable(1);

    // Select on the procedure name first, then filter by schema.
    Table t = table.simpleSelect(context, namev, EQUALS,
                 new Expression(TObject.stringVal(procedure_name.getName())));
    t = t.exhaustiveSelect(context, Expression.simple(
              schemav, EQUALS, TObject.stringVal(procedure_name.getSchema())));

    // This should be at most 1 row in size
    if (t.getRowCount() > 1) {
      throw new RuntimeException(
             "Assert failed: multiple procedure names for " + procedure_name);
    }

    // Return the entries found.
    return t;
  }

  /**
   * Formats a string that gives information about the procedure, return
   * type and param types.
   */
  private static String procedureInfoString(ProcedureName name,
                                            TType ret, TType[] params) {
    StringBuffer buf = new StringBuffer();
    // Optional return type prefix.
    if (ret != null) {
      buf.append(ret.asSQLString());
      buf.append(" ");
    }
    buf.append(name.getName());
    buf.append("(");
    for (int i = 0; i < params.length; ++i) {
      buf.append(params[i].asSQLString());
      if (i < params.length - 1) {
        buf.append(", ");
      }
    }
    buf.append(")");
    return new String(buf);
  }

  /**
   * Given a location string as defined for a Java stored procedure, this
   * parses the string into the various parts.  For example, given the
   * string 'com.mycompany.storedprocedures.MyFunctions.minFunction()' this
   * will parse the string out to the class called
   * 'com.mycompany.storedprocedures.MyFunctions' and the method 'minFunction'
   * with no arguments.  This function will work event if the method name is
   * not given, or the method name does not have an arguments specification.
   * The returned array is either { class } (no parentheses present) or
   * { class, method, arg1, arg2, ... }.
   */
  public static String[] parseJavaLocationString(final String str) {
    // Look for the first parenthese
    int parenthese_delim = str.indexOf("(");
    String class_method;

    if (parenthese_delim != -1) {
      // This represents class/method
      class_method = str.substring(0, parenthese_delim);
      // This will be deliminated by a '.'
      int method_delim = class_method.lastIndexOf(".");
      if (method_delim == -1) {
        throw new StatementException(
                          "Incorrectly formatted Java method string: " + str);
      }
      String class_str = class_method.substring(0, method_delim);
      String method_str = class_method.substring(method_delim + 1);

      // Next parse the argument list
      int end_parenthese_delim = str.lastIndexOf(")");
      if (end_parenthese_delim == -1) {
        throw new StatementException(
                          "Incorrectly formatted Java method string: " + str);
      }

      String arg_list_str =
                 str.substring(parenthese_delim + 1, end_parenthese_delim);

      // Now parse the list of arguments
      ArrayList arg_list = new ArrayList();
      StringTokenizer tok = new StringTokenizer(arg_list_str, ",");
      while (tok.hasMoreTokens()) {
        String arg = tok.nextToken();
        arg_list.add(arg);
      }

      // Form the parsed array and return it
      int sz = arg_list.size();
      String[] return_array = new String[2 + sz];
      return_array[0] = class_str;
      return_array[1] = method_str;
      for (int i = 0; i < sz; ++i) {
        return_array[i + 2] = (String) arg_list.get(i);
      }
      return return_array;
    }
    else {
      // No parenthese so we assume this is a java class
      return new String[] { str };
    }
  }

  /**
   * Returns true if the procedure with the given name exists.
   */
  public boolean procedureExists(ProcedureName procedure_name) {
    DataTable table = connection.getTable(Database.SYS_FUNCTION);
    return findProcedureEntry(table, procedure_name).getRowCount() == 1;
  }

  /**
   * Returns true if the procedure with the given table name exists.
   */
  public boolean procedureExists(TableName procedure_name) {
    return procedureExists(new ProcedureName(procedure_name));
  }

  /**
   * Defines a Java stored procedure.  If the procedure with the name has not
   * been defined it is defined.  If the procedure has been defined then it is
   * overwritten with this information.
   *

* If 'return_type' is null then the procedure does not return a value. */ public void defineJavaProcedure(ProcedureName procedure_name, String java_specification, TType return_type, TType[] param_types, String username) throws DatabaseException { TableName proc_table_name = new TableName(procedure_name.getSchema(), procedure_name.getName()); // Check this name is not reserved DatabaseConnection.checkAllowCreate(proc_table_name); DataTable table = connection.getTable(Database.SYS_FUNCTION); // The new row to insert/update RowData row_data = new RowData(table); row_data.setColumnDataFromObject(0, procedure_name.getSchema()); row_data.setColumnDataFromObject(1, procedure_name.getName()); row_data.setColumnDataFromObject(2, "Java-1"); row_data.setColumnDataFromObject(3, java_specification); if (return_type != null) { row_data.setColumnDataFromObject(4, TType.asEncodedString(return_type)); } row_data.setColumnDataFromObject(5, TType.asEncodedString(param_types)); row_data.setColumnDataFromObject(6, username); // Find the entry from the procedure table that equal this name Table t = findProcedureEntry(table, procedure_name); // Delete the entry if it already exists. if (t.getRowCount() == 1) { table.delete(t); } // Insert the new entry, table.add(row_data); // Notify that this database object has been successfully created. connection.databaseObjectCreated(proc_table_name); } /** * Deletes the procedure with the given name, or generates an error if the * procedure doesn't exist. */ public void deleteProcedure(ProcedureName procedure_name) throws DatabaseException { DataTable table = connection.getTable(Database.SYS_FUNCTION); // Find the entry from the procedure table that equal this name Table t = findProcedureEntry(table, procedure_name); // If no entries then generate error. if (t.getRowCount() == 0) { throw new StatementException("Procedure " + procedure_name + " doesn't exist."); } table.delete(t); // Notify that this database object has been successfully dropped. 
connection.databaseObjectDropped( new TableName(procedure_name.getSchema(), procedure_name.getName())); } /** * Returns an InternalTableInfo object used to model the list of procedures * that are accessible within the given Transaction object. This is used to * model all procedures that have been defined as tables. */ static InternalTableInfo createInternalTableInfo(Transaction transaction) { return new ProcedureInternalTableInfo(transaction); } /** * Invokes the procedure with the given name and the given parameters and * returns the procedure return value. */ public TObject invokeProcedure(ProcedureName procedure_name, TObject[] params) { DataTable table = connection.getTable(Database.SYS_FUNCTION); // Find the entry from the procedure table that equals this name Table t = findProcedureEntry(table, procedure_name); if (t.getRowCount() == 0) { throw new StatementException("Procedure " + procedure_name + " doesn't exist."); } int row_index = t.rowEnumeration().nextRowIndex(); TObject type_ob = t.getCellContents(2, row_index); TObject location_ob = t.getCellContents(3, row_index); TObject return_type_ob = t.getCellContents(4, row_index); TObject param_types_ob = t.getCellContents(5, row_index); TObject owner_ob = t.getCellContents(6, row_index); String type = type_ob.getObject().toString(); String location = location_ob.getObject().toString(); TType return_type = null; if (!return_type_ob.isNull()) { return_type = TType.decodeString(return_type_ob.getObject().toString()); } TType[] param_types = TType.decodeTypes(param_types_ob.getObject().toString()); String owner = owner_ob.getObject().toString(); // Check the number of parameters given match the function parameters length if (params.length != param_types.length) { throw new StatementException( "Parameters given do not match the parameters of the procedure: " + procedureInfoString(procedure_name, return_type, param_types)); } // The different procedure types, if (type.equals("Java-1")) { return 
invokeJavaV1Procedure(procedure_name, location, return_type, param_types, owner, params); } else { throw new RuntimeException("Unknown procedure type: " + type); } } /** * Resolves a Java class specification string to a Java Class object. For * example, "String" becomes 'java.lang.String.class' and "boolean[]" becomes * 'boolean[].class', etc. */ private static Class resolveToClass(String java_spec) { // Trim the string java_spec = java_spec.trim(); // Is this an array? Count the number of array dimensions. int dimensions = -1; int last_index = java_spec.length(); while (last_index > 0) { ++dimensions; last_index = java_spec.lastIndexOf("[]", last_index) - 1; } // Remove the array part int array_end = java_spec.length() - (dimensions * 2); String class_part = java_spec.substring(0, array_end); // Check there's no array parts in the class part if (class_part.indexOf("[]") != -1) { throw new RuntimeException( "Java class specification incorrectly formatted: " + java_spec); } // Convert the java specification to a Java class. For example, // String is converted to java.lang.String.class, etc. Class cl; // Is there a '.' in the class specification? if (class_part.indexOf(".") != -1) { // Must be a specification such as 'java.net.URL' or 'java.util.List'. 
try { cl = Class.forName(class_part); } catch (ClassNotFoundException i) { throw new RuntimeException("Java class not found: " + class_part); } } // Try for a primitive types else if (class_part.equals("boolean")) { cl = boolean.class; } else if (class_part.equals("byte")) { cl = byte.class; } else if (class_part.equals("short")) { cl = short.class; } else if (class_part.equals("char")) { cl = char.class; } else if (class_part.equals("int")) { cl = int.class; } else if (class_part.equals("long")) { cl = long.class; } else if (class_part.equals("float")) { cl = float.class; } else if (class_part.equals("double")) { cl = double.class; } else { // Not a primitive type so try resolving against java.lang.* or some // key classes in com.mckoi.database.* if (class_part.equals("ProcedureConnection")) { cl = ProcedureConnection.class; } else { try { cl = Class.forName("java.lang." + class_part); } catch (ClassNotFoundException i) { // No luck so give up, throw new RuntimeException("Java class not found: " + class_part); } } } // Finally make into a dimension if necessary if (dimensions > 0) { // This is a little untidy way of doing this. Perhaps a better approach // would be to make an array encoded string (eg. "[[Ljava.langString;"). cl = java.lang.reflect.Array.newInstance(cl, new int[dimensions]).getClass(); } return cl; } /** * Given a Java location_str and a list of parameter types, returns an * immutable 'Method' object that can be used to invoke a Java stored * procedure. The returned object can be cached if necessary. Note that * this method will generate an error for the following situations: * a) The invokation class or method was not found, b) there is not an * invokation method with the required number of arguments or that matches * the method specification. *

* Returns null if the invokation method could not be found. */ public static Method javaProcedureMethod( String location_str, TType[] param_types) { // Parse the location string String[] loc_parts = parseJavaLocationString(location_str); // The name of the class String class_name; // The name of the invokation method in the class. String method_name; // The object specification that must be matched. If any entry is 'null' // then the argument parameter is discovered. Class[] object_specification; boolean firstProcedureConnectionIgnore; if (loc_parts.length == 1) { // This means the location_str only specifies a class name, so we use // 'invoke' as the static method to call, and discover the arguments. class_name = loc_parts[0]; method_name = "invoke"; // All null which means we discover the arg types dynamically object_specification = new Class[param_types.length]; // ignore ProcedureConnection is first argument firstProcedureConnectionIgnore = true; } else { // This means we specify a class and method name and argument // specification. class_name = loc_parts[0]; method_name = loc_parts[1]; object_specification = new Class[loc_parts.length - 2]; for (int i = 0; i < loc_parts.length - 2; ++i) { String java_spec = loc_parts[i + 2]; object_specification[i] = resolveToClass(java_spec); } firstProcedureConnectionIgnore = false; } Class procedure_class; try { // Reference the procedure's class. 
procedure_class = Class.forName(class_name); } catch (ClassNotFoundException e) { throw new RuntimeException("Procedure class not found: " + class_name); } // Get all the methods in this class Method[] methods = procedure_class.getMethods(); Method invoke_method = null; // Search for the invoke method for (int i = 0; i < methods.length; ++i) { Method method = methods[i]; int modifier = method.getModifiers(); if (Modifier.isStatic(modifier) && Modifier.isPublic(modifier) && method.getName().equals(method_name)) { boolean params_match; // Get the parameters for this method Class[] method_args = method.getParameterTypes(); // If no methods, and object_specification has no args then this is a // match. if (method_args.length == 0 && object_specification.length == 0) { params_match = true; } else { int search_start = 0; // Is the first arugments a ProcedureConnection implementation? if (firstProcedureConnectionIgnore && ProcedureConnection.class.isAssignableFrom(method_args[0])) { search_start = 1; } // Do the number of arguments match if (object_specification.length == method_args.length - search_start) { // Do they match the specification? boolean match_spec = true; for (int n = 0; n < object_specification.length && match_spec == true; ++n) { Class ob_spec = object_specification[n]; if (ob_spec != null && ob_spec != method_args[n + search_start]) { match_spec = false; } } params_match = match_spec; } else { params_match = false; } } if (params_match) { if (invoke_method == null) { invoke_method = method; } else { throw new RuntimeException("Ambiguous public static " + method_name + " methods in stored procedure class '" + class_name + "'"); } } } } // Return the invoke method we found return invoke_method; } // ---------- Various procedure type invokation methods ---------- /** * Invokes a Java (type 1) procedure. A type 1 procedure is represented by * a single class with a static invokation method (called invoke). 
The * parameters of the static 'invoke' method must be compatible class * parameters defined for the procedure, and the return class must also be * compatible with the procedure return type. *

* If the invoke method does not contain arguments that are compatible with * the parameters given an exception is generated. *

* The class must only have a single public static 'invoke' method. If there * are multiple 'invoke' methods a runtime exception is generated. */ private TObject invokeJavaV1Procedure(ProcedureName procedure_name, String location_str, TType return_type, TType[] param_types, String owner, TObject[] param_values) { // Search for the invokation method for this stored procedure Method invoke_method = javaProcedureMethod(location_str, param_types); // Did we find an invoke method? if (invoke_method == null) { throw new RuntimeException("Could not find the invokation method for " + "the Java location string '" + location_str + "'"); } // Go through each argument of this class and work out how we are going // cast from the database engine object to the Java object. Class[] java_param_types = invoke_method.getParameterTypes(); // Is the first param a ProcedureConnection implementation? int start_param; Object[] java_values; if (java_param_types.length > 0 && ProcedureConnection.class.isAssignableFrom(java_param_types[0])) { start_param = 1; java_values = new Object[param_types.length + 1]; } else { start_param = 0; java_values = new Object[param_types.length]; } // For each type for (int i = 0; i < param_types.length; ++i) { TObject value = param_values[i]; TType proc_type = param_types[i]; Class java_type = java_param_types[i + start_param]; String java_type_str = java_type.getName(); // First null check, if (value.isNull()) { java_values[i + start_param] = null; } else { TType value_type = value.getTType(); // If not null, is the value and the procedure type compatible if (proc_type.comparableTypes(value_type)) { boolean error_cast = false; Object cast_value = null; // Compatible types, // Now we need to convert the parameter value into a Java object, if (value_type instanceof TStringType) { // A String type can be represented in Java as a java.lang.String, // or as a java.io.Reader. 
StringAccessor accessor = (StringAccessor) value.getObject(); if (java_type == java.lang.String.class) { cast_value = accessor.toString(); } else if (java_type == java.io.Reader.class) { cast_value = accessor.getReader(); } else { error_cast = true; } } else if (value_type instanceof TBooleanType) { // A boolean in Java is either java.lang.Boolean or primitive // boolean. if (java_type == java.lang.Boolean.class || java_type == boolean.class) { cast_value = value.getObject(); } else { error_cast = true; } } else if (value_type instanceof TDateType) { // A date translates to either java.util.Date, java.sql.Date, // java.sql.Timestamp, java.sql.Time. java.util.Date d = (java.util.Date) value.getObject(); if (java_type == java.util.Date.class) { cast_value = d; } else if (java_type == java.sql.Date.class) { cast_value = new java.sql.Date(d.getTime()); } else if (java_type == java.sql.Time.class) { cast_value = new java.sql.Time(d.getTime()); } else if (java_type == java.sql.Timestamp.class) { cast_value = new java.sql.Timestamp(d.getTime()); } else { error_cast = true; } } else if (value_type instanceof TNumericType) { // Number can be cast to any one of the Java numeric types BigNumber num = (BigNumber) value.getObject(); if (java_type == BigNumber.class) { cast_value = num; } else if (java_type == java.lang.Byte.class || java_type == byte.class) { cast_value = new Byte(num.byteValue()); } else if (java_type == java.lang.Short.class || java_type == short.class) { cast_value = new Short(num.shortValue()); } else if (java_type == java.lang.Integer.class || java_type == int.class) { cast_value = new Integer(num.intValue()); } else if (java_type == java.lang.Long.class || java_type == long.class) { cast_value = new Long(num.longValue()); } else if (java_type == java.lang.Float.class || java_type == float.class) { cast_value = new Float(num.floatValue()); } else if (java_type == java.lang.Double.class || java_type == double.class) { cast_value = new 
Double(num.doubleValue()); } else if (java_type == java.math.BigDecimal.class) { cast_value = num.asBigDecimal(); } else { error_cast = true; } } else if (value_type instanceof TBinaryType) { // A binary type can translate to a java.io.InputStream or a // byte[] array. BlobAccessor blob = (BlobAccessor) value.getObject(); if (java_type == java.io.InputStream.class) { cast_value = blob.getInputStream(); } else if (java_type == byte[].class) { byte[] buf = new byte[blob.length()]; try { InputStream in = blob.getInputStream(); int n = 0; int len = blob.length(); while (len > 0) { int count = in.read(buf, n, len); if (count == -1) { throw new IOException("End of stream."); } n += count; len -= count; } } catch (IOException e) { throw new RuntimeException("IO Error: " + e.getMessage()); } cast_value = buf; } else { error_cast = true; } } // If the cast of the parameter was not possible, report the error. if (error_cast) { throw new StatementException("Unable to cast argument " + i + " ... " + value_type.asSQLString() + " to " + java_type_str + " for procedure: " + procedureInfoString(procedure_name, return_type, param_types)); } // Set the java value for this parameter java_values[i + start_param] = cast_value; } else { // The parameter is not compatible - throw new StatementException("Parameter (" + i + ") not compatible " + value.getTType().asSQLString() + " -> " + proc_type.asSQLString() + " for procedure: " + procedureInfoString(procedure_name, return_type, param_types)); } } // if not null } // for each parameter // Create the user that has the privs of this procedure. User priv_user = new User(owner, connection.getDatabase(), "/Internal/Procedure/", System.currentTimeMillis()); // Create the ProcedureConnection object. ProcedureConnection proc_connection = connection.createProcedureConnection(priv_user); Object result; try { // Now the 'connection' will be set to the owner's user privs. // Set the ProcedureConnection object as an argument if necessary. 
if (start_param > 0) { java_values[0] = proc_connection; } // The java_values array should now contain the parameter values formatted // as Java objects. // Invoke the method try { result = invoke_method.invoke(null, java_values); } catch (IllegalAccessException e) { connection.Debug().writeException(e); throw new StatementException("Illegal access exception when invoking " + "stored procedure: " + e.getMessage()); } catch (InvocationTargetException e) { Throwable real_e = e.getTargetException(); connection.Debug().writeException(real_e); throw new StatementException("Procedure Exception: " + real_e.getMessage()); } } finally { connection.disposeProcedureConnection(proc_connection); } // If return_type is null, there is no result from this procedure (void) if (return_type == null) { return null; } else { // Cast to a valid return object and return. return TObject.createAndCastFromObject(return_type, result); } } // ---------- Inner classes ---------- /** * An object that models the list of procedures as table objects in a * transaction. 
*/ private static class ProcedureInternalTableInfo extends AbstractInternalTableInfo2 { ProcedureInternalTableInfo(Transaction transaction) { super(transaction, Database.SYS_FUNCTION); } private static DataTableDef createDataTableDef(String schema, String name) { // Create the DataTableDef that describes this entry DataTableDef def = new DataTableDef(); def.setTableName(new TableName(schema, name)); // Add column definitions def.addColumn(DataTableColumnDef.createStringColumn("type")); def.addColumn(DataTableColumnDef.createStringColumn("location")); def.addColumn(DataTableColumnDef.createStringColumn("return_type")); def.addColumn(DataTableColumnDef.createStringColumn("param_args")); def.addColumn(DataTableColumnDef.createStringColumn("owner")); // Set to immutable def.setImmutable(); // Return the data table def return def; } public String getTableType(int i) { return "FUNCTION"; } public DataTableDef getDataTableDef(int i) { TableName table_name = getTableName(i); return createDataTableDef(table_name.getSchema(), table_name.getName()); } public MutableTableDataSource createInternalTable(int index) { MutableTableDataSource table = transaction.getTable(Database.SYS_FUNCTION); RowEnumeration row_e = table.rowEnumeration(); int p = 0; int i; int row_i = -1; while (row_e.hasMoreRows()) { i = row_e.nextRowIndex(); if (p == index) { row_i = i; } else { ++p; } } if (p == index) { String schema = table.getCellContents(0, row_i).getObject().toString(); String name = table.getCellContents(1, row_i).getObject().toString(); final DataTableDef table_def = createDataTableDef(schema, name); final TObject type = table.getCellContents(2, row_i); final TObject location = table.getCellContents(3, row_i); final TObject return_type = table.getCellContents(4, row_i); final TObject param_types = table.getCellContents(5, row_i); final TObject owner = table.getCellContents(6, row_i); // Implementation of MutableTableDataSource that describes this // procedure. 
return new GTDataSource(transaction.getSystem()) { public DataTableDef getDataTableDef() { return table_def; } public int getRowCount() { return 1; } public TObject getCellContents(int col, int row) { switch (col) { case 0: return type; case 1: return location; case 2: return return_type; case 3: return param_types; case 4: return owner; default: throw new RuntimeException("Column out of bounds."); } } }; } else { throw new RuntimeException("Index out of bounds."); } } } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/ProcedureName.java000066400000000000000000000047051330501023400253170ustar00rootroot00000000000000/** * com.mckoi.database.ProcedureName 27 Feb 2003 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; /** * The name of a procedure as understood by a ProcedureManager. */ public class ProcedureName { /** * The schema of this procedure. */ private final String schema; /** * The name of this procedure. */ private final String name; /** * Constructs the ProcedureName. */ public ProcedureName(String schema, String name) { this.schema = schema; this.name = name; } /** * Constructs the ProcedureName from a TableName. */ public ProcedureName(TableName table_name) { this(table_name.getSchema(), table_name.getName()); } /** * Returns the schema of this procedure. 
*/ public String getSchema() { return schema; } /** * Returns the name of this procedure. */ public String getName() { return name; } /** * Returns this procedure name as a string. */ public String toString() { return schema + "." + name; } /** * Returns a version of this procedure qualified to the given schema (unless * the schema is present). */ public static ProcedureName qualify(String current_schema, String proc_name) { int delim = proc_name.indexOf("."); if (delim == -1) { return new ProcedureName(current_schema, proc_name); } else { return new ProcedureName(proc_name.substring(0, delim), proc_name.substring(delim + 1, proc_name.length())); } } /** * Equality test. */ public boolean equals(Object ob) { ProcedureName src_ob = (ProcedureName) ob; return (schema.equals(src_ob.schema) && name.equals(src_ob.name)); } /** * The hash key. */ public int hashCode() { return schema.hashCode() + name.hashCode(); } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/QueryContext.java000066400000000000000000000047711330501023400252430ustar00rootroot00000000000000/** * com.mckoi.database.QueryContext 05 Nov 2001 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; /** * Facts about a particular query including the root table sources, user name * of the controlling context, sequence state, etc. 
* * @author Tobias Downer */ public interface QueryContext { /** * Returns a TransactionSystem object that is used to determine information * about the transactional system. */ TransactionSystem getSystem(); /** * Returns the user name of the connection. */ String getUserName(); /** * Returns a FunctionLookup object used to convert FunctionDef objects to * Function objects when evaluating an expression. */ FunctionLookup getFunctionLookup(); // ---------- Sequences ---------- /** * Increments the sequence generator and returns the next unique key. */ long nextSequenceValue(String generator_name); /** * Returns the current sequence value returned for the given sequence * generator within the connection defined by this context. If a value was * not returned for this connection then a statement exception is generated. */ long currentSequenceValue(String generator_name); /** * Sets the current sequence value for the given sequence generator. */ void setSequenceValue(String generator_name, long value); // ---------- Caching ---------- /** * Marks a table in a query plan. */ void addMarkedTable(String mark_name, Table table); /** * Returns a table that was marked in a query plan or null if no mark was * found. */ Table getMarkedTable(String mark_name); /** * Put a Table into the cache. */ void putCachedNode(long id, Table table); /** * Returns a cached table or null if it isn't cached. */ Table getCachedNode(long id); /** * Clears the cache of any cached tables. */ void clearCache(); } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/QueryPlan.java000066400000000000000000001522671330501023400245150ustar00rootroot00000000000000/** * com.mckoi.database.QueryPlan 06 Nov 2001 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import java.util.List; import java.util.ArrayList; /** * Various helper methods for constructing a plan tree, and the plan node * implementations themselves. * * @author Tobias Downer */ public class QueryPlan { /** * Convenience, replaces all elements of the array with clone versions of * themselves. */ private static void cloneArray(Variable[] array) throws CloneNotSupportedException { if (array != null) { for (int i = 0; i < array.length; ++i) { array[i] = (Variable) array[i].clone(); } } } /** * Convenience, replaces all elements of the array with clone versions of * themselves. */ private static void cloneArray(Expression[] array) throws CloneNotSupportedException { if (array != null) { for (int i = 0; i < array.length; ++i) { array[i] = (Expression) array[i].clone(); } } } private static void indentBuffer(int level, StringBuffer buf) { for (int i = 0; i < level; ++i) { buf.append(' '); } } // ---------- Plan node implementations ---------- /** * A QueryPlanNode with a single child. */ public static abstract class SingleQueryPlanNode implements QueryPlanNode { static final long serialVersionUID = -6753991881140638658L; /** * The single child node. */ protected QueryPlanNode child; /** * Constructor. */ protected SingleQueryPlanNode(QueryPlanNode child) { this.child = child; } /** * Returns the child plan. */ public QueryPlanNode child() { return child; } /** * Default implementation delegates responsibility to child. 
  */
  // Default implementation: delegate table name discovery to the child node.
  public ArrayList discoverTableNames(ArrayList list) {
    return child.discoverTableNames(list);
  }

  /**
   * Default implementation that discovers correlated variables for the
   * given offset level.
   */
  public ArrayList discoverCorrelatedVariables(int level, ArrayList list) {
    return child.discoverCorrelatedVariables(level, list);
  }

  /**
   * Deep clone.  Clones this node and, recursively, the child plan.
   */
  public Object clone() throws CloneNotSupportedException {
    SingleQueryPlanNode node = (SingleQueryPlanNode) super.clone();
    node.child = (QueryPlanNode) child.clone();
    return node;
  }

  public String titleString() {
    return getClass().getName();
  }

  public void debugString(int level, StringBuffer buf) {
    indentBuffer(level, buf);
    buf.append(titleString());
    buf.append('\n');
    child.debugString(level + 2, buf);
  }

}

/**
 * A QueryPlanNode that is a branch with two child nodes.
 */
public static abstract class BranchQueryPlanNode implements QueryPlanNode {

  static final long serialVersionUID = 2938130775577221138L;

  /**
   * The left and right node.
   */
  protected QueryPlanNode left, right;

  /**
   * The Constructor.
   */
  protected BranchQueryPlanNode(QueryPlanNode left, QueryPlanNode right) {
    this.left = left;
    this.right = right;
  }

  /**
   * Returns the left node.
   */
  public QueryPlanNode left() {
    return left;
  }

  /**
   * Returns the right node.
   */
  public QueryPlanNode right() {
    return right;
  }

  /**
   * Default implementation delegates responsibility to children.
   */
  public ArrayList discoverTableNames(ArrayList list) {
    return right.discoverTableNames(
               left.discoverTableNames(list));
  }

  /**
   * Default implementation that discovers correlated variables for the
   * given offset level.
   */
  public ArrayList discoverCorrelatedVariables(int level, ArrayList list) {
    return right.discoverCorrelatedVariables(level,
               left.discoverCorrelatedVariables(level, list));
  }

  /**
   * Deep clone.  Clones this node and, recursively, both child plans.
   */
  public Object clone() throws CloneNotSupportedException {
    BranchQueryPlanNode node = (BranchQueryPlanNode) super.clone();
    node.left = (QueryPlanNode) left.clone();
    node.right = (QueryPlanNode) right.clone();
    return node;
  }

  public String titleString() {
    return getClass().getName();
  }

  public void debugString(int level, StringBuffer buf) {
    indentBuffer(level, buf);
    buf.append(titleString());
    buf.append('\n');
    left.debugString(level + 2, buf);
    right.debugString(level + 2, buf);
  }

}

/**
 * The node for fetching a table from the current transaction.  This is
 * a tree node and has no children.
 */
public static class FetchTableNode implements QueryPlanNode {

  static final long serialVersionUID = 7545493568015241717L;

  /**
   * The name of the table to fetch.
   */
  private TableName table_name;

  /**
   * The name to alias the table as.
   */
  private TableName alias_name;

  public FetchTableNode(TableName table_name, TableName aliased_as) {
    this.table_name = table_name;
    this.alias_name = aliased_as;
  }

  /**
   * Adds the table name to the list if it's not already in there.
   */
  public ArrayList discoverTableNames(ArrayList list) {
    if (!list.contains(table_name)) {
      list.add(table_name);
    }
    return list;
  }

  public Table evaluate(QueryContext context) {
    // MILD HACK: Cast the context to a DatabaseQueryContext
    DatabaseQueryContext db_context = (DatabaseQueryContext) context;
    DataTable t = db_context.getTable(table_name);
    // Wrap in a ReferenceTable when the table was aliased so columns
    // resolve against the alias rather than the real table name.
    if (alias_name != null) {
      return new ReferenceTable(t, alias_name);
    }
    return t;
  }

  public ArrayList discoverCorrelatedVariables(int level, ArrayList list) {
    // A leaf fetch node can't contain correlated variables.
    return list;
  }

  public Object clone() throws CloneNotSupportedException {
    return super.clone();
  }

  public String titleString() {
    return "FETCH: " + table_name + " AS " + alias_name;
  }

  public void debugString(int level, StringBuffer buf) {
    indentBuffer(level, buf);
    buf.append(titleString());
    buf.append('\n');
  }

}

/**
 * A node for creating a table with a single row.  This table is useful for
 * queries that have no underlying row.  For example, a pure functional
 * table expression.
 */
public static class SingleRowTableNode implements QueryPlanNode {

  static final long serialVersionUID = -7180494964138911604L;

  public SingleRowTableNode() {
  }

  public ArrayList discoverTableNames(ArrayList list) {
    return list;
  }

  public Table evaluate(QueryContext context) {
    // MILD HACK: Cast the context to a DatabaseQueryContext
    DatabaseQueryContext db_context = (DatabaseQueryContext) context;
    return db_context.getDatabase().getSingleRowTable();
  }

  public ArrayList discoverCorrelatedVariables(int level, ArrayList list) {
    return list;
  }

  public Object clone() throws CloneNotSupportedException {
    return super.clone();
  }

  public String titleString() {
    return "SINGLE ROW";
  }

  public void debugString(int level, StringBuffer buf) {
    indentBuffer(level, buf);
    buf.append(titleString());
    buf.append('\n');
  }

}

/**
 * The node that fetches a view from the current connection.  This is a
 * tree node that has no children, however the child can be created by
 * calling the 'createViewChildNode' method.  This node can be removed from a
 * plan tree by calling the 'createViewChildNode' method and substituting this
 * node with the returned child.  For a planner that normalizes and optimizes
 * plan trees, this is a useful feature.
 */
public static class FetchViewNode implements QueryPlanNode {

  static final long serialVersionUID = -6557333346211179284L;

  /**
   * The name of the view to fetch.
   */
  private TableName table_name;

  /**
   * The name to alias the table as.
   */
  private TableName alias_name;

  public FetchViewNode(TableName table_name, TableName aliased_as) {
    this.table_name = table_name;
    this.alias_name = aliased_as;
  }

  /**
   * Returns the QueryPlanNode that resolves to the view.  This looks up the
   * query plan in the context given.
   */
  public QueryPlanNode createViewChildNode(QueryContext context) {
    DatabaseQueryContext db = (DatabaseQueryContext) context;
    return db.createViewQueryPlanNode(table_name);
  }

  /**
   * Adds the table name to the list if it's not already in there.
   */
  public ArrayList discoverTableNames(ArrayList list) {
    if (!list.contains(table_name)) {
      list.add(table_name);
    }
    return list;
  }

  public Table evaluate(QueryContext context) {
    // Create the view child node
    QueryPlanNode node = createViewChildNode(context);
    // Evaluate the plan
    Table t = node.evaluate(context);
    if (alias_name != null) {
      return new ReferenceTable(t, alias_name);
    }
    else {
      return t;
    }
  }

  public ArrayList discoverCorrelatedVariables(int level, ArrayList list) {
    return list;
  }

  public Object clone() throws CloneNotSupportedException {
    return super.clone();
  }

  public String titleString() {
    return "VIEW: " + table_name + " AS " + alias_name;
  }

  public void debugString(int level, StringBuffer buf) {
    indentBuffer(level, buf);
    buf.append(titleString());
    buf.append('\n');
  }

}

/**
 * The node for performing a simple indexed query on a single column of the
 * child node.  Finds the set from the child node that matches the range.
 * <p>
 * The given Expression object must conform to a number of rules.  It may
 * reference only one column in the child node.  It must consist of only
 * simple mathemetical and logical operators (<, >, =, <>, >=, <=, AND, OR).
 * The left side of each mathematical operator must be a variable, and the
 * right side must be a constant (parameter subsitution or correlated value).
 * For example;
 *   (col > 10 AND col < 100) OR col > 1000 OR col == 10
 * <p>
 * Breaking any of these rules will mean the range select can not happen.
 */
public static class RangeSelectNode extends SingleQueryPlanNode {

  static final long serialVersionUID = -108747827391465748L;

  /**
   * A simple expression that represents the range to select.  See the
   * class comments for a description for how this expression must be
   * formed.
   */
  private Expression expression;

  public RangeSelectNode(QueryPlanNode child, Expression exp) {
    super(child);
    this.expression = exp;
  }

  /**
   * Given an Expression, this will return a list of expressions that can be
   * safely executed as a set of 'and' operations.  For example, an
   * expression of 'a=9 and b=c and d=2' would return the list; 'a=9','b=c',
   * 'd=2'.
   * <p>
   * If non 'and' operators are found then the reduction stops.
   */
  private ArrayList createAndList(ArrayList list, Expression exp) {
    return exp.breakByOperator(list, "and");
  }

  /**
   * Updates a range with the given expression.  The expression must be a
   * simple 'var op constant' term; its RHS is evaluated and intersected
   * into the range set.
   */
  private void updateRange(QueryContext context, SelectableRangeSet range,
                           DataTableColumnDef field, Expression e) {
    Operator op = (Operator) e.last();
    Expression[] exps = e.split();
    // Evaluate to an object
    TObject cell = exps[1].evaluate(null, null, context);

    // If the evaluated object is not of a comparable type, then it becomes
    // null.
    TType field_type = field.getTType();
    if (!cell.getTType().comparableTypes(field_type)) {
      cell = new TObject(field_type, null);
    }

    // Intersect this in the range set
    range.intersect(op, cell);
  }

  /**
   * Calculates a list of SelectableRange objects that represent the range
   * of the expression.  Recurses through AND/OR nodes: AND terms are
   * intersected into 'range', OR branches are computed separately and
   * unioned in.
   */
  private void calcRange(final QueryContext context,
                         final DataTableColumnDef field,
                         final SelectableRangeSet range,
                         final Expression exp) {
    Operator op = (Operator) exp.last();
    if (op.isLogical()) {
      if (op.is("and")) {
        ArrayList and_list = createAndList(new ArrayList(), exp);
        int sz = and_list.size();
        for (int i = 0; i < sz; ++i) {
          updateRange(context, range, field, (Expression) and_list.get(i));
        }
      }
      else if (op.is("or")) {
        // Split left and right of logical operator.
        Expression[] exps = exp.split();
        // Calculate the range of the left and right
        SelectableRangeSet left = new SelectableRangeSet();
        calcRange(context, field, left, exps[0]);
        SelectableRangeSet right = new SelectableRangeSet();
        calcRange(context, field, right, exps[1]);

        // Union the left and right range with the current range
        range.union(left);
        range.union(right);
      }
      else {
        throw new Error("Unrecognised logical operator.");
      }
    }
    else {
      // Not an operator so this is the value.
      updateRange(context, range, field, exp);
    }
  }

  public Table evaluate(QueryContext context) {
    Table t = child.evaluate(context);

    Expression exp = expression;

    // Assert that all variables in the expression are identical.
    List all_vars = exp.allVariables();
    Variable v = null;
    int sz = all_vars.size();
    for (int i = 0; i < sz; ++i) {
      Variable cv = (Variable) all_vars.get(i);
      if (v != null) {
        if (!cv.equals(v)) {
          throw new Error("Assertion failed: " +
                          "Range plan does not contain common variable.");
        }
      }
      v = cv;
    }

    // Find the variable field in the table.
    int col = t.findFieldName(v);
    if (col == -1) {
      throw new Error("Couldn't find column reference in table: " + v);
    }
    DataTableColumnDef field = t.getColumnDefAt(col);
    // Calculate the range
    SelectableRangeSet range = new SelectableRangeSet();
    calcRange(context, field, range, exp);

//    System.out.println("RANGE: ");
//    System.out.println(range);

    // Select the range from the table
    SelectableRange[] ranges = range.toSelectableRangeArray();
    return t.rangeSelect(v, ranges);

  }

  public ArrayList discoverTableNames(ArrayList list) {
    return expression.discoverTableNames(super.discoverTableNames(list));
  }

  public ArrayList discoverCorrelatedVariables(int level, ArrayList list) {
//    System.out.println(expression);
    return expression.discoverCorrelatedVariables(level,
             super.discoverCorrelatedVariables(level, list));
  }

  public Object clone() throws CloneNotSupportedException {
    RangeSelectNode node = (RangeSelectNode) super.clone();
    node.expression = (Expression) expression.clone();
    return node;
  }

  public String titleString() {
    return "RANGE: " + expression;
  }

}

/**
 * The node for performing a simple select operation on a table.  The simple
 * select requires a LHS variable, an operator, and an expression
 * representing the RHS.
 */
public static class SimpleSelectNode extends SingleQueryPlanNode {

  static final long serialVersionUID = 5502157970886270867L;

  /**
   * The LHS variable.
   */
  private Variable left_var;

  /**
   * The operator to select under (=, <>, >, <, >=, <=).
   */
  private Operator op;

  /**
   * The RHS expression.
   */
  private Expression right_expression;

  public SimpleSelectNode(QueryPlanNode child, Variable left_var,
                          Operator op, Expression right_expression) {
    super(child);
    this.left_var = left_var;
    this.op = op;
    this.right_expression = right_expression;
  }

  public Table evaluate(QueryContext context) {
    // Solve the child branch result
    Table table = child.evaluate(context);

    // The select operation.
    return table.simpleSelect(context, left_var, op, right_expression);
  }

  public ArrayList discoverTableNames(ArrayList list) {
    return right_expression.discoverTableNames(
                                       super.discoverTableNames(list));
  }

  public ArrayList discoverCorrelatedVariables(int level, ArrayList list) {
    return right_expression.discoverCorrelatedVariables(level,
             super.discoverCorrelatedVariables(level, list));
  }

  public Object clone() throws CloneNotSupportedException {
    SimpleSelectNode node = (SimpleSelectNode) super.clone();
    node.left_var = (Variable) left_var.clone();
    node.right_expression = (Expression) right_expression.clone();
    return node;
  }

  public String titleString() {
    return "SIMPLE: " + left_var + op + right_expression;
  }

}

/**
 * The node for performing an equi-select on a group of columns of the
 * child node.  This is a separate node instead of chained
 * IndexedSelectNode's so that we might exploit multi-column indexes.
 */
public static class MultiColumnEquiSelectNode extends SingleQueryPlanNode {

  static final long serialVersionUID = -1407710412096857588L;

  /**
   * The list of columns to select the range of.
   */
  private Variable[] columns;

  /**
   * The values of the cells to equi-select (must be constant expressions).
   */
  private Expression[] values;

  public MultiColumnEquiSelectNode(QueryPlanNode child,
                                   Variable[] columns, Expression[] values) {
    super(child);
    this.columns = columns;
    this.values = values;
  }

  public Table evaluate(QueryContext context) {
    Table t = child.evaluate(context);

    // PENDING: Exploit multi-column indexes when they are implemented...

    // We select each column in turn
    Operator EQUALS_OP = Operator.get("=");
    for (int i = 0; i < columns.length; ++i) {
      t = t.simpleSelect(context, columns[i], EQUALS_OP, values[i]);
    }

    return t;
  }

  // NOTE(review): discovery is not implemented for this node type; planners
  // must not place it where table name discovery is required.
  public ArrayList discoverTableNames(ArrayList list) {
    throw new Error("PENDING");
  }

  public ArrayList discoverCorrelatedVariables(int level, ArrayList list) {
    throw new Error("PENDING");
  }

  public Object clone() throws CloneNotSupportedException {
    MultiColumnEquiSelectNode node =
                           (MultiColumnEquiSelectNode) super.clone();
    cloneArray(node.columns);
    cloneArray(node.values);
    return node;
  }

}

/**
 * The node for performing a functional select operation on the child node.
 * Some examples of this type of query are;
 *   CONCAT(a, ' ', b) > 'abba boh'
 *   TONUMBER(DATEFORMAT(a, 'yyyy')) > 2001
 *   LOWER(a) < 'ook'
 * The reason this is a separate node is because it is possible to exploit
 * a functional indexes on a table with this node.
 * <p>
 * The given expression MUST be of the form;
 *   'function_expression' 'operator' 'constant'
 */
public static class FunctionalSelectNode extends SingleQueryPlanNode {

  static final long serialVersionUID = -1428022600352236457L;

  /**
   * The function expression (eg. CONCAT(a, ' ', b) == 'abba bo').
   */
  private Expression expression;

  public FunctionalSelectNode(QueryPlanNode child, Expression exp) {
    super(child);
    this.expression = exp;
  }

  public Table evaluate(QueryContext context) {
    Table t = child.evaluate(context);
    // NOTE: currently this uses exhaustive select but should exploit
    //   function indexes when they are available.
    return t.exhaustiveSelect(context, expression);
  }

  public ArrayList discoverTableNames(ArrayList list) {
    return expression.discoverTableNames(super.discoverTableNames(list));
  }

  public ArrayList discoverCorrelatedVariables(int level, ArrayList list) {
    return expression.discoverCorrelatedVariables(level,
             super.discoverCorrelatedVariables(level, list));
  }

  public Object clone() throws CloneNotSupportedException {
    FunctionalSelectNode node = (FunctionalSelectNode) super.clone();
    node.expression = (Expression) expression.clone();
    return node;
  }

}

/**
 * The node for performing a exhaustive select operation on the child node.
 * This node will iterate through the entire child result and all
 * results that evaulate to true are included in the result.
 * <p>
 * NOTE: The Expression may have correlated sub-queries.
 */
public static class ExhaustiveSelectNode extends SingleQueryPlanNode {

  static final long serialVersionUID = -2005551680157574172L;

  /**
   * The search expression.
   */
  private Expression expression;

  public ExhaustiveSelectNode(QueryPlanNode child, Expression exp) {
    super(child);
    this.expression = exp;
  }

  public Table evaluate(QueryContext context) {
    Table t = child.evaluate(context);
    return t.exhaustiveSelect(context, expression);
  }

  public ArrayList discoverTableNames(ArrayList list) {
    return expression.discoverTableNames(super.discoverTableNames(list));
  }

  public ArrayList discoverCorrelatedVariables(int level, ArrayList list) {
    return expression.discoverCorrelatedVariables(level,
             super.discoverCorrelatedVariables(level, list));
  }

  public Object clone() throws CloneNotSupportedException {
    ExhaustiveSelectNode node = (ExhaustiveSelectNode) super.clone();
    node.expression = (Expression) expression.clone();
    return node;
  }

  public String titleString() {
    return "EXHAUSTIVE: " + expression;
  }

}

/**
 * The node for evaluating an expression that contains entirely constant
 * values (no variables).
 */
public static class ConstantSelectNode extends SingleQueryPlanNode {

  static final long serialVersionUID = -4435336817396073146L;

  /**
   * The search expression.
   */
  private Expression expression;

  public ConstantSelectNode(QueryPlanNode child, Expression exp) {
    super(child);
    this.expression = exp;
  }

  public Table evaluate(QueryContext context) {
    // Evaluate the expression
    TObject v = expression.evaluate(null, null, context);
    // If it evaluates to NULL or FALSE then return an empty set
    if (v.isNull() || v.getObject().equals(Boolean.FALSE)) {
      return child.evaluate(context).emptySelect();
    }
    else {
      return child.evaluate(context);
    }
  }

  public ArrayList discoverTableNames(ArrayList list) {
    return expression.discoverTableNames(super.discoverTableNames(list));
  }

  public ArrayList discoverCorrelatedVariables(int level, ArrayList list) {
    return expression.discoverCorrelatedVariables(level,
             super.discoverCorrelatedVariables(level, list));
  }

  public Object clone() throws CloneNotSupportedException {
    ConstantSelectNode node = (ConstantSelectNode) super.clone();
    node.expression = (Expression) expression.clone();
    return node;
  }

  public String titleString() {
    return "CONSTANT: " + expression;
  }

}

/**
 * The node for evaluating a simple pattern search on a table which
 * includes a single left hand variable or constant, a pattern type (LIKE,
 * NOT LIKE or REGEXP), and a right hand constant (eg. 'T__y').  If the
 * expression is not in this form then this node will not operate
 * correctly.
 */
public static class SimplePatternSelectNode extends SingleQueryPlanNode {

  static final long serialVersionUID = -8247282157310682761L;

  /**
   * The search expression.
   */
  private Expression expression;

  public SimplePatternSelectNode(QueryPlanNode child, Expression exp) {
    super(child);
    this.expression = exp;
  }

  public Table evaluate(QueryContext context) {
    // Evaluate the child
    Table t = child.evaluate(context);
    // Perform the pattern search expression on the table.
    // Split the expression,
    Expression[] exps = expression.split();
    Variable lhs_var = exps[0].getVariable();
    if (lhs_var != null) {
      // LHS is a simple variable so do a simple select
      Operator op = (Operator) expression.last();
      return t.simpleSelect(context, lhs_var, op, exps[1]);
    }
    else {
      // LHS must be a constant so we can just evaluate the expression
      // and see if we get true, false, null, etc.
      TObject v = expression.evaluate(null, context);
      // If it evaluates to NULL or FALSE then return an empty set
      if (v.isNull() || v.getObject().equals(Boolean.FALSE)) {
        return t.emptySelect();
      }
      else {
        return t;
      }
    }
  }

  public ArrayList discoverTableNames(ArrayList list) {
    return expression.discoverTableNames(super.discoverTableNames(list));
  }

  public ArrayList discoverCorrelatedVariables(int level, ArrayList list) {
    return expression.discoverCorrelatedVariables(level,
             super.discoverCorrelatedVariables(level, list));
  }

  public Object clone() throws CloneNotSupportedException {
    SimplePatternSelectNode node = (SimplePatternSelectNode) super.clone();
    node.expression = (Expression) expression.clone();
    return node;
  }

  public String titleString() {
    return "PATTERN: " + expression;
  }

}

/**
 * The node for finding a subset and renaming the columns of the results in
 * the child node.
 */
public static class SubsetNode extends SingleQueryPlanNode {

  static final long serialVersionUID = 3784462788248510832L;

  /**
   * The original columns in the child that we are to make the subset of.
   */
  private Variable[] original_columns;

  /**
   * New names to assign the columns.
   */
  private Variable[] new_column_names;

  public SubsetNode(QueryPlanNode child,
                    Variable[] original_columns,
                    Variable[] new_column_names) {
    super(child);
    this.original_columns = original_columns;
    this.new_column_names = new_column_names;
  }

  public Table evaluate(QueryContext context) {
    Table t = child.evaluate(context);

    int sz = original_columns.length;
    int[] col_map = new int[sz];

//    // DEBUG
//    for (int n = 0; n < t.getColumnCount(); ++n) {
//      System.out.print(t.getResolvedVariable(n).toTechString());
//      System.out.print(", ");
//    }
//    System.out.println();
//    // - DEBUG

    for (int i = 0; i < sz; ++i) {
//      // DEBUG
//      System.out.print(t.getClass() + ".findFieldName(" +
//                       original_columns[i].toTechString() + ") = ");
//      // - DEBUG
      col_map[i] = t.findFieldName(original_columns[i]);
//      // DEBUG
//      System.out.println(col_map[i]);
//      // - DEBUG
    }

    SubsetColumnTable col_table = new SubsetColumnTable(t);
    col_table.setColumnMap(col_map, new_column_names);

    return col_table;
  }

  // ---------- Set methods ----------

  /**
   * Sets the given table name of the resultant table.  This is intended
   * if we want to create a sub-query that has an aliased table name.
   * NOTE(review): this mutates each Variable in 'new_column_names' in
   * place rather than replacing the array.
   */
  public void setGivenName(TableName name) {
//    given_name = name;
    if (name != null) {
      int sz = new_column_names.length;
      for (int i = 0; i < sz; ++i) {
        new_column_names[i].setTableName(name);
      }
    }
  }

  // ---------- Get methods ----------

  /**
   * Returns the list of original columns that represent the mappings from
   * the columns in this subset.
   */
  public Variable[] getOriginalColumns() {
    return original_columns;
  }

  /**
   * Returns the list of new column names that represent the new columns
   * in this subset.
   */
  public Variable[] getNewColumnNames() {
    return new_column_names;
  }

  public Object clone() throws CloneNotSupportedException {
    SubsetNode node = (SubsetNode) super.clone();
    cloneArray(node.original_columns);
    cloneArray(node.new_column_names);
    return node;
  }

  public String titleString() {
    StringBuffer buf = new StringBuffer();
    buf.append("SUBSET: ");
    for (int i = 0; i < new_column_names.length; ++i) {
      buf.append(new_column_names[i]);
      buf.append("->");
      buf.append(original_columns[i]);
      buf.append(", ");
    }
    return new String(buf);
  }

}

/**
 * The node for performing a distinct operation on the given columns of the
 * child node.
 */
public static class DistinctNode extends SingleQueryPlanNode {

  static final long serialVersionUID = -1538264313804102373L;

  /**
   * The list of columns to be distinct.
   */
  private Variable[] columns;

  public DistinctNode(QueryPlanNode child, Variable[] columns) {
    super(child);
    this.columns = columns;
  }

  public Table evaluate(QueryContext context) {
    Table t = child.evaluate(context);
    int sz = columns.length;
    int[] col_map = new int[sz];
    for (int i = 0; i < sz; ++i) {
      col_map[i] = t.findFieldName(columns[i]);
    }
    return t.distinct(col_map);
  }

  public Object clone() throws CloneNotSupportedException {
    DistinctNode node = (DistinctNode) super.clone();
    cloneArray(node.columns);
    return node;
  }

  public String titleString() {
    StringBuffer buf = new StringBuffer();
    buf.append("DISTINCT: (");
    for (int i = 0; i < columns.length; ++i) {
      buf.append(columns[i]);
      buf.append(", ");
    }
    buf.append(")");
    return new String(buf);
  }

}

/**
 * The node for performing a sort operation on the given columns of the
 * child node.
 */
public static class SortNode extends SingleQueryPlanNode {

  static final long serialVersionUID = 3644480534542996928L;

  /**
   * The list of columns to sort.
   */
  private Variable[] columns;

  /**
   * Whether to sort the column in ascending or descending order
   */
  private boolean[] correct_ascending;

  public SortNode(QueryPlanNode child, Variable[] columns,
                  boolean[] ascending) {
    super(child);
    this.columns = columns;
    this.correct_ascending = ascending;

    // How we handle ascending/descending order
    // ----------------------------------------
    // Internally to the database, all columns are naturally ordered in
    // ascending order (start at lowest and end on highest).  When a column
    // is ordered in descending order, a fast way to achieve this is to take
    // the ascending set and reverse it.  This works for single columns,
    // however some thought is required for handling multiple column.  We
    // order columns from RHS to LHS.  If LHS is descending then this will
    // order the RHS incorrectly if we leave as is.  Therefore, we must do
    // some pre-processing that looks ahead on any descending orders and
    // reverses the order of the columns to the right.  This pre-processing
    // is done in the first pass.

    // NOTE(review): this loop mutates the caller's 'ascending' array in
    // place ('correct_ascending' is the same array object) - callers must
    // not reuse the array after constructing this node.
    int sz = ascending.length;
    for (int n = 0; n < sz - 1; ++n) {
      if (!ascending[n]) {   // if descending...
        // Reverse order of all columns to the right...
        for (int p = n + 1; p < sz; ++p) {
          ascending[p] = !ascending[p];
        }
      }
    }

  }

  public Table evaluate(QueryContext context) {
    Table t = child.evaluate(context);
    // Sort the results by the columns in reverse-safe order.
int sz = correct_ascending.length; for (int n = sz - 1; n >= 0; --n) { t = t.orderByColumn(columns[n], correct_ascending[n]); } return t; } public Object clone() throws CloneNotSupportedException { SortNode node = (SortNode) super.clone(); cloneArray(node.columns); return node; } public String titleString() { StringBuffer buf = new StringBuffer(); buf.append("SORT: ("); for (int i = 0; i < columns.length; ++i) { buf.append(columns[i]); if (correct_ascending[i]) { buf.append(" ASC"); } else { buf.append(" DESC"); } buf.append(", "); } buf.append(")"); return new String(buf); } } /** * The node for performing a grouping operation on the columns of the child * node. As well as grouping, any aggregate functions must also be defined * with this plan. *

* NOTE: The whole child is a group if columns is null. */ public static class GroupNode extends SingleQueryPlanNode { static final long serialVersionUID = 7140928678192396348L; /** * The columns to group by. */ private Variable[] columns; /** * The group max column. */ private Variable group_max_column; /** * Any aggregate functions (or regular function columns) that are to be * planned. */ private Expression[] function_list; /** * The list of names to give each function table. */ private String[] name_list; /** * Groups over the given columns from the child. */ public GroupNode(QueryPlanNode child, Variable[] columns, Variable group_max_column, Expression[] function_list, String[] name_list) { super(child); this.columns = columns; this.group_max_column = group_max_column; this.function_list = function_list; this.name_list = name_list; } /** * Groups over the entire child (always ends in 1 result in set). */ public GroupNode(QueryPlanNode child, Variable group_max_column, Expression[] function_list, String[] name_list) { this(child, null, group_max_column, function_list, name_list); } public Table evaluate(QueryContext context) { Table child_table = child.evaluate(context); DatabaseQueryContext db_context = (DatabaseQueryContext) context; FunctionTable fun_table = new FunctionTable(child_table, function_list, name_list, db_context); // If no columns then it is implied the whole table is the group. 
if (columns == null) { fun_table.setWholeTableAsGroup(); } else { fun_table.createGroupMatrix(columns); } return fun_table.mergeWithReference(group_max_column); } public ArrayList discoverTableNames(ArrayList list) { list = super.discoverTableNames(list); for (int i = 0; i < function_list.length; ++i) { list = function_list[i].discoverTableNames(list); } return list; } public ArrayList discoverCorrelatedVariables(int level, ArrayList list) { list = super.discoverCorrelatedVariables(level, list); for (int i = 0; i < function_list.length; ++i) { list = function_list[i].discoverCorrelatedVariables(level, list); } return list; } public Object clone() throws CloneNotSupportedException { GroupNode node = (GroupNode) super.clone(); cloneArray(node.columns); cloneArray(node.function_list); if (group_max_column != null) { node.group_max_column = (Variable) group_max_column.clone(); } else { node.group_max_column = null; } return node; } public String titleString() { StringBuffer buf = new StringBuffer(); buf.append("GROUP: ("); if (columns == null) { buf.append("WHOLE TABLE"); } else { for (int i = 0; i < columns.length; ++i) { buf.append(columns[i]); buf.append(", "); } } buf.append(")"); if (function_list != null) { buf.append(" FUNS: ["); for (int i = 0; i < function_list.length; ++i) { buf.append(function_list[i]); buf.append(", "); } buf.append("]"); } return new String(buf); } } /** * The node for merging the child node with a set of new function columns * over the entire result. For example, we may want to add an expression * 'a + 10' or 'coalesce(a, b, 1)'. */ public static class CreateFunctionsNode extends SingleQueryPlanNode { static final long serialVersionUID = -181012844247626327L; /** * The list of functions to create. */ private Expression[] function_list; /** * The list of names to give each function table. */ private String[] name_list; /** * Constructor. 
   */
  public CreateFunctionsNode(QueryPlanNode child, Expression[] function_list,
                             String[] name_list) {
    super(child);
    this.function_list = function_list;
    this.name_list = name_list;
  }

  public Table evaluate(QueryContext context) {
    Table child_table = child.evaluate(context);
    DatabaseQueryContext db_context = (DatabaseQueryContext) context;
    FunctionTable fun_table =
        new FunctionTable(child_table, function_list, name_list, db_context);
    Table t = fun_table.mergeWithReference(null);
    return t;
  }

  public ArrayList discoverTableNames(ArrayList list) {
    list = super.discoverTableNames(list);
    for (int i = 0; i < function_list.length; ++i) {
      list = function_list[i].discoverTableNames(list);
    }
    return list;
  }

  public ArrayList discoverCorrelatedVariables(int level, ArrayList list) {
    list = super.discoverCorrelatedVariables(level, list);
    for (int i = 0; i < function_list.length; ++i) {
      list = function_list[i].discoverCorrelatedVariables(level, list);
    }
    return list;
  }

  public Object clone() throws CloneNotSupportedException {
    CreateFunctionsNode node = (CreateFunctionsNode) super.clone();
    cloneArray(node.function_list);
    return node;
  }

  public String titleString() {
    StringBuffer buf = new StringBuffer();
    buf.append("FUNCTIONS: (");
    for (int i = 0; i < function_list.length; ++i) {
      buf.append(function_list[i]);
      buf.append(", ");
    }
    buf.append(")");
    return new String(buf);
  }

}

/**
 * A marker node that takes the result of a child and marks it as a name
 * that can later be retrieved.  This is useful for implementing things
 * such as outer joins.
 */
public static class MarkerNode extends SingleQueryPlanNode {

  static final long serialVersionUID = -8321710589608765270L;

  /**
   * The name of this mark.
   */
  private String mark_name;

  /**
   * Constructor.
   */
  public MarkerNode(QueryPlanNode child, String mark_name) {
    super(child);
    this.mark_name = mark_name;
  }

  public Table evaluate(QueryContext context) {
    Table child_table = child.evaluate(context);
    // Register the evaluated child under 'mark_name' in the context so a
    // later node can retrieve it.
    context.addMarkedTable(mark_name, child_table);
    return child_table;
  }

  public Object clone() throws CloneNotSupportedException {
    return super.clone();
  }

  public String titleString() {
    return "MARKER: " + mark_name;
  }

}

/**
 * A cache point node that only evaluates the child if the result can not
 * be found in the cache with the given unique id.
 */
public static class CachePointNode extends SingleQueryPlanNode {

  static final long serialVersionUID = 7866310557831478639L;

  /**
   * The unique identifier of this cache point.
   */
  private long id;

  // Process-wide counter (and its lock) used to make each cache point id
  // unique in combination with the construction timestamp.
  private final static Object GLOB_LOCK = new Object();
  private static int GLOB_ID = 0;

  /**
   * Constructor.
   */
  public CachePointNode(QueryPlanNode child) {
    super(child);
    synchronized (GLOB_LOCK) {
      // id = (millis << 16) | 16-bit counter; the counter disambiguates
      // nodes created within the same millisecond.
      id = (System.currentTimeMillis() << 16) | (GLOB_ID & 0x0FFFF);
      ++GLOB_ID;
    }
  }

  public Table evaluate(QueryContext context) {
    // Is the result available in the context?
    Table child_table = context.getCachedNode(id);
    if (child_table == null) {
      // No so evaluate the child and cache it
      child_table = child.evaluate(context);
      context.putCachedNode(id, child_table);
    }
    return child_table;
  }

  public Object clone() throws CloneNotSupportedException {
    return super.clone();
  }

  public String titleString() {
    return "CACHE: " + id;
  }

}

/**
 * A branch node for naturally joining two tables together.  These branches
 * should be optimized out if possible because they result in huge results.
 */
public static class NaturalJoinNode extends BranchQueryPlanNode {

  static final long serialVersionUID = 942526205653132810L;

  public NaturalJoinNode(QueryPlanNode left, QueryPlanNode right) {
    super(left, right);
  }

  public Table evaluate(QueryContext context) {
    // Solve the left branch result
    Table left_result = left.evaluate(context);
    // Solve the Join (natural)
    return left_result.join(right.evaluate(context));
  }

  public String titleString() {
    return "NATURAL JOIN";
  }

}

/**
 * A branch node for equi-joining two tables together given two sets of
 * columns.  This is a seperate node from a general join operation to allow
 * for optimizations with multi-column indexes.
 * <p>
 * An equi-join is the most common type of join.
 * <p>
 * At query runtime, this decides the best best way to perform the join,
 * either by
 */
public static class EquiJoinNode extends BranchQueryPlanNode {

  static final long serialVersionUID = 113332589582049607L;

  /**
   * The columns in the left table.
   */
  private Variable[] left_columns;

  /**
   * The columns in the right table.
   */
  private Variable[] right_columns;

  public EquiJoinNode(QueryPlanNode left, QueryPlanNode right,
                      Variable[] left_cols, Variable[] right_cols) {
    super(left, right);
    this.left_columns = left_cols;
    this.right_columns = right_cols;
  }

  public Table evaluate(QueryContext context) {
    // Solve the left branch result
    Table left_result = left.evaluate(context);
    // Solve the right branch result
    Table right_result = right.evaluate(context);

    // PENDING: This needs to migrate to a better implementation that
    //   exploits multi-column indexes if one is defined that can be used.

    Variable first_left = left_columns[0];
    Variable first_right = right_columns[0];

    Operator EQUALS_OP = Operator.get("=");

    // Join on the first column pair with a simple (indexable) join.
    Table result = left_result.simpleJoin(context, right_result,
               first_left, EQUALS_OP, new Expression(first_right));

    int sz = left_columns.length;
    // If there are columns left to equi-join, we resolve the rest with a
    // single exhaustive select of the form,
    //   ( table1.col2 = table2.col2 AND table1.col3 = table2.col3 AND ... )
    if (sz > 1) {
      // Form the expression (postfix: push each var pair and '=', then
      // append the combining 'and' operators).
      Expression rest_expression = new Expression();
      for (int i = 1; i < sz; ++i) {
        Variable left_var = left_columns[i];
        Variable right_var = right_columns[i];
        rest_expression.addElement(left_var);
        rest_expression.addElement(right_var);
        rest_expression.addOperator(EQUALS_OP);
      }
      Operator AND_OP = Operator.get("and");
      for (int i = 2; i < sz; ++i) {
        rest_expression.addOperator(AND_OP);
      }
      result = result.exhaustiveSelect(context, rest_expression);
    }

    return result;
  }

  public Object clone() throws CloneNotSupportedException {
    EquiJoinNode node = (EquiJoinNode) super.clone();
    cloneArray(node.left_columns);
    cloneArray(node.right_columns);
    return node;
  }

}

/**
 * A branch node for a non-equi join between two tables.
 * <p>
 * NOTE: The cost of a LeftJoin is higher if the right child result is
 * greater than the left child result.  The plan should be arranged so
 * smaller results are on the left.
 */
public static class JoinNode extends BranchQueryPlanNode {

  static final long serialVersionUID = 4133205808616807832L;

  /**
   * The variable in the left table to be joined.
   */
  private Variable left_var;

  /**
   * The operator to join under (=, <>, >, <, >=, <=).
   */
  private Operator join_op;

  /**
   * The expression evaluated on the right table.
   */
  private Expression right_expression;

  public JoinNode(QueryPlanNode left, QueryPlanNode right,
                  Variable left_var, Operator join_op,
                  Expression right_expression) {
    super(left, right);
    this.left_var = left_var;
    this.join_op = join_op;
    this.right_expression = right_expression;
  }

  public Table evaluate(QueryContext context) {
    // Solve the left branch result
    Table left_result = left.evaluate(context);
    // Solve the right branch result
    Table right_result = right.evaluate(context);

    // If the right_expression is a simple variable then we have the option
    // of optimizing this join by putting the smallest table on the LHS.
    Variable rhs_var = right_expression.getVariable();
    Variable lhs_var = left_var;
    Operator op = join_op;
    if (rhs_var != null) {
      // We should arrange the expression so the right table is the smallest
      // of the sides.
      // If the left result is less than the right result
      if (left_result.getRowCount() < right_result.getRowCount()) {
        // Reverse the join
        // NOTE(review): this assignment overwrites the node's
        // 'right_expression' field (not a local), so evaluating the node
        // mutates it when the join is reversed.
        right_expression = new Expression(lhs_var);
        lhs_var = rhs_var;
        op = op.reverse();
        // Reverse the tables.
        Table t = right_result;
        right_result = left_result;
        left_result = t;
      }
    }

    // The join operation.
    return left_result.simpleJoin(context, right_result,
                                  lhs_var, op, right_expression);
  }

  public ArrayList discoverTableNames(ArrayList list) {
    return right_expression.discoverTableNames(
                                       super.discoverTableNames(list));
  }

  public ArrayList discoverCorrelatedVariables(int level, ArrayList list) {
    return right_expression.discoverCorrelatedVariables(level,
             super.discoverCorrelatedVariables(level, list));
  }

  public Object clone() throws CloneNotSupportedException {
    JoinNode node = (JoinNode) super.clone();
    node.left_var = (Variable) left_var.clone();
    node.right_expression = (Expression) right_expression.clone();
    return node;
  }

  public String titleString() {
    return "JOIN: " + left_var + join_op + right_expression;
  }

}

/**
 * A branch node for a left outer join.  Using this node is a little non-
 * intuitive.  This node will only work when used in conjuction with
 * MarkerNode.
 * <p>

* To use - first the complete left table in the join must be marked with a * name. Then the ON expression is evaluated to a single plan node. Then * this plan node must be added to result in a left outer join. A tree for * a left outer join may look as follows; *

   *            LeftOuterJoinNode
   *                    |
   *                Join a = b
   *               /          \
   *          Marker       GetTable T2
   *            |
   *       GetTable T1
   * 
*/ public static class LeftOuterJoinNode extends SingleQueryPlanNode { static final long serialVersionUID = 8908801499550863492L; /** * The name of the mark that points to the left table that represents * the complete set. */ private String complete_mark_name; public LeftOuterJoinNode(QueryPlanNode child, String complete_mark_name) { super(child); this.complete_mark_name = complete_mark_name; } public Table evaluate(QueryContext context) { // Evaluate the child branch, Table result = child.evaluate(context); // Get the table of the complete mark name, Table complete_left = context.getMarkedTable(complete_mark_name); // The rows in 'complete_left' that are outside (not in) the rows in the // left result. Table outside = complete_left.outside(result); // Create an OuterTable OuterTable outer_table = new OuterTable(result); outer_table.mergeIn(outside); // Return the outer table return outer_table; } public String titleString() { return "LEFT OUTER JOIN"; } } /** * A branch node for a logical union of two tables of identical types. This * branch can only work if the left and right children have exactly the same * ancestor tables. If the ancestor tables are different it will fail. This * node is used for logical OR. *

* This union does not include duplicated rows. */ public static class LogicalUnionNode extends BranchQueryPlanNode { static final long serialVersionUID = -7783166856668779902L; public LogicalUnionNode(QueryPlanNode left, QueryPlanNode right) { super(left, right); } public Table evaluate(QueryContext context) { // Solve the left branch result Table left_result = left.evaluate(context); // Solve the right branch result Table right_result = right.evaluate(context); return left_result.union(right_result); } public String titleString() { return "LOGICAL UNION"; } } /** * A branch node for performing a composite function on two child nodes. * This branch is used for general UNION, EXCEPT, INTERSECT composites. The * left and right branch results must have the same number of columns and * column types. */ public static class CompositeNode extends BranchQueryPlanNode { static final long serialVersionUID = -560587816928425857L; /** * The composite operation * (either CompositeTable.UNION, EXCEPT, INTERSECT). */ private int composite_op; /** * If this is true, the composite includes all results from both children, * otherwise removes deplicates. */ private boolean all_op; public CompositeNode(QueryPlanNode left, QueryPlanNode right, int composite_op, boolean all_op) { super(left, right); this.composite_op = composite_op; this.all_op = all_op; } public Table evaluate(QueryContext context) { // Solve the left branch result Table left_result = left.evaluate(context); // Solve the right branch result Table right_result = right.evaluate(context); // Form the composite table CompositeTable t = new CompositeTable(left_result, new Table[] { left_result, right_result }); t.setupIndexesForCompositeFunction(composite_op, all_op); return t; } } /** * A branch node for a non-correlated ANY or ALL sub-query evaluation. This * node requires a set of columns from the left branch and an operator. * The right branch represents the non-correlated sub-query. *

   * NOTE: The cost of a SubQuery is higher if the right child result is
   *   greater than the left child result.  The plan should be arranged so
   *   smaller results are on the left.
   */
  public static class NonCorrelatedAnyAllNode extends BranchQueryPlanNode {

    static final long serialVersionUID = 7480579008259288291L;

    /**
     * The columns in the left table.
     */
    private Variable[] left_columns;

    /**
     * The SubQuery operator, eg. '= ANY', '<> ALL'
     */
    private Operator sub_query_operator;

    public NonCorrelatedAnyAllNode(QueryPlanNode left, QueryPlanNode right,
                                   Variable[] left_vars, Operator subquery_op) {
      super(left, right);
      this.left_columns = left_vars;
      this.sub_query_operator = subquery_op;
    }

    public Table evaluate(QueryContext context) {
      // Solve the left branch result
      Table left_result = left.evaluate(context);
      // Solve the right branch result
      Table right_result = right.evaluate(context);

      // Solve the sub query on the left columns with the right plan and the
      // given operator.
      return TableFunctions.anyAllNonCorrelated(left_result, left_columns,
                                           sub_query_operator, right_result);
    }

    public Object clone() throws CloneNotSupportedException {
      NonCorrelatedAnyAllNode node = (NonCorrelatedAnyAllNode) super.clone();
      // cloneArray deep-clones the Variable entries in place.
      cloneArray(node.left_columns);
      return node;
    }

    public String titleString() {
      StringBuffer buf = new StringBuffer();
      buf.append("NON_CORRELATED: (");
      // NOTE(review): the columns are concatenated with no separator -
      //   acceptable for a debug-only string.
      for (int i = 0; i < left_columns.length; ++i) {
        buf.append(left_columns[i].toString());
      }
      buf.append(") ");
      buf.append(sub_query_operator.toString());
      return new String(buf);
    }

  }

}
mckoisqldb-1.0.6/src/main/java/com/mckoi/database/QueryPlanNode.java000066400000000000000000000046061330501023400253140ustar00rootroot00000000000000/**
 * com.mckoi.database.QueryPlanNode 06 Nov 2001
 *
 * Mckoi SQL Database ( http://www.mckoi.com/database )
 * Copyright (C) 2000-2018 Diehl and Associates, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package com.mckoi.database;

import java.util.ArrayList;

/**
 * A node element of a query plan tree.  A plan of a query is represented as
 * a tree structure of such nodes.  The design allows for plan nodes to be
 * easily reorganised for the construction of better plans.
 *
 * @author Tobias Downer
 */

public interface QueryPlanNode extends java.io.Serializable, Cloneable {

  /**
   * Evaluates the node and returns the result as a Table.  The
   * VariableResolver resolves any outer variables
   */
  Table evaluate(QueryContext context);

  /**
   * Discovers a list of TableName that represent the sources that this query
   * requires to complete itself.  For example, if this is a query plan of
   * two joined table, the fully resolved names of both tables are returned.
   * <p>
   * The resultant list will not contain the same table name more than once.
   * The resultant list contains TableName objects.
   * <p>
   * NOTE, if a table is aliased, the unaliased name is returned.
   */
  ArrayList discoverTableNames(ArrayList list);

  /**
   * Discovers all the correlated variables in the plan (and plan children)
   * that reference a particular layer.  For example, if we wanted to find
   * all the CorrelatedVariable objects that reference the current layer, we
   * would typically call 'discoverCorrelatedVariables(0, new ArrayList())'
   */
  ArrayList discoverCorrelatedVariables(int level, ArrayList list);

  /**
   * Deep clones this query plan.
   */
  Object clone() throws CloneNotSupportedException;

  /**
   * Writes a textural representation of the node to the StringBuffer at the
   * given indent level.
   */
  void debugString(int indent, StringBuffer buf);

}
mckoisqldb-1.0.6/src/main/java/com/mckoi/database/RIDList.java000066400000000000000000000450671330501023400240440ustar00rootroot00000000000000/**
 * com.mckoi.database.RIDList 01 Dec 2000
 *
 * Mckoi SQL Database ( http://www.mckoi.com/database )
 * Copyright (C) 2000-2018 Diehl and Associates, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package com.mckoi.database;

import java.io.*;
import java.util.ArrayList;
import com.mckoi.util.IntegerVector;
import com.mckoi.util.IntegerIterator;
import com.mckoi.util.IndexComparator;
import com.mckoi.util.BlockIntegerList;
import com.mckoi.debug.*;

/**
 * This is an optimization to help sorting over a column in a table.  It is
 * an aid for sorting rows in a query without having to resort to cell
 * lookup.  It uses memory to speed up sorting.
 * <p>
 * Sorting data is a central part of any database system.  This object
 * maintains a list of values that represent each cell in a column
 * relationally.
 * <p>
 * For example, consider the following data in a column:
 * <p>
 *   { 'a', 'g', 'i', 'b', 'a' }
 * <p>
 * A RID list is a set of integer values that represents a column
 * relationally.  So the above column data could be represented in a RID
 * list as:
 * <p>
 *   { 1, 3, 4, 2, 1 }
 * <p>
 * If 'c' is inserted into the above list, there is not an integer value that
 * we can use to represent this cell.  In this case, the RID list is
 * renumbered to make room for the insertion.
 *
 * @author Tobias Downer
 */

final class RIDList {

  /**
   * The TransactionSystem that we are in.
   */
  private TransactionSystem system;

  /**
   * The master table for the column this is in.
   */
  private MasterTableDataSource master_table;

  /**
   * The TableName of the table.
   */
  private TableName table_name;

  /**
   * The name of the column of this rid list.
   */
  private String column_name;

  /**
   * The column in the master table.
   */
  private int column;

  /**
   * The sorted list of rows in this set.  This is sorted from min to max
   * (not sorted by row number - sorted by entity row value).
   */
  private BlockIntegerList set_list;

  /**
   * The contents of our list (maps row index to its RID value).
   */
  private IntegerVector rid_list;

  /**
   * The difference between each hash when the rid_list was last created or
   * rehashed.
   */
  private int hash_rid_difference;

  /**
   * The IndexComparator that we use to refer elements in the set to actual
   * data objects.
   */
  private IndexComparator set_comparator;

  /**
   * Set to true if this list has been fully built.
   */
  private boolean is_built;

  /**
   * The RID list build state.
   *   0 - list not built.
   *   1 - stage 1 (set_list being built).
   *   2 - state 2 (rid_list being built).
   *   3 - pending modifications.
   *   4 - finished
   */
  private int build_state = 0;

  /**
   * A list of modifications made to the index while it is being built.
   */
  private IntegerVector concurrent_modification_info;
  private ArrayList concurrent_modification_data;
  private Object modification_lock = new Object();

  /**
   * Set to true if a request to build the rid list is on the event
   * dispatcher.
   */
  private boolean request_processing = false;

  /**
   * Constructs the object.
   */
  RIDList(MasterTableDataSource master_table, int column) {
//    rid_list = new IntegerVector();
    this.master_table = master_table;
    this.system = master_table.getSystem();
    this.column = column;

    DataTableDef table_def = master_table.getDataTableDef();
    table_name = table_def.getTableName();
    column_name = table_def.columnAt(column).getName();

    is_built = false;
    setupComparator();
  }

  /**
   * Returns a DebugLogger object that we can use to log debug messages.
   */
  public final DebugLogger Debug() {
    return master_table.Debug();
  }

  /**
   * Sets the internal comparator that enables us to sort and lookup on the
   * data in this column.
   */
  private void setupComparator() {
    set_comparator = new IndexComparator() {

      // Compares the cell at the given row index with the given cell value.
      private int internalCompare(int index, TObject cell2) {
        TObject cell1 = getCellContents(index);
        return cell1.compareTo(cell2);
      }

      public int compare(int index, Object val) {
        return internalCompare(index, (TObject) val);
      }
      public int compare(int index1, int index2) {
        TObject cell = getCellContents(index2);
        return internalCompare(index1, cell);
      }
    };
  }

  /**
   * Gets the cell at the given row in the column of the master table.
   */
  private TObject getCellContents(int row) {
    return master_table.getCellContents(column, row);
  }

  /**
   * Calculates the 'hash_rid_difference' variable.  This dictates the
   * difference between hashing entries.  The gap shrinks as the table grows,
   * clamped to the range [8, 16384].
   */
  private void calcHashRIDDifference(int size) {
    if (size == 0) {
      hash_rid_difference = 32;
    }
    else {
      hash_rid_difference = (65536 * 4096) / size;
      if (hash_rid_difference > 16384) {
        hash_rid_difference = 16384;
      }
      else if (hash_rid_difference < 8) {
        hash_rid_difference = 8;
      }
    }

//    hash_rid_difference = 2;
//    System.out.println(hash_rid_difference);
  }

  /**
   * Rehashes the entire rid list.  This goes through the entire list from
   * first sorted entry to last and spaces out each rid so that there's
   * 'hash_rid_difference' numbers between each entry.
   *
   * @return the new rid value reserved for the entry marked 0 (the row
   *   currently being inserted).
   */
  private int rehashRIDList(int old_rid_place) {

    calcHashRIDDifference(set_list.size());

    int new_rid_place = -1;

    int cur_rid = 0;
    int old_rid = 0;

    IntegerIterator iterator = set_list.iterator();

    while (iterator.hasNext()) {
      int row_index = iterator.next();
      if (row_index >= 0 && row_index < rid_list.size()) {
        int old_value = rid_list.intAt(row_index);
        int new_value;

        if (old_value == 0) {
          // A 0 marks the row being inserted - reserve this slot for it but
          // leave the marker in place (the caller sets the final value).
          cur_rid += hash_rid_difference;
          new_rid_place = cur_rid;
        }
        else {
          if (old_value != old_rid) {
            // A new distinct value - advance the rid.
            old_rid = old_value;
            cur_rid += hash_rid_difference;
            new_value = cur_rid;
          }
          else {
            // Equal to the previous value - share the same rid.
            new_value = cur_rid;
          }
          rid_list.placeIntAt(new_value, row_index);
        }
      }
    }

    if (new_rid_place == -1) {
      throw new Error(
                "Post condition not correct - new_rid_place shouldn't be -1");
    }

    system.stats().increment("RIDList.rehash_rid_table");

    return new_rid_place;
  }

  /**
   * Algorithm for inserting a new row into the rid table.  For most cases
   * this should be a very fast method.
   * <p>
   * NOTE: This must never be called from anywhere except inside
   *   MasterTableDataStore.
   *
   * @param cell the cell to insert into the list.
   * @param row the row number.
   */
  void insertRID(TObject cell, int row) {
    // NOTE: We are guaranteed to be synchronized on master_table when this
    //   is called.

    synchronized (modification_lock) {
      // If state isn't pre-build or finished, then note this modification.
      if (build_state > 0 && build_state < 4) {
        concurrent_modification_info.addInt(1);
        concurrent_modification_info.addInt(row);
        concurrent_modification_data.add(cell);
        return;
      }
      // Only register if this list has been created.
      if (rid_list == null) {
        return;
      }
    }

    // Place a zero to mark the new row
    rid_list.placeIntAt(0, row);

    // Insert this into the set_list.
    set_list.insertSort(cell, row, set_comparator);

    int given_rid = -1;
    TObject previous_cell;

    // The index of this cell in the list
    int set_index = set_list.searchLast(cell, set_comparator);

    if (set_list.get(set_index) != row) {
      throw new Error(
               "set_list.searchLast(cell) didn't turn up expected row.");
    }

    int next_set_index = set_index + 1;
    if (next_set_index >= set_list.size()) {
      next_set_index = -1;
    }
    int previous_set_index = set_index - 1;

    // Work out the rid values of the entries either side of the insertion
    // point.
    int next_rid;
    if (next_set_index > -1) {
      next_rid = rid_list.intAt(set_list.get(next_set_index));
    }
    else {
      if (previous_set_index > -1) {
        // If at end and there's a previous set then use that as the next
        // rid.
        next_rid = rid_list.intAt(set_list.get(previous_set_index)) +
                   (hash_rid_difference * 2);
      }
      else {
        next_rid = (hash_rid_difference * 2);
      }
    }
    int previous_rid;
    if (previous_set_index > -1) {
      previous_rid = rid_list.intAt(set_list.get(previous_set_index));
    }
    else {
      previous_rid = 0;
    }

    // Are we the same as the previous or next cell in the list?
    if (previous_set_index > -1) {
      previous_cell = getCellContents(set_list.get(previous_set_index));
      if (previous_cell.compareTo(cell) == 0) {
        // Equal values share the same rid.
        given_rid = previous_rid;
      }
    }

    // If not given a rid yet,
    if (given_rid == -1) {
      if (previous_rid + 1 == next_rid) {
        // There's no room so we have to rehash the rid list.
        given_rid = rehashRIDList(next_rid);
      }
      else {
        // Take the midpoint of the gap.
        given_rid = ((next_rid + 1) + (previous_rid - 1)) / 2;
      }
    }

    // Finally (!!) - set the rid for this row.
    rid_list.placeIntAt(given_rid, row);
  }

  /**
   * Removes a RID entry from the given row.  This MUST only be
   * called when the row is permanently removed from the table (eg. by the
   * row garbage collector).
   * <p>
   * NOTE: This must never be called from anywhere except inside
   *   MasterTableDataStore.
   */
  void removeRID(int row) {
    // NOTE: We are guaranteed to be synchronized on master_table when this
    //   is called.

    synchronized (modification_lock) {
      // If state isn't pre-build or finished, then note this modification.
      if (build_state > 0 && build_state < 4) {
        concurrent_modification_info.addInt(2);
        concurrent_modification_info.addInt(row);
        return;
      }
      // Only register if this list has been created.
      if (rid_list == null) {
        return;
      }
    }

    try {
      // Remove from the set_list index.
      TObject cell = getCellContents(row);
      int removed = set_list.removeSort(cell, row, set_comparator);
    }
    catch (Error e) {
      // Log which column the failure occurred in before rethrowing.
      System.err.println("RIDList: " + table_name + "." + column_name);
      throw e;
    }

  }

  /**
   * Requests that a rid_list should be built for this column.  The list will
   * be built on the database dispatcher thread.
   */
  void requestBuildRIDList() {
    if (!isBuilt()) {
      if (!request_processing) {
        request_processing = true;
        // Wait 10 seconds to build rid list.
        system.postEvent(10000, system.createEvent(new Runnable() {
          public void run() {
            createRIDCache();
          }
        }));
      }
    }
  }

  /**
   * If rid_list is null then create it now.
   * <p>
   * NOTE: This must never be called from anywhere except inside
   *   MasterTableDataStore.
   */
  private void createRIDCache() {

    try {

      // If the master table is closed then return
      // ISSUE: What if this happens while we are constructing the list?
      if (master_table.isClosed()) {
        return;
      }

      long time_start = System.currentTimeMillis();
      long time_took;
      int rid_list_size;

      int set_size;

      synchronized (master_table) {
        synchronized (modification_lock) {

          if (is_built) {
            return;
          }

          // Set the build state
          build_state = 1;
          concurrent_modification_info = new IntegerVector();
          concurrent_modification_data = new ArrayList();

          // The set_list (complete index of the master table).
          set_size = master_table.rawRowCount();
          set_list = new BlockIntegerList();

          // Go through the master table and build set_list.
          for (int r = 0; r < set_size; ++r) {
            // Include every non-deleted row.
            if (!master_table.recordDeleted(r)) {
              TObject cell = getCellContents(r);
              set_list.insertSort(cell, r, set_comparator);
            }
          }
          // Now we have a complete/current index, including uncommitted,
          // and committed added and removed rows, of the given column

          // Add a root lock to the table
          master_table.addRootLock();

        }  // synchronized (modification_lock)
      }  // synchronized master_table

      try {

        // Go through and work out the rid values for the list.  We know
        // that 'set_list' is correct and no entries can be deleted from it
        // until we relinquish the root lock.

        calcHashRIDDifference(set_size);

        rid_list = new IntegerVector(set_size + 128);

        // Go through 'set_list'.  All entries that are equal are given the
        // same rid.
        if (set_list.size() > 0) {   //set_size > 0) {
          int cur_rid = hash_rid_difference;
          IntegerIterator iterator = set_list.iterator();
          int row_index = iterator.next();
          TObject last_cell = getCellContents(row_index);
          rid_list.placeIntAt(cur_rid, row_index);

          while (iterator.hasNext()) {
            row_index = iterator.next();
            TObject cur_cell = getCellContents(row_index);
            int cmp = cur_cell.compareTo(last_cell);
            if (cmp > 0) {
              // A new distinct value - advance the rid.
              cur_rid += hash_rid_difference;
            }
            else if (cmp < 0) {
              // ASSERTION
              // If current cell is less than last cell then the list ain't
              // sorted!
              throw new Error("Internal Database Error: Index is corrupt " +
                              " - InsertSearch list is not sorted.");
            }
            rid_list.placeIntAt(cur_rid, row_index);

            last_cell = cur_cell;
          }

        }

        // Final stage, insert final changes,
        // We lock the master_table so we are guaranteed no changes to the
        // table can happen during the final stage.
        synchronized (master_table) {
          synchronized (modification_lock) {
            build_state = 4;

            // Make any modifications to the list that occurred during the
            // time we were building the RID list.
            int mod_size = concurrent_modification_info.size();
            int i = 0;
            int m_data = 0;
            int insert_count = 0;
            int remove_count = 0;
            while (i < mod_size) {
              int type = concurrent_modification_info.intAt(i);
              int row = concurrent_modification_info.intAt(i + 1);
              // An insert
              if (type == 1) {
                TObject cell =
                          (TObject) concurrent_modification_data.get(m_data);
                insertRID(cell, row);
                ++m_data;
                ++insert_count;
              }
              // A remove
              else if (type == 2) {
                removeRID(row);
                ++remove_count;
              }
              else {
                throw new Error("Unknown modification type.");
              }

              i += 2;
            }

            if (remove_count > 0) {
              Debug().write(Lvl.ERROR, this,
                  "Assertion failed: It should not be possible to remove " +
                  "rows during a root lock when building a RID list.");
            }

            concurrent_modification_info = null;
            concurrent_modification_data = null;

            // Log the time it took
            time_took = System.currentTimeMillis() - time_start;
            rid_list_size = rid_list.size();

            is_built = true;

          }
        }  // synchronized (modification_lock)

      }
      finally {
        // Must guarantee we remove the root lock from the master table
        master_table.removeRootLock();
      }

      Debug().write(Lvl.MESSAGE, this,
               "RID List " + table_name.toString() + "." + column_name +
               " Initial Size = " + rid_list_size);
      Debug().write(Lvl.MESSAGE, this,
               "RID list built in " + time_took + "ms.");

      // The number of rid caches created.
      system.stats().increment(
                            "{session} RIDList.rid_caches_created");
      // The total size of all rid indices that we have created.
      system.stats().add(rid_list_size, "{session} RIDList.rid_indices");

    }
    catch (IOException e) {
      throw new Error("IO Error: " + e.getMessage());
    }

  }

  /**
   * Quick way of determining if the RID list has been built.
   */
  boolean isBuilt() {
    synchronized (modification_lock) {
      return is_built;
    }
  }

  /**
   * Given an unsorted set of rows in this table, this will return the row
   * list sorted in descending order.  This only uses the information from
   * within this list to make up the sorted result, and does not reference any
   * data in the master table.
   * <p>
   * SYNCHRONIZATION: This does not lock the master_table because it doesn't
   *   use any information in it.
   */
  BlockIntegerList sortedSet(final IntegerVector row_set) {

    // The length of the set to order
    int row_set_length = row_set.size();

    // This will be 'row_set' sorted by its entry lookup.  This must only
    // contain indices to row_set entries.
    BlockIntegerList new_set = new BlockIntegerList();

    // The comparator we use to sort
    IndexComparator comparator = new IndexComparator() {
      public int compare(int index, Object val) {
        int rid_val = rid_list.intAt(row_set.intAt(index));
        int rid_val2 = ((Integer) val).intValue();
        return rid_val - rid_val2;
      }
      public int compare(int index1, int index2) {
        throw new Error("Shouldn't be called!");
      }
    };

    // This synchronized statement is required because a RID list may be
    // altered when a row is deleted from the dispatcher thread.  Inserts and
    // deletes on a table at this level do not necessarily only happen when
    // the table is under a lock.  For this reason this block of code must
    // be synchronized.
    synchronized (master_table) {

      // Fill new_set with the set { 0, 1, 2, .... , row_set_length }
      for (int i = 0; i < row_set_length; ++i) {
        Integer rid_val = new Integer(rid_list.intAt(row_set.intAt(i)));
        new_set.insertSort(rid_val, i, comparator);
      }

      return new_set;

    }  // synchronized (master_table)

  }

}
mckoisqldb-1.0.6/src/main/java/com/mckoi/database/RawDiagnosticTable.java000066400000000000000000000050711330501023400262710ustar00rootroot00000000000000/**
 * com.mckoi.database.RawDiagnosticTable 29 Nov 2000
 *
 * Mckoi SQL Database ( http://www.mckoi.com/database )
 * Copyright (C) 2000-2018 Diehl and Associates, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package com.mckoi.database;

/**
 * An interface that allows for the inspection and repair of the raw data
 * in a file.  This is used for table debugging and the repair of damaged
 * files.
 *
 * @author Tobias Downer
 */

public interface RawDiagnosticTable {

  /**
   * Statics that represent the various states of a record.
   */
  public final static int UNCOMMITTED = 1,
                          COMMITTED_ADDED = 2,
                          COMMITTED_REMOVED = 3,
                          DELETED = 4;   // ie. available for reclaimation.

  /**
   * Denotes an erroneous record state.
   */
  public final static int RECORD_STATE_ERROR = 0;

  // ---------- Query Methods ----------

  /**
   * Returns the number of physical records in the table.  This includes
   * records that are uncommitted, deleted, committed removed and committed
   * added.
   */
  int physicalRecordCount();

  /**
   * Returns the DataTableDef object that describes the logical topology of
   * the columns in this table.
   */
  DataTableDef getDataTableDef();

  /**
   * Returns the state of the given record index.  The state of a row is
   * either UNCOMMITTED, COMMITTED ADDED, COMMITTED REMOVED or DELETED.
   * record_index should be between 0 and physicalRecordCount.
   */
  int recordState(int record_index);

  /**
   * The number of bytes the record takes up on the underlying media.
   */
  int recordSize(int record_index);

  /**
   * Returns the contents of the given cell in this table.  If the system is
   * unable to return a valid cell then an exception is thrown.
   */
  TObject getCellContents(int column, int record_index);

  /**
   * Returns any misc information regarding this row as a human readable
   * string.  May return null if there is no misc information associated with
   * this record.
   */
  String recordMiscInformation(int record_index);

}
mckoisqldb-1.0.6/src/main/java/com/mckoi/database/RawTableInformation.java000066400000000000000000000234631330501023400264770ustar00rootroot00000000000000/**
 * com.mckoi.database.RawTableInformation 31 Mar 1998
 *
 * Mckoi SQL Database ( http://www.mckoi.com/database )
 * Copyright (C) 2000-2018 Diehl and Associates, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package com.mckoi.database;

import java.util.Vector;
//import com.mckoi.util.Comparable;
import com.mckoi.util.SortUtil;
import com.mckoi.util.IntegerVector;

/**
 * This object represents the lowest level DataTable information of a given
 * VirtualTable.  Since it is possible to make any level of VirtualTable's,
 * it is useful to be able to resolve an 'n leveled' VirtualTable to a
 * single level table.  This object is used to collect information as the
 * 'VirtualTable.resolveToRawTable' method is walking through the
 * VirtualTable's ancestors.
 *
 * @author Tobias Downer
 */

final class RawTableInformation {

  /**
   * A Vector containing a list of DataTables, and 'row index' IntegerVectors
   * of the given rows in the table.
   */
  private Vector raw_info;

  /**
   * The constructor.
   */
  RawTableInformation() {
    raw_info = new Vector();
  }

  /**
   * Adds a new DataTable or ReferenceTable, and IntegerVector row set into
   * the object.  We can not add VirtualTable objects into this object.
   */
  void add(RootTable table, IntegerVector row_set) {
    RawTableElement elem = new RawTableElement();
    elem.table = table;
    elem.row_set = row_set;
    raw_info.addElement(elem);
  }

  /**
   * Returns an AbstractDataTable[] array of all the tables that have been
   * added.
   */
  Table[] getTables() {
    int size = raw_info.size();
    Table[] list = new Table[size];
    for (int i = 0; i < size; ++i) {
      list[i] = (Table) ((RawTableElement) raw_info.elementAt(i)).table;
    }
    return list;
  }

  /**
   * Returns a IntegerVector[] list of the rows in the table that have been
   * added.
   */
  IntegerVector[] getRows() {
    int size = raw_info.size();
    IntegerVector[] list = new IntegerVector[size];
    for (int i = 0; i < size; ++i) {
      list[i] = ((RawTableElement) raw_info.elementAt(i)).row_set;
    }
    return list;
  }

  /**
   * Returns an array of RawTableElement sorted into a consistent order.
   */
  protected RawTableElement[] getSortedElements() {
    RawTableElement[] list = new RawTableElement[raw_info.size()];
    raw_info.copyInto(list);
    SortUtil.quickSort(list);
    return list;
  }

  /**
   * Finds the union of this information with the given information.
   * It does the following:
   *   + Sorts the unioned tables into a consistent order.
   *   + Merges each row in the tables row_set.
   *   + Sorts the resultant merge.
   *   + Makes a new set with the resultant merge minus any duplicates.
   */
  void union(RawTableInformation info) {

    // Number of Table 'columns'
    int col_count = raw_info.size();

    // Get the sorted RawTableElement[] from each raw table information
    // object.
    RawTableElement[] merge1 = getSortedElements();
    RawTableElement[] merge2 = info.getSortedElements();

    // Validates that both tables being merged are of identical type.
    int size1 = -1;
    int size2 = -1;

    // First check number of tables in each merge is correct.
    if (merge1.length != merge2.length) {
      throw new Error("Incorrect format in table union");
    }

    // Check each table in the merge1 set has identical length row_sets
    for (int i = 0; i < merge1.length; ++i) {
      if (size1 == -1) {
        size1 = merge1[i].row_set.size();
      }
      else {
        if (size1 != merge1[i].row_set.size()) {
          throw new Error("Incorrect format in table union");
        }
      }
    }

    // Check each table in the merge2 set has identical length row_sets
    for (int i = 0; i < merge2.length; ++i) {
      // Check the tables in merge2 are identical to the tables in merge1
      // (Checks the names match, and the validColumns filters are identical
      //  see AbstractDataTable.typeEquals method).
      if (!merge2[i].table.typeEquals(merge1[i].table)) {
        throw new Error("Incorrect format in table union");
      }

      if (size2 == -1) {
        size2 = merge2[i].row_set.size();
      }
      else {
        if (size2 != merge2[i].row_set.size()) {
          throw new Error("Incorrect format in table union");
        }
      }
    }

    // If size1 or size2 are -1 then we have a corrupt table.  (It will be
    // 0 for an empty table).
    if (size1 == -1 || size2 == -1) {
      throw new Error("Incorrect format in table union");
    }

    // We don't need information in 'raw_info' vector anymore so clear it.
    // This may help garbage collection.
    raw_info.removeAllElements();

    // Merge the two together into a new list of RawRowElement[]
    int merge_size = size1 + size2;
    RawRowElement[] elems = new RawRowElement[merge_size];
    int elems_index = 0;

    for (int i = 0; i < size1; ++i) {
      RawRowElement e = new RawRowElement();
      e.row_vals = new int[col_count];

      for (int n = 0; n < col_count; ++n) {
        e.row_vals[n] = merge1[n].row_set.intAt(i);
      }
      elems[elems_index] = e;
      ++elems_index;
    }

    for (int i = 0; i < size2; ++i) {
      RawRowElement e = new RawRowElement();
      e.row_vals = new int[col_count];

      for (int n = 0; n < col_count; ++n) {
        e.row_vals[n] = merge2[n].row_set.intAt(i);
      }
      elems[elems_index] = e;
      ++elems_index;
    }

    // Now sort the row elements into order.
    SortUtil.quickSort(elems);

    // Set up the 'raw_info' vector with the new RawTableElement[] removing
    // any duplicate rows.
    for (int i = 0; i < col_count; ++i) {
      RawTableElement e = merge1[i];
      e.row_set.clear();
    }
    RawRowElement previous = null;
    RawRowElement current = null;
    for (int n = 0; n < merge_size; ++n) {
      current = elems[n];

      // Check that the current element in the set is not a duplicate of the
      // previous.
      if (previous == null || previous.compareTo(current) != 0) {
        for (int i = 0; i < col_count; ++i) {
          merge1[i].row_set.addInt(current.row_vals[i]);
        }
        previous = current;
      }
    }

    for (int i = 0; i < col_count; ++i) {
      raw_info.addElement(merge1[i]);
    }

  }

  /**
   * Removes any duplicate rows from this RawTableInformation object.
   */
  void removeDuplicates() {

    // If no tables in duplicate then return
    if (raw_info.size() == 0) {
      return;
    }

    // Get the length of the first row set in the first table.  We assume
    // that the row set length is identical across each table in the Vector.
    RawTableElement elen = (RawTableElement) raw_info.elementAt(0);
    int len = elen.row_set.size();
    if (len == 0) {
      return;
    }

    // Create a new row element to sort.
    RawRowElement[] elems = new RawRowElement[len];
    int width = raw_info.size();

    // Create an array of RawTableElement so we can quickly access the data
    RawTableElement[] rdup = new RawTableElement[width];
    raw_info.copyInto(rdup);

    // Run through the data building up a new RawTableElement[] array with
    // the information in every raw span.
    for (int i = 0; i < len; ++i) {
      RawRowElement e = new RawRowElement();
      e.row_vals = new int[width];
      for (int n = 0; n < width; ++n) {
        e.row_vals[n] = rdup[n].row_set.intAt(i);
      }
      elems[i] = e;
    }

    // Now 'elems' is an array of individual RawRowElement objects which
    // represent each individual row in the table.

    // Now sort and remove duplicates to make up a new set.
    SortUtil.quickSort(elems);

    // Remove all elements from the raw_info Vector.
    raw_info.removeAllElements();

    // Make a new set of RawTableElement[] objects
    RawTableElement[] table_elements = rdup;

    // Set up the 'raw_info' vector with the new RawTableElement[] removing
    // any duplicate rows.
    for (int i = 0; i < width; ++i) {
      table_elements[i].row_set.clear();
    }
    RawRowElement previous = null;
    RawRowElement current = null;
    for (int n = 0; n < len; ++n) {
      current = elems[n];

      // Check that the current element in the set is not a duplicate of the
      // previous.
      if (previous == null || previous.compareTo(current) != 0) {
        for (int i = 0; i < width; ++i) {
          table_elements[i].row_set.addInt(current.row_vals[i]);
        }
        previous = current;
      }
    }

    for (int i = 0; i < width; ++i) {
      raw_info.addElement(table_elements[i]);
    }

  }

}

/**
 * A container class to hold the DataTable and IntegerVector row set of a
 * given table in the list.
 */
final class RawTableElement implements Comparable {

  RootTable table;
  IntegerVector row_set;

  public int compareTo(Object o) {
    RawTableElement rte = (RawTableElement) o;
    // NOTE(review): ordering by hashCode difference - consistent within a
    //   run, but can overflow for extreme hash values; kept as-is.
    return table.hashCode() - rte.table.hashCode();
  }

}

/**
 * A container class to hold each row of a list of tables.
 * table_elems is a reference to the merged set the 'row_index' is in.
* row_index is the row index of the row this element refers to. */ final class RawRowElement implements Comparable { int[] row_vals; public int compareTo(Object o) { RawRowElement rre = (RawRowElement) o; int size = row_vals.length; for (int i = 0; i < size; ++i) { int v1 = row_vals[i]; int v2 = rre.row_vals[i]; if (v1 != v2) { return v1 - v2; } } return 0; } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/ReferenceTable.java000066400000000000000000000072671330501023400254420ustar00rootroot00000000000000/** * com.mckoi.database.ReferenceTable 03 Apr 1998 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import com.mckoi.util.IntegerVector; /** * This is an implementation of a Table that references a DataTable as its * parent. This is a one-to-one relationship unlike the VirtualTable class * which is a one-to-many relationship. *

* The entire purpose of this class is as a filter. We can use it to rename * a DataTable class to any domain we feel like. This allows us to generate * unique column names. *

* For example, say we need to join the same table. We can use this method * to ensure that the newly joined table won't have duplicate column names. *

* This object implements RootTable. * * @author Tobias Downer */ public final class ReferenceTable extends FilterTable implements RootTable { /** * This represents the new name of the table. */ private TableName table_name; /** * The modified DataTableDef object for this reference. */ private DataTableDef modified_table_def; /** * The Constructor. */ ReferenceTable(Table table, TableName tname) { super(table); table_name = tname; // Create a modified table def based on the parent def. modified_table_def = new DataTableDef(table.getDataTableDef()); modified_table_def.setTableName(tname); modified_table_def.setImmutable(); } /** * Constructs the ReferenceTable given the parent table, and a new * DataTableDef that describes the columns in this table. This is used if * we want to redefine the column names. *

* Note that the given DataTableDef must contain the same number of columns as * the parent table, and the columns must be the same type. */ ReferenceTable(Table table, DataTableDef def) { super(table); table_name = def.getTableName(); modified_table_def = def; } /** * Filters the name of the table. This returns the declared name of the * table. */ public TableName getTableName() { return table_name; } /** * Returns the 'modified' DataTableDef object for this reference. */ public DataTableDef getDataTableDef() { return modified_table_def; } /** * Given a fully qualified variable field name, ie. 'APP.CUSTOMER.CUSTOMERID' * this will return the column number the field is at. Returns -1 if the * field does not exist in the table. */ public int findFieldName(Variable v) { TableName table_name = v.getTableName(); if (table_name != null && table_name.equals(getTableName())) { return getDataTableDef().fastFindColumnName(v.getName()); } return -1; } /** * Returns a fully qualified Variable object that represents the name of * the column at the given index. For example, * new Variable(new TableName("APP", "CUSTOMER"), "ID") */ public Variable getResolvedVariable(int column) { return new Variable(getTableName(), getDataTableDef().columnAt(column).getName()); } public boolean typeEquals(RootTable table) { return (this == table); } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/RegexLibrary.java000066400000000000000000000044261330501023400251650ustar00rootroot00000000000000/** * com.mckoi.database.RegexLibrary 13 Oct 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import com.mckoi.util.IntegerVector; /** * An interface that links with a Regex library. This interface allows * the database engine to use any regular expression library that this * interface can be implemented for. * * @author Tobias Downer */ public interface RegexLibrary { /** * Matches a regular expression against a string value. If the value is * a match against the expression then it returns true. * * @param regular_expression the expression to match (eg. "[0-9]+"). * @param expression_ops expression operator string that specifies various * flags. For example, "im" is like '/[expression]/im' in Perl. * @param value the string to test. */ boolean regexMatch(String regular_expression, String expression_ops, String value); /** * Performs a regular expression search on the given column of the table. * Returns an IntegerVector that contains the list of rows in the table that * matched the expression. Returns an empty list if the expression matched * no rows in the column. * * @param table the table to search for matching values. * @param column the column of the table to search for matching values. * @param regular_expression the expression to match (eg. "[0-9]+"). * @param expression_ops expression operator string that specifies various * flags. For example, "im" is like '/[expression]/im' in Perl. 
*/ IntegerVector regexSearch(Table table, int column, String regular_expression, String expression_ops); } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/RootTable.java000066400000000000000000000027471330501023400244650ustar00rootroot00000000000000/** * com.mckoi.database.RootTable 22 Sep 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; /** * Interface that is implemented by all Root tables. A Root table is a non- * virtual table that represents table data in its lowest form. When the * Table.resolveToRawTable method is called, if it encounters a table that * implements RootTable then it does not attempt to decend further to * extract the underlying tables. *

* This interface is used for unions. * * @author Tobias Downer */ public interface RootTable { /** * This is function is used to check that two root tables are identical. * This is used if we need to chect that the form of the table is the same. * Such as in a union operation, when we can only union two tables with * the identical columns. */ boolean typeEquals(RootTable table); } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/RowData.java000066400000000000000000000277641330501023400241410ustar00rootroot00000000000000/** * com.mckoi.database.RowData 07 Mar 1998 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import com.mckoi.database.global.Types; import com.mckoi.database.global.StringObject; import java.util.List; import java.util.Arrays; import java.util.Date; import java.io.DataOutput; import java.io.InputStream; import java.io.IOException; /** * Represents a row of data to be added into a table. The row data is linked * to a TableField that describes the cell information within a row. *

* There are two types of RowData object. Those that are empty and contain * blank data, and those that contain information to either be inserted * into a table, or has be retrieved from a row. *

* NOTE: Any RowData objects that need to be set to 'null' should be done so * explicitly. * NOTE: We must call a 'setColumnData' method for _every_ column in the * row to form. * NOTE: This method (or derived classes) must only use safe methods in * DataTable. (ie. getRowCount, etc are out). * * @author Tobias Downer */ public class RowData implements Types { /** * The TransactionSystem this RowData is a context of. */ private TransactionSystem system; /** * The TableDataSource object that this RowData is in, or is destined to be * in. */ private TableDataSource table; /** * The definition of the table. */ private DataTableDef table_def; /** * A list of TObject objects in the table. */ private TObject[] data_cell_list; /** * The number of columns in the row. */ private int col_count; /** * To create a RowData object without an underlying table. This is for * copying from one table to a different one. */ public RowData(TransactionSystem system, int col_count) { this.system = system; this.col_count = col_count; data_cell_list = new TObject[col_count]; } /** * The Constructor generates a blank row. */ public RowData(TableDataSource table) { this.system = table.getSystem(); this.table = table; table_def = table.getDataTableDef(); col_count = table_def.columnCount(); data_cell_list = new TObject[col_count]; } /** * Populates the RowData object with information from a specific row from * the underlying DataTable. */ void setFromRow(int row) { for (int col = 0; col < col_count; ++col) { setColumnData(col, table.getCellContents(col, row)); } } /** * Returns the table object this row data is assigned to. This is used to * ensure we don't try to use a row data in a different table to what it was * created from. */ boolean isSameTable(DataTable tab) { return table == tab; } /** * Sets up a column by casting the value from the given TObject to a * type that is compatible with the column. This is useful when we * are copying information from one table to another. 
*/ public void setColumnData(int column, TObject cell) { DataTableColumnDef col = table_def.columnAt(column); if (table != null && col.getSQLType() != cell.getTType().getSQLType()) { // Cast the TObject cell = cell.castTo(col.getTType()); } setColumnDataFromTObject(column, cell); } /** * Sets up a column from an Object. */ public void setColumnDataFromObject(int column, Object ob) { DataTableColumnDef col_def = table_def.columnAt(column); if (ob instanceof String) { ob = StringObject.fromString((String) ob); } // Create a TObject from the given object to the given type TObject cell = TObject.createAndCastFromObject(col_def.getTType(), ob); setColumnDataFromTObject(column, cell); } /** * Sets up a column from a TObject. */ public void setColumnDataFromTObject(int column, TObject ob) { data_cell_list[column] = ob; } /** * This is a special case situation for setting the column cell to 'null'. */ public void setColumnToNull(int column) { DataTableColumnDef col_def = table_def.columnAt(column); setColumnDataFromTObject(column, new TObject(col_def.getTType(), null)); } /** * Sets the given column number to the default value for this column. */ public void setColumnToDefault(int column, QueryContext context) { if (table != null) { DataTableColumnDef column_def = table_def.columnAt(column); Expression exp = column_def.getDefaultExpression(system); if (exp != null) { TObject def_val = evaluate(exp, context); setColumnData(column, def_val); return; } } setColumnToNull(column); } /** * Returns the TObject that represents the information in the given column * of the row. */ public TObject getCellData(int column) { TObject cell = data_cell_list[column]; if (cell == null) { DataTableColumnDef col_def = table_def.columnAt(column); cell = new TObject(col_def.getTType(), null); } return cell; } /** * Returns the name of the given column number. 
*/ public String getColumnName(int column) { return table_def.columnAt(column).getName(); } /** * Finds the field in this RowData with the given name. */ public int findFieldName(String column_name) { return table_def.findColumnName(column_name); } /** * Returns the number of columns (cells) in this row. */ public int getColumnCount() { return col_count; } /** * Evaluates the expression and returns the object it evaluates to using * the local VariableResolver to resolve variables in the expression. */ TObject evaluate(Expression expression, QueryContext context) { boolean ignore_case = system.ignoreIdentifierCase(); // Resolve any variables to the table_def for this expression. table_def.resolveColumns(ignore_case, expression); // Get the variable resolver and evaluate over this data. VariableResolver vresolver = getVariableResolver(); return expression.evaluate(null, vresolver, context); } /** * Evaluates a single assignment on this RowData object. A VariableResolver * is made which resolves to variables only within this RowData context. */ void evaluate(Assignment assignment, QueryContext context) { // Get the variable resolver and evaluate over this data. VariableResolver vresolver = getVariableResolver(); TObject ob = assignment.getExpression().evaluate(null, vresolver, context); // Check the variable name is within this row. Variable variable = assignment.getVariable(); int column = findFieldName(variable.getName()); // Set the column to the resolved value. setColumnData(column, ob); } /** * Any columns in the row of data that haven't been set yet (they will be * 'null') will be set to the default value during this method. This should * be called after the row data has initially been set with values from some * source. 
*/ public void setDefaultForRest(QueryContext context) throws DatabaseException { for (int i = 0; i < col_count; ++i) { if (data_cell_list[i] == null) { setColumnToDefault(i, context); } } } /** * Sets up an entire row given the array of assignments. If any columns are * left 'null' then they are filled with the default value. */ public void setupEntire(Assignment[] assignments, QueryContext context) throws DatabaseException { for (int i = 0; i < assignments.length; ++i) { evaluate(assignments[i], context); } // Any that are left as 'null', set to default value. setDefaultForRest(context); } /** * Sets up an entire row given the list of insert elements and a list of * indices to the columns to set. An insert element is either an expression * that is resolved to a constant, or the string "DEFAULT" which indicates * the value should be set to the default value of the column. */ public void setupEntire(int[] col_indices, List insert_elements, QueryContext context) throws DatabaseException { int elem_size = insert_elements.size(); if (col_indices.length != elem_size) { throw new DatabaseException( "Column indices and expression array sizes don't match"); } // Get the variable resolver and evaluate over this data. VariableResolver vresolver = getVariableResolver(); for (int i = 0; i < col_indices.length; ++i) { Object element = insert_elements.get(i); if (element instanceof Expression) { // Evaluate to the object to insert TObject ob = ((Expression) element).evaluate(null, vresolver, context); int table_column = col_indices[i]; // Cast the object to the type of the column ob = ob.castTo(table_def.columnAt(table_column).getTType()); // Set the column to the resolved value. setColumnDataFromTObject(table_column, ob); } else { // The element must be 'DEFAULT'. If it's not throw an error. If it // is, the default value will be set later. 
if (!element.equals("DEFAULT")) { throw new DatabaseException( "Invalid value in 'insert_elements' list."); } } } // Any that are left as 'null', set to default value. setDefaultForRest(context); } /** * Sets up an entire row given the array of Expressions and a list of indices * to the columns to set. Any columns that are not set by this method are * set to the default value as defined for the column. */ public void setupEntire(int[] col_indices, Expression[] exps, QueryContext context) throws DatabaseException { setupEntire(col_indices, Arrays.asList(exps), context); } /** * Returns a string representation of this row. */ public String toString() { StringBuffer buf = new StringBuffer(); buf.append("[RowData: "); for (int i = 0; i < col_count; ++i) { buf.append(data_cell_list[i].getObject()); buf.append(", "); } return new String(buf); } /** * Returns a VariableResolver to use within this RowData context. */ private VariableResolver getVariableResolver() { if (variable_resolver == null) { variable_resolver = new RDVariableResolver(); } else { variable_resolver.nextAssignment(); } return variable_resolver; } private RDVariableResolver variable_resolver = null; // ---------- Inner classes ---------- /** * Variable resolver for this context. 
*/ private class RDVariableResolver implements VariableResolver { private int assignment_count = 0; void nextAssignment() { ++assignment_count; } public int setID() { return assignment_count; } public TObject resolve(Variable variable) { String col_name = variable.getName(); int col_index = table_def.findColumnName(col_name); if (col_index == -1) { throw new Error("Can't find column: " + col_name); } TObject cell = data_cell_list[col_index]; if (cell == null) { throw new Error("Column " + col_name + " hasn't been set yet."); } return cell; } public TType returnTType(Variable variable) { String col_name = variable.getName(); int col_index = table_def.findColumnName(col_name); if (col_index == -1) { throw new Error("Can't find column: " + col_name); } return table_def.columnAt(col_index).getTType(); } } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/RowEnumeration.java000066400000000000000000000023101330501023400255320ustar00rootroot00000000000000/** * com.mckoi.database.RowEnumeration 05 Apr 1998 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; /** * This enumeration allows for access to a tables rows. Each call to * 'nextRowIndex()' returns an int that can be used in the * 'Table.getCellContents(int row, int column)'. *

* @author Tobias Downer */ public interface RowEnumeration { /** * Determines if there are any rows left in the enumeration. */ public boolean hasMoreRows(); /** * Returns the next row index from the enumeration. */ public int nextRowIndex(); } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/SchemaDef.java000066400000000000000000000026301330501023400244000ustar00rootroot00000000000000/** * com.mckoi.database.SchemaDef 29 Aug 2001 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; /** * A definition of a schema. * * @author Tobias Downer */ public final class SchemaDef { /** * The name of the schema (eg. APP). */ private String name; /** * The type of this schema (eg. SYSTEM, USER, etc) */ private String type; /** * Constructs the SchemaDef. */ public SchemaDef(String name, String type) { this.name = name; this.type = type; } /** * Returns the case correct name of the schema. */ public String getName() { return name; } /** * Returns the type of this schema. 
*/ public String getType() { return type; } public String toString() { return getName(); } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/SelectableRange.java000066400000000000000000000130131330501023400255760ustar00rootroot00000000000000/** * com.mckoi.database.SelectableRange 12 Aug 2001 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; /** * An object that represents a range of values to select from a list. A range * has a start value, an end value, and whether we should pick inclusive or * exclusive of the end value. The start value may be a concrete value from * the set or it may be a flag that represents the start or end of the list. *

* For example, to select the first item from a set the range would be; *

 * RANGE:
 *   start = FIRST_VALUE, first
 *   end   = LAST_VALUE, first
 * 
* To select the last item from a set the range would be; *
 * RANGE:
 *   start = FIRST_VALUE, last
 *   end   = LAST_VALUE, last
 * 
* To select the range of values between '10' and '15' then range would be; *
 * RANGE:
 *   start = FIRST_VALUE, '10'
 *   end   = LAST_VALUE, '15'
 * 
* Note that the the start value may not compare less than the end value. For * example, start can not be 'last' and end can not be 'first'. * * @author Tobias Downer */ public final class SelectableRange { // ---------- Statics ---------- /** * An object that represents the first value in the set. *

* Note that these objects have no (NULL) type. */ public static final TObject FIRST_IN_SET = new TObject(TType.NULL_TYPE, "[FIRST_IN_SET]"); /** * An object that represents the last value in the set. *

* Note that these objects have no (NULL) type. */ public static final TObject LAST_IN_SET = new TObject(TType.NULL_TYPE, "[LAST_IN_SET]"); /** * Represents the various points in the set on the value to represent the * set range. */ public static final byte FIRST_VALUE = 1, LAST_VALUE = 2, BEFORE_FIRST_VALUE = 3, AFTER_LAST_VALUE = 4; // ---------- Members ---------- /** * The start of the range to select from the set. */ private TObject start; /** * The end of the range to select from the set. */ private TObject end; /** * Denotes the place for the range to start with respect to the start value. * Either FIRST_VALUE or AFTER_LAST_VALUE. */ private byte set_start_flag; /** * Denotes the place for the range to end with respect to the end value. * Either BEFORE_FIRST_VALUE or LAST_VALUE. */ private byte set_end_flag; /** * Constructs the range. */ public SelectableRange(byte set_start_flag, TObject start, byte set_end_flag, TObject end) { this.start = start; this.end = end; this.set_start_flag = set_start_flag; this.set_end_flag = set_end_flag; } /** * Returns the start of the range. * NOTE: This may return FIRST_IN_SET or LAST_IN_SET. */ public TObject getStart() { return start; } /** * Returns the end of the range. * NOTE: This may return FIRST_IN_SET or LAST_IN_SET. */ public TObject getEnd() { return end; } /** * Returns the place for the range to start (either FIRST_VALUE or * AFTER_LAST_VALUE) */ public byte getStartFlag() { return set_start_flag; } /** * Returns the place for the range to end (either BEFORE_FIRST_VALUE or * LAST VALUE). */ public byte getEndFlag() { return set_end_flag; } /** * Outputs this range as a string. 
*/ public String toString() { StringBuffer buf = new StringBuffer(); if (getStartFlag() == FIRST_VALUE) { buf.append("FIRST_VALUE "); } else if (getStartFlag() == AFTER_LAST_VALUE) { buf.append("AFTER_LAST_VALUE "); } buf.append(getStart()); buf.append(" -> "); if (getEndFlag() == LAST_VALUE) { buf.append("LAST_VALUE "); } else if (getEndFlag() == BEFORE_FIRST_VALUE) { buf.append("BEFORE_FIRST_VALUE "); } buf.append(getEnd()); return new String(buf); } /** * Returns true if this range is equal to the given range. */ public boolean equals(Object ob) { if (super.equals(ob)) { return true; } SelectableRange dest_range = (SelectableRange) ob; return (getStart().valuesEqual(dest_range.getStart()) && getEnd().valuesEqual(dest_range.getEnd()) && getStartFlag() == dest_range.getStartFlag() && getEndFlag() == dest_range.getEndFlag()); } // ---------- Statics ---------- /** * The range that represents the entire range (including null). */ public static final SelectableRange FULL_RANGE = new SelectableRange(FIRST_VALUE, FIRST_IN_SET, LAST_VALUE, LAST_IN_SET); /** * The range that represents the entire range (not including null). */ public static final SelectableRange FULL_RANGE_NO_NULLS = new SelectableRange(AFTER_LAST_VALUE, TObject.nullVal(), LAST_VALUE, LAST_IN_SET); } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/SelectableRangeSet.java000066400000000000000000000325511330501023400262620ustar00rootroot00000000000000/** * com.mckoi.database.SelectableRangeSet 18 Nov 2001 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import java.util.ArrayList; import java.util.ListIterator; /** * Represents a complex normalized range of a list. This is essentially a * set of SelectableRange objects that make up a complex view of a range. For * example, say we had a query * '(a > 10 and a < 20 and a <> 15) or a >= 50', * we could represent this range by the following range set; *

 * RANGE: AFTER_LAST_VALUE 10, BEFORE_FIRST_VALUE 15
 * RANGE: AFTER_LAST_VALUE 15, BEFORE_FIRST_VALUE 20
 * RANGE: FIRST_VALUE 50, LAST_VALUE LAST_IN_SET
 * 

* The range is constructed by calls to 'intersect', and 'union'. * * @author Tobias Downer */ public final class SelectableRangeSet { /** * The list of ranges. */ private final ArrayList range_set; /** * Constructs the SelectableRangeSet to a full range (a range that encompases * all values). If 'no_nulls' is true then the range can't include null * values. */ public SelectableRangeSet() { range_set = new ArrayList(); range_set.add(SelectableRange.FULL_RANGE); } /** * Intersects the given SelectableRange object with the given Operator and * value constraint. *

 * NOTE: This does not work with the '<>' operator which must be handled
 * another way (the 'intersect' method splits '<>' into a '<' and a '>'
 * constraint).
 */
  private static SelectableRange intersectRange(SelectableRange range,
                                                Operator op, TObject val,
                                                boolean null_check) {
    TObject start = range.getStart();
    byte start_flag = range.getStartFlag();
    TObject end = range.getEnd();
    byte end_flag = range.getEndFlag();

    // Inclusive operators keep the constraining value inside the range.
    boolean inclusive = op.is("is") || op.is("=") ||
                        op.is(">=") || op.is("<=");

    // Operators that constrain the lower bound of the range.
    if (op.is("is") || op.is("=") || op.is(">") || op.is(">=")) {
      // With this operator, NULL values must return null.
      if (null_check && val.isNull()) {
        return null;
      }
      if (start == SelectableRange.FIRST_IN_SET) {
        start = val;
        start_flag = inclusive ? SelectableRange.FIRST_VALUE :
                                 SelectableRange.AFTER_LAST_VALUE;
      }
      else {
        int c = val.compareTo(start);
        // Only ever raise the lower bound, never lower it.
        if ((c == 0 && start_flag == SelectableRange.FIRST_VALUE) || c > 0) {
          start = val;
          start_flag = inclusive ? SelectableRange.FIRST_VALUE :
                                   SelectableRange.AFTER_LAST_VALUE;
        }
      }
    }
    // Operators that constrain the upper bound of the range.
    if (op.is("is") || op.is("=") || op.is("<") || op.is("<=")) {
      // With this operator, NULL values must return null.
      if (null_check && val.isNull()) {
        return null;
      }
      // If start is first in set, then we have to change it to after NULL
      // (NULL sorts before all other values and must not be matched by a
      // '<' style constraint).
      if (null_check && start == SelectableRange.FIRST_IN_SET) {
        start = TObject.nullVal();
        start_flag = SelectableRange.AFTER_LAST_VALUE;
      }
      if (end == SelectableRange.LAST_IN_SET) {
        end = val;
        end_flag = inclusive ? SelectableRange.LAST_VALUE :
                               SelectableRange.BEFORE_FIRST_VALUE;
      }
      else {
        int c = val.compareTo(end);
        // Only ever lower the upper bound, never raise it.
        if ((c == 0 && end_flag == SelectableRange.LAST_VALUE) || c < 0) {
          end = val;
          end_flag = inclusive ? SelectableRange.LAST_VALUE :
                                 SelectableRange.BEFORE_FIRST_VALUE;
        }
      }
    }

    // If start and end are not null types (if either are, then it means it
    // is a placeholder value meaning start or end of set).
    if (start != SelectableRange.FIRST_IN_SET &&
        end != SelectableRange.LAST_IN_SET) {
      // If start is higher than end, return null
      int c = start.compareTo(end);
      if ((c == 0 && (start_flag == SelectableRange.AFTER_LAST_VALUE ||
                      end_flag == SelectableRange.BEFORE_FIRST_VALUE)) ||
          c > 0) {
        return null;
      }
    }

    // The new intersected range
    return new SelectableRange(start_flag, start, end_flag, end);
  }

  /**
   * Returns true if the two SelectableRange ranges intersect.
   */
  private static boolean rangeIntersectedBy(SelectableRange range1,
                                            SelectableRange range2) {
    byte start_flag_1 = range1.getStartFlag();
    TObject start_1 = range1.getStart();
    byte end_flag_1 = range1.getEndFlag();
    TObject end_1 = range1.getEnd();

    byte start_flag_2 = range2.getStartFlag();
    TObject start_2 = range2.getStart();
    byte end_flag_2 = range2.getEndFlag();
    TObject end_2 = range2.getEnd();

    // Map the FIRST_IN_SET / LAST_IN_SET placeholders to null so an open
    // bound always satisfies its half of the intersection test.
    TObject start_cell_1, end_cell_1;
    TObject start_cell_2, end_cell_2;

    start_cell_1 = start_1 == SelectableRange.FIRST_IN_SET ? null : start_1;
    end_cell_1 = end_1 == SelectableRange.LAST_IN_SET ? null : end_1;
    start_cell_2 = start_2 == SelectableRange.FIRST_IN_SET ? null : start_2;
    end_cell_2 = end_2 == SelectableRange.LAST_IN_SET ? null : end_2;

    // Does range1 start at or before range2 ends?
    boolean intersect_1 = false;
    if (start_cell_1 != null && end_cell_2 != null) {
      int c = start_cell_1.compareTo(end_cell_2);
      if (c < 0 ||
          (c == 0 && (start_flag_1 == SelectableRange.FIRST_VALUE ||
                      end_flag_2 == SelectableRange.LAST_VALUE))) {
        intersect_1 = true;
      }
    }
    else {
      intersect_1 = true;
    }

    // Does range2 start at or before range1 ends?
    boolean intersect_2 = false;
    if (start_cell_2 != null && end_cell_1 != null) {
      int c = start_cell_2.compareTo(end_cell_1);
      if (c < 0 ||
          (c == 0 && (start_flag_2 == SelectableRange.FIRST_VALUE ||
                      end_flag_1 == SelectableRange.LAST_VALUE))) {
        intersect_2 = true;
      }
    }
    else {
      intersect_2 = true;
    }

    // The ranges intersect only when both conditions hold.
    return (intersect_1 && intersect_2);
  }

  /**
   * Alters the first range so it encompasses the second range.  This assumes
   * that range1 intersects range2.
 */
  private static SelectableRange changeRangeSizeToEncompass(
                            SelectableRange range1, SelectableRange range2) {

    byte start_flag_1 = range1.getStartFlag();
    TObject start_1 = range1.getStart();
    byte end_flag_1 = range1.getEndFlag();
    TObject end_1 = range1.getEnd();

    byte start_flag_2 = range2.getStartFlag();
    TObject start_2 = range2.getStart();
    byte end_flag_2 = range2.getEndFlag();
    TObject end_2 = range2.getEnd();

    // Extend the lower bound of range1 down to range2's lower bound when
    // range2 starts earlier (FIRST_IN_SET is the absolute minimum).
    if (start_1 != SelectableRange.FIRST_IN_SET) {
      if (start_2 != SelectableRange.FIRST_IN_SET) {
        TObject cell = start_1;
        int c = cell.compareTo(start_2);
        // Equal values still widen when range2's bound is inclusive
        // (FIRST_VALUE) and range1's is exclusive (AFTER_LAST_VALUE).
        if (c > 0 ||
            c == 0 && start_flag_1 == SelectableRange.AFTER_LAST_VALUE &&
                      start_flag_2 == SelectableRange.FIRST_VALUE) {
          start_1 = start_2;
          start_flag_1 = start_flag_2;
        }
      }
      else {
        start_1 = start_2;
        start_flag_1 = start_flag_2;
      }
    }

    // Extend the upper bound of range1 up to range2's upper bound when
    // range2 ends later (LAST_IN_SET is the absolute maximum).
    if (end_1 != SelectableRange.LAST_IN_SET) {
      if (end_2 != SelectableRange.LAST_IN_SET) {
        TObject cell = (TObject) end_1;  // NOTE(review): redundant cast.
        int c = cell.compareTo(end_2);
        if (c < 0 ||
            c == 0 && end_flag_1 == SelectableRange.BEFORE_FIRST_VALUE &&
                      end_flag_2 == SelectableRange.LAST_VALUE) {
          end_1 = end_2;
          end_flag_1 = end_flag_2;
        }
      }
      else {
        end_1 = end_2;
        end_flag_1 = end_flag_2;
      }
    }

    return new SelectableRange(start_flag_1, start_1, end_flag_1, end_1);
  }

  /**
   * Intersects this range with the given Operator and value constraint.
   * For example, if a range is 'a' -> [END] and the given operator is '<=' and
   * the value is 'z' the result range is 'a' -> 'z'.
*/ public void intersect(Operator op, TObject val) { int sz = range_set.size(); ListIterator i = range_set.listIterator(); if (op.is("<>") || op.is("is not")) { boolean null_check = op.is("<>"); while (i.hasNext()) { SelectableRange range = (SelectableRange) i.next(); SelectableRange left_range = intersectRange(range, Operator.get("<"), val, null_check); SelectableRange right_range = intersectRange(range, Operator.get(">"), val, null_check); i.remove(); if (left_range != null) { i.add(left_range); } if (right_range != null) { i.add(right_range); } } } else { boolean null_check = !op.is("is"); while (i.hasNext()) { SelectableRange range = (SelectableRange) i.next(); range = intersectRange(range, op, val, null_check); if (range == null) { i.remove(); } else { i.set(range); } } } } /** * Unions this range with the given Operator and value constraint. */ public void union(Operator op, TObject val) { throw new Error("PENDING"); } /** * Unions the current range set with the given range set. */ public void union(SelectableRangeSet union_to) { ArrayList input_set = union_to.range_set; int in_sz = input_set.size(); for (int n = 0; n < in_sz; ++n) { // The range to merge in. 
SelectableRange in_range = (SelectableRange) input_set.get(n); // For each range in this set int sz = range_set.size(); ListIterator i = range_set.listIterator(); while (i.hasNext()) { SelectableRange range = (SelectableRange) i.next(); if (rangeIntersectedBy(in_range, range)) { i.remove(); in_range = changeRangeSizeToEncompass(in_range, range); } } // Insert into sorted position byte start_flag = in_range.getStartFlag(); TObject start = in_range.getStart(); byte end_flag = in_range.getEndFlag(); TObject end = in_range.getEnd(); if (start == SelectableRange.FIRST_IN_SET) { range_set.add(0, in_range); } else { TObject start_cell = start; i = range_set.listIterator(); while (i.hasNext()) { SelectableRange range = (SelectableRange) i.next(); TObject cur_start = range.getStart(); if (cur_start != SelectableRange.FIRST_IN_SET) { if (cur_start.compareTo(start_cell) > 0) { i.previous(); break; } } } i.add(in_range); } } } /** * Returns the range as an array of SelectableRange or an empty array if * there is no range. */ public SelectableRange[] toSelectableRangeArray() { int sz = range_set.size(); SelectableRange[] ranges = new SelectableRange[sz]; for (int i = 0; i < sz; ++i) { ranges[i] = (SelectableRange) range_set.get(i); } return ranges; } /** * Outputs this range as a string, for diagnostic and testing purposes. */ public String toString() { StringBuffer buf = new StringBuffer(); if (range_set.size() == 0) { return "(NO RANGE)"; } for (int i = 0; i < range_set.size(); ++i) { buf.append(range_set.get(i)); buf.append(", "); } return new String(buf); } /** * A test application. 
 */
  public static void main(String[] args) {
    // Exercises 'intersect' and 'union' over string values and prints the
    // evolving range set after each operation for manual inspection.
    TType ttype = TType.STRING_TYPE;
    SelectableRangeSet range_set = new SelectableRangeSet();
    System.out.println(range_set);
    range_set.intersect(Operator.get(">="), new TObject(ttype, "2"));
    System.out.println(range_set);
    range_set.intersect(Operator.get("<>"), new TObject(ttype, "4"));
    System.out.println(range_set);
    range_set.intersect(Operator.get("<>"), new TObject(ttype, "2"));
    System.out.println(range_set);
    range_set.intersect(Operator.get("<>"), new TObject(ttype, "3"));
    System.out.println(range_set);
    range_set.intersect(Operator.get("<>"), new TObject(ttype, "2"));
    System.out.println(range_set);
    range_set.intersect(Operator.get("<>"), new TObject(ttype, "1"));
    System.out.println(range_set);
    range_set.intersect(Operator.get(">="), new TObject(ttype, "3"));
    System.out.println(range_set);
    range_set.intersect(Operator.get("<="), new TObject(ttype, "5"));
    System.out.println(range_set);
    range_set.intersect(Operator.get("<"), new TObject(ttype, "5"));
    System.out.println(range_set);
    range_set.intersect(Operator.get(">="), new TObject(ttype, "6"));
    System.out.println(range_set);

    System.out.println("---");
    // Union of three sets built from: = "k";  (<> "d" and < "g");  > "o".
    SelectableRangeSet range1 = new SelectableRangeSet();
    range1.intersect(Operator.get("="), new TObject(ttype, "k"));
    SelectableRangeSet range2 = new SelectableRangeSet();
    range2.intersect(Operator.get("<>"), new TObject(ttype, "d"));
    range2.intersect(Operator.get("<"), new TObject(ttype, "g"));
    SelectableRangeSet range3 = new SelectableRangeSet();
    range3.intersect(Operator.get(">"), new TObject(ttype, "o"));
    range2.union(range3);
    range1.union(range2);
    System.out.println(range1);
  }

}
mckoisqldb-1.0.6/src/main/java/com/mckoi/database/SelectableScheme.java000066400000000000000000000370371330501023400257600ustar00rootroot00000000000000/**
 * com.mckoi.database.SelectableScheme  12 Mar 1998
 *
 * Mckoi SQL Database ( http://www.mckoi.com/database )
 * Copyright (C) 2000-2018 Diehl and Associates, Inc.
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import com.mckoi.util.IntegerVector; import com.mckoi.util.IndexComparator; import com.mckoi.util.BlockIntegerList; import com.mckoi.debug.DebugLogger; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; /** * Represents a base class for a mechanism to select ranges from a given set. * Such schemes could include BinaryTree, Hashtable or just a blind search. *

* A given element in the set is specified through a 'row' integer whose * contents can be obtained through the 'table.getCellContents(column, row)'. * Every scheme is given a table and column number that the set refers to. * While a given set element is refered to as a 'row', the integer is really * only a pointer into the set list which can be de-referenced with a call to * table.getCellContents(row). Better performance schemes will keep such * calls to a minimum. *

 * A scheme may choose to retain knowledge about a given element when it is
 * added or removed from the set, such as a BinaryTree that catalogs all
 * elements with respect to each other.
 *
 * @author Tobias Downer
 */

public abstract class SelectableScheme {

  /**
   * Shared immutable index lists for the trivial 0 and 1 element sort
   * results (see internalOrderIndexSet) - saves allocating per call.
   */
  protected static final BlockIntegerList EMPTY_LIST;
  protected static final BlockIntegerList ONE_LIST;
  static {
    EMPTY_LIST = new BlockIntegerList();
    EMPTY_LIST.setImmutable();
    ONE_LIST = new BlockIntegerList();
    ONE_LIST.add(0);
    ONE_LIST.setImmutable();
  }

  /**
   * The table data source with the column this scheme indexes.
   */
  private final TableDataSource table;

  /**
   * The column number in the tree this tree helps.
   */
  private final int column;

  /**
   * Set to true if this scheme is immutable (can't be changed).
   */
  private boolean immutable = false;

  /**
   * The constructor for all schemes.
   */
  public SelectableScheme(TableDataSource table, int column) {
    this.table = table;
    this.column = column;
  }

  /**
   * Returns the Table.
   */
  protected final TableDataSource getTable() {
    return table;
  }

  /**
   * Returns the global transaction system.
   */
  protected final TransactionSystem getSystem() {
    return table.getSystem();
  }

  /**
   * Returns the DebugLogger object to log debug messages to.
   */
  protected final DebugLogger Debug() {
    return getSystem().Debug();
  }

  /**
   * Returns the column this scheme is indexing in the table.
   */
  protected final int getColumn() {
    return column;
  }

  /**
   * Obtains the given cell in the row from the table.
   */
  protected final TObject getCellContents(int row) {
    return table.getCellContents(column, row);
  }

  /**
   * Sets this scheme to immutable.
   */
  public final void setImmutable() {
    immutable = true;
  }

  /**
   * Returns true if this scheme is immutable.
   */
  public final boolean isImmutable() {
    return immutable;
  }

  /**
   * Diagnostic information.
*/ public String toString() { // Name of the table String table_name; if (table instanceof DefaultDataTable) { table_name = ((DefaultDataTable) table).getTableName().toString(); } else { table_name = "VirtualTable"; } StringBuffer buf = new StringBuffer(); buf.append("[ SelectableScheme "); buf.append(super.toString()); buf.append(" for table: "); buf.append(table_name); buf.append("]"); return new String(buf); } /** * Writes the entire contents of the scheme to an OutputStream object. */ public abstract void writeTo(OutputStream out) throws IOException; /** * Reads the entire contents of the scheme from a InputStream object. If the * scheme is full of any information it throws an exception. */ public abstract void readFrom(InputStream in) throws IOException; /** * Returns an exact copy of this scheme including any optimization * information. The copied scheme is identical to the original but does not * share any parts. Modifying any part of the copied scheme will have no * effect on the original and vice versa. *

* The newly copied scheme can be given a new table source. If * 'immutable' is true, then the resultant scheme is an immutable version * of the parent. An immutable version may share information with the * copied version so can not be changed. *

 * NOTE: Even if the scheme maintains no state you should still be careful
 * to ensure a fresh SelectableScheme object is returned here.
 */
  public abstract SelectableScheme copy(TableDataSource table,
                                        boolean immutable);

  /**
   * Dispose and invalidate this scheme.
   */
  public abstract void dispose();

  /**
   * =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
   *  Abstract methods for selection of rows, and maintenance of rows
   * =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
   */

  /**
   * Inserts the given element into the set.  This is called just after a
   * row has been initially added to a table.
   */
  abstract void insert(int row);

  /**
   * Removes the given element from the set.  This is called just before the
   * row is removed from the table.
   */
  abstract void remove(int row);

  /**
   * Returns a BlockIntegerList that represents the given row_set sorted
   * in the order of this scheme.  The values in 'row_set' must be references
   * to rows in the domain of the table this scheme represents.
   * <p>
   * The returned set must be stable, meaning if values are equal they keep
   * the same ordering.
   * <p>
   * Note that the default implementation of this method can often be
   * optimized.  For example, InsertSearch uses a secondary RID list to sort
   * items if the given list is over a certain size.
   */
  public BlockIntegerList internalOrderIndexSet(final IntegerVector row_set) {
    // The length of the set to order
    int row_set_length = row_set.size();

    // Trivial cases where sorting is not required:
    // NOTE: We use immutable objects to save some memory.
    if (row_set_length == 0) {
      return EMPTY_LIST;
    }
    else if (row_set_length == 1) {
      return ONE_LIST;
    }

    // This will be 'row_set' sorted by its entry lookup.  This must only
    // contain indices to row_set entries.
    BlockIntegerList new_set = new BlockIntegerList();

    if (row_set_length <= 250000) {
      // If the subset is less than or equal to 250,000 elements, we generate
      // an array in memory that contains all values in the set and we sort
      // it.  This requires use of memory from the heap but is faster than
      // the no heap use method.
      final TObject[] subset_list = new TObject[row_set_length];
      for (int i = 0; i < row_set_length; ++i) {
        subset_list[i] = getCellContents(row_set.intAt(i));
      }

      // The comparator we use to sort - compares a cached cell against a
      // candidate value (only the (int, Object) form is ever invoked).
      IndexComparator comparator = new IndexComparator() {
        public int compare(int index, Object val) {
          TObject cell = subset_list[index];
          return cell.compareTo((TObject) val);
        }
        public int compare(int index1, int index2) {
          throw new Error("Shouldn't be called!");
        }
      };

      // Fill new_set with the set { 0, 1, 2, .... , row_set_length }
      for (int i = 0; i < row_set_length; ++i) {
        TObject cell = subset_list[i];
        new_set.insertSort(cell, i, comparator);
      }

    }
    else {
      // This is the no additional heap use method to sorting the sub-set.

      // The comparator we use to sort - re-fetches the cell from the table
      // on every compare (slower, but no per-element heap allocation).
      IndexComparator comparator = new IndexComparator() {
        public int compare(int index, Object val) {
          TObject cell = getCellContents(row_set.intAt(index));
          return cell.compareTo((TObject) val);
        }
        public int compare(int index1, int index2) {
          throw new Error("Shouldn't be called!");
        }
      };

      // Fill new_set with the set { 0, 1, 2, .... , row_set_length }
      for (int i = 0; i < row_set_length; ++i) {
        TObject cell = getCellContents(row_set.intAt(i));
        new_set.insertSort(cell, i, comparator);
      }
    }

    return new_set;
  }

  /**
   * Asks the Scheme for a SelectableScheme object that describes a sub-set
   * of the set handled by this Scheme.  Since a Table stores a subset
   * of a given DataTable, we pass this as the argument.  It returns a
   * new SelectableScheme that orders the rows in the given columns order.
   * The 'column' variable specifies the column index of this column in the
   * given table.
   */
  public SelectableScheme getSubsetScheme(Table subset_table,
                                          int subset_column) {

    // Resolve table rows in this table scheme domain.
    IntegerVector row_set = new IntegerVector(subset_table.getRowCount());
    RowEnumeration e = subset_table.rowEnumeration();
    while (e.hasMoreRows()) {
      row_set.addInt(e.nextRowIndex());
    }
    subset_table.setToRowTableDomain(subset_column, row_set, getTable());

    // Generates an IntegerVector which contains indices into 'row_set' in
    // sorted order.
    BlockIntegerList new_set = internalOrderIndexSet(row_set);

    // Our 'new_set' should be the same size as 'row_set'
    if (new_set.size() != row_set.size()) {
      throw new RuntimeException("Internal sort error in finding sub-set.");
    }

    // Set up a new SelectableScheme with the sorted index set.
    // Move the sorted index set into the new scheme.
    InsertSearch is = new InsertSearch(subset_table, subset_column, new_set);
    // Don't let subset schemes create uid caches.
    is.RECORD_UID = false;
    return is;
  }

  /**
   * These are the select operations that are the main purpose of the scheme.
 * They retrieve the given information from the set.  Different schemes will
 * have varying performance on different types of data sets.
 * The select operations must *always* return a resultant row set that
 * is sorted from lowest to highest.
 */
  public IntegerVector selectAll() {
    return selectRange(new SelectableRange(
             SelectableRange.FIRST_VALUE, SelectableRange.FIRST_IN_SET,
             SelectableRange.LAST_VALUE, SelectableRange.LAST_IN_SET));
  }

  public IntegerVector selectFirst() {
    // NOTE: This will find NULL at start which is probably wrong.  The
    //   first value should be the first non null value.
    return selectRange(new SelectableRange(
             SelectableRange.FIRST_VALUE, SelectableRange.FIRST_IN_SET,
             SelectableRange.LAST_VALUE, SelectableRange.FIRST_IN_SET));
  }

  public IntegerVector selectNotFirst() {
    // NOTE: This will find NULL at start which is probably wrong.  The
    //   first value should be the first non null value.
    return selectRange(new SelectableRange(
             SelectableRange.AFTER_LAST_VALUE, SelectableRange.FIRST_IN_SET,
             SelectableRange.LAST_VALUE, SelectableRange.LAST_IN_SET));
  }

  public IntegerVector selectLast() {
    return selectRange(new SelectableRange(
             SelectableRange.FIRST_VALUE, SelectableRange.LAST_IN_SET,
             SelectableRange.LAST_VALUE, SelectableRange.LAST_IN_SET));
  }

  public IntegerVector selectNotLast() {
    return selectRange(new SelectableRange(
             SelectableRange.FIRST_VALUE, SelectableRange.FIRST_IN_SET,
             SelectableRange.BEFORE_FIRST_VALUE, SelectableRange.LAST_IN_SET));
  }

  /**
   * Selects all values in the column that are not null.
   */
  public IntegerVector selectAllNonNull() {
    // Everything strictly after the NULL value (NULL sorts first).
    return selectRange(new SelectableRange(
             SelectableRange.AFTER_LAST_VALUE, TObject.nullVal(),
             SelectableRange.LAST_VALUE, SelectableRange.LAST_IN_SET));
  }

  public IntegerVector selectEqual(TObject ob) {
    // Comparison against NULL never matches - empty result.
    if (ob.isNull()) {
      return new IntegerVector(0);
    }
    return selectRange(new SelectableRange(
                                 SelectableRange.FIRST_VALUE, ob,
                                 SelectableRange.LAST_VALUE, ob));
  }

  public IntegerVector selectNotEqual(TObject ob) {
    if (ob.isNull()) {
      return new IntegerVector(0);
    }
    // Two ranges: (NULL, ob) and (ob, end] - NULL itself is excluded.
    return selectRange(new SelectableRange[]
         { new SelectableRange(
                   SelectableRange.AFTER_LAST_VALUE, TObject.nullVal(),
                   SelectableRange.BEFORE_FIRST_VALUE, ob)
         , new SelectableRange(
                   SelectableRange.AFTER_LAST_VALUE, ob,
                   SelectableRange.LAST_VALUE, SelectableRange.LAST_IN_SET)
         });
  }

  public IntegerVector selectGreater(TObject ob) {
    if (ob.isNull()) {
      return new IntegerVector(0);
    }
    return selectRange(new SelectableRange(
               SelectableRange.AFTER_LAST_VALUE, ob,
               SelectableRange.LAST_VALUE, SelectableRange.LAST_IN_SET));
  }

  public IntegerVector selectLess(TObject ob) {
    if (ob.isNull()) {
      return new IntegerVector(0);
    }
    return selectRange(new SelectableRange(
               SelectableRange.AFTER_LAST_VALUE, TObject.nullVal(),
               SelectableRange.BEFORE_FIRST_VALUE, ob));
  }

  public IntegerVector selectGreaterOrEqual(TObject ob) {
    if (ob.isNull()) {
      return new IntegerVector(0);
    }
    return selectRange(new SelectableRange(
               SelectableRange.FIRST_VALUE, ob,
               SelectableRange.LAST_VALUE, SelectableRange.LAST_IN_SET));
  }

  public IntegerVector selectLessOrEqual(TObject ob) {
    if (ob.isNull()) {
      return new IntegerVector(0);
    }
    return selectRange(new SelectableRange(
               SelectableRange.AFTER_LAST_VALUE, TObject.nullVal(),
               SelectableRange.LAST_VALUE, ob));
  }

  // Inclusive of rows that are >= ob1 and < ob2
  // NOTE: This is not compatible with SQL BETWEEN predicate which is all
  //   rows that are >= ob1 and <= ob2
  public IntegerVector selectBetween(TObject ob1, TObject ob2) {
    if (ob1.isNull() || ob2.isNull()) {
      return new IntegerVector(0);
    }
    return selectRange(new SelectableRange(
               SelectableRange.FIRST_VALUE, ob1,
               SelectableRange.BEFORE_FIRST_VALUE, ob2));
  }

  /**
   * Selects the given range of values from this index.  The SelectableRange
   * must contain a 'start' value that compares <= to the 'end' value.
   * <p>
   * This must guarantee that the returned set is sorted from lowest to
   * highest value.
   */
  abstract IntegerVector selectRange(SelectableRange range);

  /**
   * Selects a set of ranges from this index.  The ranges must not overlap and
   * each range must contain a 'start' value that compares <= to the 'end'
   * value.  Every range in the array must represent a range that's lower than
   * the preceding range (if it exists).
   *

* If the above rules are enforced (as they must be) then this method will * return a set that is sorted from lowest to highest value. *

 * This must guarantee that the returned set is sorted from lowest to
 * highest value.
 */
  abstract IntegerVector selectRange(SelectableRange[] ranges);

}
mckoisqldb-1.0.6/src/main/java/com/mckoi/database/SequenceManager.java000066400000000000000000000705231330501023400256300ustar00rootroot00000000000000/**
 * com.mckoi.database.SequenceManager  21 Feb 2003
 *
 * Mckoi SQL Database ( http://www.mckoi.com/database )
 * Copyright (C) 2000-2018 Diehl and Associates, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package com.mckoi.database;

import com.mckoi.database.global.StringObject;
import com.mckoi.util.IntegerVector;
import com.mckoi.util.BigNumber;
import java.util.HashMap;
import java.util.Iterator;

/**
 * An object that manages the creation and removal of sequence keys, and that
 * offers access to the sequence values (possibly cached).  When the sequence
 * table is changed, this opens an optimized transaction on the database and
 * manipulates the SequenceInfo table.
 *
 * @author Tobias Downer
 */

final class SequenceManager {

  /**
   * The TableDataConglomerate object.
   */
  private TableDataConglomerate conglomerate;

  /**
   * A hashmap that maps from the TableName of the sequence key
   * to the object that manages this sequence (SequenceGenerator).
   * (TableName) -> (SequenceGenerator)
   */
  private HashMap sequence_key_map;

  /**
   * A static TObject that represents numeric 1.
   */
  private static final TObject ONE_VAL = TObject.intVal(1);

  /**
   * A static TObject that represents boolean true.
 */
  private static final TObject TRUE_VAL = TObject.booleanVal(true);

  /**
   * Constructs the object.
   */
  SequenceManager(TableDataConglomerate conglomerate) {
    this.conglomerate = conglomerate;
    sequence_key_map = new HashMap();
  }

  /**
   * Returns a new Transaction object for manipulating and querying the system
   * state.
   */
  private Transaction getTransaction() {
    // Should this transaction be optimized for the access patterns we generate
    // here?
    return conglomerate.createTransaction();
  }

  /**
   * Returns a SequenceGenerator object representing the sequence generator
   * with the given name.
   * <p>
   * NOTE(review): only custom generators (type != 1) are put into
   * 'sequence_key_map' - native generators are re-read from the sequence
   * info table on every call; confirm whether that is intended.  Also this
   * method reads the cache without synchronization while 'flushGenerator'
   * is synchronized - confirm the intended locking discipline.
   */
  private SequenceGenerator getGenerator(TableName name) {
    // Is the generator already in the cache?
    SequenceGenerator generator =
                            (SequenceGenerator) sequence_key_map.get(name);

    if (generator == null) {
      // This sequence generator is not in the cache so we need to query the
      // sequence table for this.
      Transaction sequence_access_transaction = getTransaction();
      try {
        MutableTableDataSource seqi =
           sequence_access_transaction.getTable(
                                   TableDataConglomerate.SYS_SEQUENCE_INFO);
        SimpleTableQuery query = new SimpleTableQuery(seqi);

        StringObject schema_val = StringObject.fromString(name.getSchema());
        StringObject name_val = StringObject.fromString(name.getName());
        IntegerVector ivec = query.selectIndexesEqual(2, name_val,
                                                      1, schema_val);

        if (ivec.size() == 0) {
          throw new StatementException("Sequence generator '" + name +
                                       "' not found.");
        }
        else if (ivec.size() > 1) {
          throw new RuntimeException(
                      "Assert failed: multiple sequence keys with same name.");
        }

        int row_i = ivec.intAt(0);
        TObject sid = seqi.getCellContents(0, row_i);
        TObject sschema = seqi.getCellContents(1, row_i);
        TObject sname = seqi.getCellContents(2, row_i);
        TObject stype = seqi.getCellContents(3, row_i);

        long id_val = sid.toBigNumber().longValue();

        query.dispose();

        // Is this a custom sequence generator?
        // (stype == 1) == true
        if (stype.operatorEquals(ONE_VAL).valuesEqual(TRUE_VAL)) {
          // Native generator.
          generator = new SequenceGenerator(id_val, name);
        }
        else {
          // Query the sequence table.
          MutableTableDataSource seq =
             sequence_access_transaction.getTable(
                                        TableDataConglomerate.SYS_SEQUENCE);
          query = new SimpleTableQuery(seq);

          ivec = query.selectIndexesEqual(0, sid);

          if (ivec.size() == 0) {
            throw new RuntimeException(
                      "Sequence table does not contain sequence information.");
          }
          if (ivec.size() > 1) {
            throw new RuntimeException(
                        "Sequence table contains multiple generators for id.");
          }

          row_i = ivec.intAt(0);
          BigNumber last_value = seq.getCellContents(1, row_i).toBigNumber();
          BigNumber increment = seq.getCellContents(2, row_i).toBigNumber();
          BigNumber minvalue = seq.getCellContents(3, row_i).toBigNumber();
          BigNumber maxvalue = seq.getCellContents(4, row_i).toBigNumber();
          BigNumber start = seq.getCellContents(5, row_i).toBigNumber();
          BigNumber cache = seq.getCellContents(6, row_i).toBigNumber();
          Boolean cycle = seq.getCellContents(7, row_i).toBoolean();

          query.dispose();

          generator = new SequenceGenerator(id_val, name,
                 last_value.longValue(), increment.longValue(),
                 minvalue.longValue(), maxvalue.longValue(),
                 start.longValue(), cache.longValue(), cycle.booleanValue());

          // Put the generator in the cache
          sequence_key_map.put(name, generator);
        }

      }
      finally {
        // Make sure we always close and commit the transaction.
        try {
          sequence_access_transaction.closeAndCommit();
        }
        catch (TransactionException e) {
          conglomerate.Debug().writeException(e);
          throw new RuntimeException("Transaction Error: " + e.getMessage());
        }
      }

    }

    // Return the generator
    return generator;
  }

  /**
   * Updates the state of the sequence key in the sequence tables in the
   * database.  The update occurs on an independant transaction.
   */
  private void updateGeneratorState(SequenceGenerator generator) {
    // We need to update the sequence key state.
    Transaction sequence_access_transaction = getTransaction();
    try {
      // The sequence table
      MutableTableDataSource seq = sequence_access_transaction.getTable(
                                          TableDataConglomerate.SYS_SEQUENCE);
      // Find the row with the id for this generator.
      SimpleTableQuery query = new SimpleTableQuery(seq);
      IntegerVector ivec = query.selectIndexesEqual(0,
                                          BigNumber.fromLong(generator.id));
      // Checks
      if (ivec.size() == 0) {
        throw new StatementException("Sequence '" + generator.name +
                                     "' not found.");
      }
      else if (ivec.size() > 1) {
        throw new RuntimeException(
                                  "Assert failed: multiple id for sequence.");
      }

      // Get the row position
      int row_i = ivec.intAt(0);

      // Create the RowData
      RowData row_data = new RowData(seq);

      // Set the content of the row data
      row_data.setColumnDataFromTObject(0, TObject.longVal(generator.id));
      row_data.setColumnDataFromTObject(1,
                                      TObject.longVal(generator.last_value));
      row_data.setColumnDataFromTObject(2,
                                    TObject.longVal(generator.increment_by));
      row_data.setColumnDataFromTObject(3,
                                       TObject.longVal(generator.min_value));
      row_data.setColumnDataFromTObject(4,
                                       TObject.longVal(generator.max_value));
      row_data.setColumnDataFromTObject(5, TObject.longVal(generator.start));
      row_data.setColumnDataFromTObject(6, TObject.longVal(generator.cache));
      row_data.setColumnDataFromTObject(7,
                                       TObject.booleanVal(generator.cycle));

      // Update the row
      seq.updateRow(row_i, row_data);

      // Dispose the resources
      query.dispose();

    }
    finally {
      // Close and commit the transaction
      try {
        sequence_access_transaction.closeAndCommit();
      }
      catch (TransactionException e) {
        conglomerate.Debug().writeException(e);
        throw new RuntimeException("Transaction Error: " + e.getMessage());
      }
    }

  }

  /**
   * Flushes a sequence generator from the cache.  This should be used when a
   * sequence generator is altered or dropped from the database.
*/ synchronized void flushGenerator(TableName name) { sequence_key_map.remove(name); } /** * Static convenience - adds an entry to the Sequence table for a native * table in the database. This acts as a gateway between the native sequence * table function and the custom sequence generator. Note that some of the * system tables and all of the VIEW tables will not have native sequence * generators and thus not have an entry in the sequence table. */ static void addNativeTableGenerator(Transaction transaction, TableName table_name) { // If the SYS_SEQUENCE or SYS_SEQUENCE_INFO tables don't exist then // We can't add or remove native tables if (table_name.equals(TableDataConglomerate.SYS_SEQUENCE) || table_name.equals(TableDataConglomerate.SYS_SEQUENCE_INFO) || !transaction.tableExists(TableDataConglomerate.SYS_SEQUENCE) || !transaction.tableExists(TableDataConglomerate.SYS_SEQUENCE_INFO)) { return; } MutableTableDataSource table = transaction.getTable(TableDataConglomerate.SYS_SEQUENCE_INFO); long unique_id = transaction.nextUniqueID(TableDataConglomerate.SYS_SEQUENCE_INFO); RowData row_data = new RowData(table); row_data.setColumnDataFromObject(0, new Long(unique_id)); row_data.setColumnDataFromObject(1, table_name.getSchema()); row_data.setColumnDataFromObject(2, table_name.getName()); row_data.setColumnDataFromObject(3, new Long(1)); table.addRow(row_data); } /** * Static convenience - removes an entry in the Sequence table for a native * table in the database. 
 */
static void removeNativeTableGenerator(Transaction transaction,
                                       TableName table_name) {

  // If the SYS_SEQUENCE or SYS_SEQUENCE_INFO tables don't exist then
  // We can't add or remove native tables
  if (table_name.equals(TableDataConglomerate.SYS_SEQUENCE) ||
      table_name.equals(TableDataConglomerate.SYS_SEQUENCE_INFO) ||
      !transaction.tableExists(TableDataConglomerate.SYS_SEQUENCE) ||
      !transaction.tableExists(TableDataConglomerate.SYS_SEQUENCE_INFO)) {
    return;
  }

  // The SEQUENCE and SEQUENCE_INFO table
  MutableTableDataSource seq =
      transaction.getTable(TableDataConglomerate.SYS_SEQUENCE);
  MutableTableDataSource seqi =
      transaction.getTable(TableDataConglomerate.SYS_SEQUENCE_INFO);

  // Select SEQUENCE_INFO rows whose name (column 2) and schema (column 1)
  // match the given table name.
  SimpleTableQuery query = new SimpleTableQuery(seqi);
  IntegerVector ivec = query.selectIndexesEqual(2,
                               TObject.stringVal(table_name.getName()),
                           1,  TObject.stringVal(table_name.getSchema()));

  // Remove the corresponding entry in the SEQUENCE table
  for (int i = 0; i < ivec.size(); ++i) {
    int row_i = ivec.intAt(i);
    // The sequence id (column 0) links SEQUENCE_INFO rows to SEQUENCE rows.
    TObject sid = seqi.getCellContents(0, row_i);

    SimpleTableQuery query2 = new SimpleTableQuery(seq);
    IntegerVector ivec2 = query2.selectIndexesEqual(0, sid);
    for (int n = 0; n < ivec2.size(); ++n) {
      // Remove entry from the sequence table.
      seq.removeRow(ivec2.intAt(n));
    }

    // Remove entry from the sequence info table
    seqi.removeRow(row_i);

    query2.dispose();
  }

  query.dispose();
}

/**
 * Creates a new sequence generator with the given name and details. Note
 * that this method does not check if the generator name clashes with an
 * existing database object.
*/ static void createSequenceGenerator(Transaction transaction, TableName table_name, long start_value, long increment_by, long min_value, long max_value, long cache, boolean cycle) { // If the SYS_SEQUENCE or SYS_SEQUENCE_INFO tables don't exist then // we can't create the sequence generator if (!transaction.tableExists(TableDataConglomerate.SYS_SEQUENCE) || !transaction.tableExists(TableDataConglomerate.SYS_SEQUENCE_INFO)) { throw new RuntimeException("System sequence tables do not exist."); } // The SEQUENCE and SEQUENCE_INFO table MutableTableDataSource seq = transaction.getTable(TableDataConglomerate.SYS_SEQUENCE); MutableTableDataSource seqi = transaction.getTable(TableDataConglomerate.SYS_SEQUENCE_INFO); // All rows in 'sequence_info' that match this table name. SimpleTableQuery query = new SimpleTableQuery(seqi); IntegerVector ivec = query.selectIndexesEqual(2, TObject.stringVal(table_name.getName()), 1, TObject.stringVal(table_name.getSchema())); if (ivec.size() > 0) { throw new RuntimeException( "Sequence generator with name '" + table_name + "' already exists."); } // Dispose the query object query.dispose(); // Generate a unique id for the sequence info table long unique_id = transaction.nextUniqueID(TableDataConglomerate.SYS_SEQUENCE_INFO); // Insert the new row RowData row_data = new RowData(seqi); row_data.setColumnDataFromObject(0, new Long(unique_id)); row_data.setColumnDataFromObject(1, table_name.getSchema()); row_data.setColumnDataFromObject(2, table_name.getName()); row_data.setColumnDataFromObject(3, new Long(2)); seqi.addRow(row_data); // Insert into the SEQUENCE table. 
row_data = new RowData(seq); row_data.setColumnDataFromObject(0, new Long(unique_id)); row_data.setColumnDataFromObject(1, new Long(start_value)); row_data.setColumnDataFromObject(2, new Long(increment_by)); row_data.setColumnDataFromObject(3, new Long(min_value)); row_data.setColumnDataFromObject(4, new Long(max_value)); row_data.setColumnDataFromObject(5, new Long(start_value)); row_data.setColumnDataFromObject(6, new Long(cache)); row_data.setColumnDataFromObject(7, new Boolean(cycle)); seq.addRow(row_data); } static void dropSequenceGenerator(Transaction transaction, TableName table_name) { // If the SYS_SEQUENCE or SYS_SEQUENCE_INFO tables don't exist then // we can't create the sequence generator if (!transaction.tableExists(TableDataConglomerate.SYS_SEQUENCE) || !transaction.tableExists(TableDataConglomerate.SYS_SEQUENCE_INFO)) { throw new RuntimeException("System sequence tables do not exist."); } // Remove the table generator (delete SEQUENCE_INFO and SEQUENCE entry) removeNativeTableGenerator(transaction, table_name); } /** * Returns the next value from the sequence generator. This will atomically * increment the sequence counter. */ synchronized long nextValue(SimpleTransaction transaction, TableName name) { SequenceGenerator generator = getGenerator(name); if (generator.type == 1) { // Native generator return transaction.nextUniqueID( new TableName(name.getSchema(), name.getName())); } else { // Custom sequence generator long current_val = generator.current_val; // Increment the current value. generator.incrementCurrentValue(); // Have we reached the current cached point? if (current_val == generator.last_value) { // Increment the generator for (int i = 0; i < generator.cache; ++i) { generator.incrementLastValue(); } // Update the state updateGeneratorState(generator); } return generator.current_val; } } /** * Returns the current value from the sequence generator. 
*/ synchronized long curValue(SimpleTransaction transaction, TableName name) { SequenceGenerator generator = getGenerator(name); if (generator.type == 1) { // Native generator return transaction.nextUniqueID( new TableName(name.getSchema(), name.getName())); } else { // Custom sequence generator return generator.current_val; } } /** * Sets the current value of the sequence generator. */ synchronized void setValue(SimpleTransaction transaction, TableName name, long value) { SequenceGenerator generator = getGenerator(name); if (generator.type == 1) { // Native generator transaction.setUniqueID( new TableName(name.getSchema(), name.getName()), value); } else { // Custom sequence generator generator.current_val = value; generator.last_value = value; // Update the state updateGeneratorState(generator); } } /** * Returns an InternalTableInfo object used to model the list of sequence * generators that are accessible within the given Transaction object. This * is used to model all sequence generators that have been defined as tables. */ static InternalTableInfo createInternalTableInfo(Transaction transaction) { return new SequenceInternalTableInfo(transaction); } // ---------- Inner classes ---------- /** * An object that encapsulates information about the sequence key. */ private static class SequenceGenerator { /** * The current value of this sequence generator. */ long current_val; /** * The id value of this sequence key. */ long id; /** * The name of this sequence key. */ TableName name; /** * The type of this sequence key. */ int type; // The following values are only set if 'type' is not a native table // sequence. /** * The last value of this sequence key. This value represents the value * of the sequence key in the persistence medium. */ long last_value; /** * The number we increment the sequence key by. */ long increment_by; /** * The minimum value of the sequence key. */ long min_value; /** * The maximum value of the sequence key. 
 */
long max_value;

/**
 * The start value of the sequence generator.
 */
long start;

/**
 * How many values we cache.
 */
long cache;

/**
 * True if the sequence key is cycled.
 */
boolean cycle;

// Constructor for a native table sequence (type 1) - only 'id' and 'name'
// are meaningful for this kind of generator.
SequenceGenerator(long id, TableName name) {
  type = 1;
  this.id = id;
  this.name = name;
}

// Constructor for a custom sequence generator (type 2).  The in-memory
// current value starts equal to the persisted 'last_value'.
SequenceGenerator(long id, TableName name, long last_value,
                  long increment_by, long min_value, long max_value,
                  long start, long cache, boolean cycle) {
  type = 2;
  this.id = id;
  this.name = name;
  this.last_value = last_value;
  this.current_val = last_value;
  this.increment_by = increment_by;
  this.min_value = min_value;
  this.max_value = max_value;
  this.start = start;
  this.cache = cache;
  this.cycle = cycle;
}

// Returns 'val' advanced by 'increment_by'.  When a bound is exceeded the
// value wraps to the opposite bound if 'cycle' is enabled, otherwise a
// StatementException is raised.
private long incrementValue(long val) {
  val += increment_by;
  if (val > max_value) {
    if (cycle) {
      // Exceeded the maximum so wrap around to the minimum.
      val = min_value;
    }
    else {
      throw new StatementException("Sequence out of bounds.");
    }
  }
  if (val < min_value) {
    if (cycle) {
      // Fell below the minimum (negative increment) so wrap to the maximum.
      val = max_value;
    }
    else {
      throw new StatementException("Sequence out of bounds.");
    }
  }
  return val;
}

// Advances the transient current value by one increment.
void incrementCurrentValue() {
  current_val = incrementValue(current_val);
}

// Advances the persisted-value watermark ('last_value') by one increment.
void incrementLastValue() {
  last_value = incrementValue(last_value);
}

}

/**
 * An object that models the list of sequences as table objects in a
 * transaction.
*/ private static class SequenceInternalTableInfo implements InternalTableInfo { Transaction transaction; SequenceInternalTableInfo(Transaction transaction) { this.transaction = transaction; } private static DataTableDef createDataTableDef(String schema, String name) { // Create the DataTableDef that describes this entry DataTableDef def = new DataTableDef(); def.setTableName(new TableName(schema, name)); // Add column definitions def.addColumn(DataTableColumnDef.createNumericColumn("last_value")); def.addColumn(DataTableColumnDef.createNumericColumn("current_value")); def.addColumn(DataTableColumnDef.createNumericColumn("top_value")); def.addColumn(DataTableColumnDef.createNumericColumn("increment_by")); def.addColumn(DataTableColumnDef.createNumericColumn("min_value")); def.addColumn(DataTableColumnDef.createNumericColumn("max_value")); def.addColumn(DataTableColumnDef.createNumericColumn("start")); def.addColumn(DataTableColumnDef.createNumericColumn("cache")); def.addColumn(DataTableColumnDef.createBooleanColumn("cycle")); // Set to immutable def.setImmutable(); // Return the data table def return def; } public int getTableCount() { final TableName SEQ = TableDataConglomerate.SYS_SEQUENCE; if (transaction.tableExists(SEQ)) { return transaction.getTable(SEQ).getRowCount(); } else { return 0; } } public int findTableName(TableName name) { final TableName SEQ_INFO = TableDataConglomerate.SYS_SEQUENCE_INFO; if (transaction.realTableExists(SEQ_INFO)) { // Search the table. 
MutableTableDataSource table = transaction.getTable(SEQ_INFO); RowEnumeration row_e = table.rowEnumeration(); int p = 0; while (row_e.hasMoreRows()) { int row_index = row_e.nextRowIndex(); TObject seq_type = table.getCellContents(3, row_index); if (!seq_type.operatorEquals(ONE_VAL).valuesEqual(TRUE_VAL)) { TObject ob_name = table.getCellContents(2, row_index); if (ob_name.getObject().toString().equals(name.getName())) { TObject ob_schema = table.getCellContents(1, row_index); if (ob_schema.getObject().toString().equals(name.getSchema())) { // Match so return this return p; } } ++p; } } } return -1; } public TableName getTableName(int i) { final TableName SEQ_INFO = TableDataConglomerate.SYS_SEQUENCE_INFO; if (transaction.realTableExists(SEQ_INFO)) { // Search the table. MutableTableDataSource table = transaction.getTable(SEQ_INFO); RowEnumeration row_e = table.rowEnumeration(); int p = 0; while (row_e.hasMoreRows()) { int row_index = row_e.nextRowIndex(); TObject seq_type = table.getCellContents(3, row_index); if (!seq_type.operatorEquals(ONE_VAL).valuesEqual(TRUE_VAL)) { if (i == p) { TObject ob_schema = table.getCellContents(1, row_index); TObject ob_name = table.getCellContents(2, row_index); return new TableName(ob_schema.getObject().toString(), ob_name.getObject().toString()); } ++p; } } } throw new RuntimeException("Out of bounds."); } public boolean containsTableName(TableName name) { final TableName SEQ_INFO = TableDataConglomerate.SYS_SEQUENCE_INFO; // This set can not contain the table that is backing it, so we always // return false for that. This check stops an annoying recursive // situation for table name resolution. 
if (name.equals(SEQ_INFO)) { return false; } else { return findTableName(name) != -1; } } public String getTableType(int i) { return "SEQUENCE"; } public DataTableDef getDataTableDef(int i) { TableName table_name = getTableName(i); return createDataTableDef(table_name.getSchema(), table_name.getName()); } public MutableTableDataSource createInternalTable(int index) { MutableTableDataSource table = transaction.getTable(TableDataConglomerate.SYS_SEQUENCE_INFO); RowEnumeration row_e = table.rowEnumeration(); int p = 0; int i; int row_i = -1; while (row_e.hasMoreRows() && row_i == -1) { i = row_e.nextRowIndex(); // Is this is a type 1 sequence we ignore (native table sequence). TObject seq_type = table.getCellContents(3, i); if (!seq_type.operatorEquals(ONE_VAL).valuesEqual(TRUE_VAL)) { if (p == index) { row_i = i; } ++p; } } if (row_i != -1) { TObject seq_id = table.getCellContents(0, row_i); String schema = table.getCellContents(1, row_i).getObject().toString(); String name = table.getCellContents(2, row_i).getObject().toString(); TableName table_name = new TableName(schema, name); // Find this id in the 'sequence' table MutableTableDataSource seq_table = transaction.getTable(TableDataConglomerate.SYS_SEQUENCE); SelectableScheme scheme = seq_table.getColumnScheme(0); IntegerVector ivec = scheme.selectEqual(seq_id); if (ivec.size() > 0) { int seq_row_i = ivec.intAt(0); // Generate the DataTableDef final DataTableDef table_def = createDataTableDef(schema, name); // Last value for this sequence generated by the transaction TObject lv; try { lv = TObject.longVal(transaction.lastSequenceValue(table_name)); } catch (StatementException e) { lv = TObject.longVal(-1); } final TObject last_value = lv; // The current value of the sequence generator SequenceManager manager = transaction.getConglomerate().getSequenceManager(); final TObject current_value = TObject.longVal(manager.curValue(transaction, table_name)); // Read the rest of the values from the SEQUENCE table. 
final TObject top_value = seq_table.getCellContents(1, seq_row_i); final TObject increment_by = seq_table.getCellContents(2, seq_row_i); final TObject min_value = seq_table.getCellContents(3, seq_row_i); final TObject max_value = seq_table.getCellContents(4, seq_row_i); final TObject start = seq_table.getCellContents(5, seq_row_i); final TObject cache = seq_table.getCellContents(6, seq_row_i); final TObject cycle = seq_table.getCellContents(7, seq_row_i); // Implementation of MutableTableDataSource that describes this // sequence generator. return new GTDataSource(transaction.getSystem()) { public DataTableDef getDataTableDef() { return table_def; } public int getRowCount() { return 1; } public TObject getCellContents(int col, int row) { switch (col) { case 0: return last_value; case 1: return current_value; case 2: return top_value; case 3: return increment_by; case 4: return min_value; case 5: return max_value; case 6: return start; case 7: return cache; case 8: return cycle; default: throw new RuntimeException("Column out of bounds."); } } }; } else { throw new RuntimeException("No SEQUENCE table entry for generator."); } } else { throw new RuntimeException("Index out of bounds."); } } } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/SimpleRowEnumeration.java000066400000000000000000000031501330501023400267070ustar00rootroot00000000000000/** * com.mckoi.database.SimpleRowEnumeration 19 Sep 2002 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; /** * A RowEnumeration implementation that represents a sequence of rows that * can be referenced in incremental order between 0 and row_count (exclusive). * A Table that returns a SimpleRowEnumeration is guarenteed to provide valid * TObject values via the 'getCellContents' method between rows 0 and * getRowCount(). * * @author Tobias Downer */ public final class SimpleRowEnumeration implements RowEnumeration { /** * The current index. */ private int index = 0; /** * The number of rows in the enumeration. */ final int row_count_store; /** * Constructs the RowEnumeration. */ public SimpleRowEnumeration(int row_count) { row_count_store = row_count; } public final boolean hasMoreRows() { return (index < row_count_store); } public final int nextRowIndex() { ++index; return index - 1; } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/SimpleTableQuery.java000066400000000000000000000216441330501023400260160ustar00rootroot00000000000000/** * com.mckoi.database.SimpleTableQuery 16 Oct 2001 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import com.mckoi.util.IntegerVector; /** * A simple convenience interface for querying a MutableTableDataSource * instance. 
This is used as a very lightweight interface for changing a * table. It is most useful for internal low level users of a database * table which doesn't need the overhead of the Mckoi table hierarchy * mechanism. * * @author Tobias Downer */ public final class SimpleTableQuery { /** * The DataTableDef for this table. */ private DataTableDef table_def; /** * The TableDataSource we are wrapping. */ private TableDataSource table; /** * Constructs the SimpleTableQuery with the given MutableTableDataSource * object. */ public SimpleTableQuery(TableDataSource in_table) { // in_table.addRootLock(); this.table = in_table; this.table_def = table.getDataTableDef(); } /** * Returns a RowEnumeration that is used to iterate through the entire list * of valid rows in the table. */ public RowEnumeration rowEnumeration() { return table.rowEnumeration(); } /** * Returns the total number of rows in this table. */ public int getRowCount() { return table.getRowCount(); } /** * Gets the TObject at the given cell in the table. * Note that the offset between one valid row and the next may not necessily * be 1. It is possible for there to be gaps in the data. For an iterator * that returns successive row indexes, use the 'rowEnumeration' method. */ public TObject get(int column, int row) { return table.getCellContents(column, row); } /** * Finds the index of all the rows in the table where the given column is * equal to the given object. */ public IntegerVector selectIndexesEqual(int column, TObject cell) { return table.getColumnScheme(column).selectEqual(cell); } /** * Finds the index of all the rows in the table where the given column is * equal to the given object. *

* We assume value is not null, and it is either a BigNumber to represent * a number, a String, a java.util.Date or a ByteLongObject. */ public IntegerVector selectIndexesEqual(int column, Object value) { TType ttype = table_def.columnAt(column).getTType(); TObject cell = new TObject(ttype, value); return selectIndexesEqual(column, cell); } /** * Finds the index of all the rows in the table where the given column is * equal to the given object for both of the clauses. This implies an * AND for the two searches. */ public IntegerVector selectIndexesEqual(int col1, TObject cell1, int col2, TObject cell2) { // All the indexes that equal the first clause IntegerVector ivec = table.getColumnScheme(col1).selectEqual(cell1); // From this, remove all the indexes that don't equals the second clause. int index = ivec.size() - 1; while (index >= 0) { // If the value in column 2 at this index is not equal to value then // remove it from the list and move to the next. if (get(col2, ivec.intAt(index)).compareTo(cell2) != 0) { ivec.removeIntAt(index); } --index; } return ivec; } /** * Finds the index of all the rows in the table where the given column is * equal to the given object for both of the clauses. This implies an * AND for the two searches. *

* We assume value is not null, and it is either a BigNumber to represent * a number, a String, a java.util.Date or a ByteLongObject. */ public IntegerVector selectIndexesEqual(int col1, Object val1, int col2, Object val2) { TType t1 = table_def.columnAt(col1).getTType(); TType t2 = table_def.columnAt(col2).getTType(); TObject cell1 = new TObject(t1, val1); TObject cell2 = new TObject(t2, val2); return selectIndexesEqual(col1, cell1, col2, cell2); } /** * Returns true if there is a single row in the table where the given column * is equal to the given value, otherwise returns false. If there are 2 or * more rows an assertion exception is thrown. */ public boolean existsSingle(int col, Object val) { IntegerVector ivec = selectIndexesEqual(col, val); if (ivec.size() == 0) { return false; } else if (ivec.size() == 1) { return true; } else { throw new Error("Assertion failed: existsSingle found multiple values."); } } /** * Assuming the table stores a key/value mapping, this returns the contents * of value_column for any rows where key_column is equal to the key_value. * An assertion exception is thrown if there is more than 2 rows that match * the key. If no rows match the key then null is returned. */ public Object getVar(int value_column, int key_column, Object key_value) { // All indexes in the table where the key value is found. IntegerVector ivec = selectIndexesEqual(key_column, key_value); if (ivec.size() > 1) { throw new Error("Assertion failed: getVar found multiple key values."); } else if (ivec.size() == 0) { // Key found so return the value return get(value_column, ivec.intAt(0)); } else { // Key not found so return null return null; } } // ---------- Table mutable methods --------- /** * Adds a new key/value mapping in this table. If the key already exists * the old key/value row is deleted first. This method accepts two * arguments, the column that contains the key value, and an Object[] array * that is the list of cells to insert into the table. 
The Object[] array * must be the size of the number of columns in this table. *

* NOTE: Change will come into effect globally at the next commit. *

* NOTE: This method must be assured of exlusive access to the table within * the transaction. *

* NOTE: This only works if the given table implements MutableTableDataSource. */ public void setVar(int key_column, Object[] vals) { // Cast to a MutableTableDataSource MutableTableDataSource mtable = (MutableTableDataSource) table; // All indexes in the table where the key value is found. IntegerVector ivec = selectIndexesEqual(key_column, vals[key_column]); if (ivec.size() > 1) { throw new Error("Assertion failed: setVar found multiple key values."); } else if (ivec.size() == 1) { // Remove the current key mtable.removeRow(ivec.intAt(0)); } // Insert the new key RowData row_data = new RowData(table); for (int i = 0; i < table_def.columnCount(); ++i) { row_data.setColumnDataFromObject(i, vals[i]); } mtable.addRow(row_data); } /** * Deletes a single entry from the table where the given column equals the * given value. If there are multiple values found an assertion exception * is thrown. If a single value was found and deleted 'true' is returned * otherwise false. *

* NOTE: This only works if the given table implements MutableTableDataSource. */ public boolean deleteSingle(int col, Object val) { // Cast to a MutableTableDataSource MutableTableDataSource mtable = (MutableTableDataSource) table; IntegerVector ivec = selectIndexesEqual(col, val); if (ivec.size() == 0) { return false; } else if (ivec.size() == 1) { mtable.removeRow(ivec.intAt(0)); return true; } else { throw new Error("Assertion failed: deleteSingle found multiple values."); } } /** * Deletes all the given indexes in this table. *

* NOTE: This only works if the given table implements MutableTableDataSource. */ public void deleteRows(IntegerVector list) { // Cast to a MutableTableDataSource MutableTableDataSource mtable = (MutableTableDataSource) table; for (int i = 0; i < list.size(); ++i) { mtable.removeRow(list.intAt(i)); } } /** * Disposes this object and frees any resources associated with it. This * should be called when the query object is no longer being used. */ public void dispose() { if (table != null) { // table.removeRootLock(); table = null; } } /** * To be save we call dispose from the finalize method. */ public void finalize() { dispose(); } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/SimpleTransaction.java000066400000000000000000000573551330501023400262360ustar00rootroot00000000000000/** * com.mckoi.database.SimpleTransaction 09 Mar 2003 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import com.mckoi.debug.DebugLogger; import java.util.ArrayList; import java.util.HashMap; /** * An simple implementation of Transaction that provides various facilities for * implementing a Transaction object on a number of MasterTableDataSource * tables. The Transaction object is designed such that concurrent * modification can happen to the database via other transactions without this * view of the database being changed. *

* This object does not implement any transaction control mechanisms such as * 'commit' or 'rollback'. This object is most useful for setting up a * short-term minimal transaction for modifying or querying some data in the * database given on some view. * * @author Tobias Downer */ public abstract class SimpleTransaction { /** * The TransactionSystem context. */ private TransactionSystem system; /** * The list of tables that represent this transaction's view of the database. * (MasterTableDataSource). */ private ArrayList visible_tables; /** * An IndexSet for each visible table from the above list. These objects * are used to represent index information for all tables. * (IndexSet) */ private ArrayList table_indices; /** * A queue of MasterTableDataSource and IndexSet objects that are pending to * be cleaned up when this transaction is disposed. */ private ArrayList cleanup_queue; /** * A cache of tables that have been accessed via this transaction. This is * a map of table_name -> MutableTableDataSource. */ private HashMap table_cache; /** * A local cache for sequence values. */ private HashMap sequence_value_cache; /** * The SequenceManager for this abstract transaction. */ private SequenceManager sequence_manager; /** * If true, this is a read-only transaction and does not permit any type of * modification to this vew of the database. */ private boolean read_only; /** * Constructs the AbstractTransaction. SequenceManager may be null in which * case sequence generator operations are not permitted. */ SimpleTransaction(TransactionSystem system, SequenceManager sequence_manager) { this.system = system; this.visible_tables = new ArrayList(); this.table_indices = new ArrayList(); this.table_cache = new HashMap(); this.sequence_value_cache = new HashMap(); this.sequence_manager = sequence_manager; this.read_only = false; } /** * Sets this transaction as read only. A read only transaction does not * allow for the view to be modified in any way. 
*/ public void setReadOnly() { read_only = true; } /** * Returns true if the transaction is read-only, otherwise returns false. */ public boolean isReadOnly() { return read_only; } /** * Returns the TransactionSystem that this Transaction is part of. */ public final TransactionSystem getSystem() { return system; } /** * Returns a list of all visible tables. */ protected final ArrayList getVisibleTables() { return visible_tables; } /** * Returns a DebugLogger object that we use to log debug messages to. */ public final DebugLogger Debug() { return getSystem().Debug(); } /** * Returns the number of visible tables being managed by this transaction. */ protected int getVisibleTableCount() { return visible_tables.size(); } /** * Returns a MasterTableDataSource object representing table 'n' in the set * of tables visible in this transaction. */ protected MasterTableDataSource getVisibleTable(int n) { return (MasterTableDataSource) visible_tables.get(n); } /** * Searches through the list of tables visible within this transaction and * returns the MasterTableDataSource object with the given name. Returns * null if no visible table with the given name could be found. */ protected MasterTableDataSource findVisibleTable(TableName table_name, boolean ignore_case) { int size = visible_tables.size(); for (int i = 0; i < size; ++i) { MasterTableDataSource master = (MasterTableDataSource) visible_tables.get(i); DataTableDef table_def = master.getDataTableDef(); if (ignore_case) { if (table_def.getTableName().equalsIgnoreCase(table_name)) { return master; } } else { // Not ignore case if (table_def.getTableName().equals(table_name)) { return master; } } } return null; } /** * Returns the IndexSet for the given MasterTableDataSource object that * is visible in this transaction. 
*/ final IndexSet getIndexSetForTable(MasterTableDataSource table) { int sz = table_indices.size(); for (int i = 0; i < sz; ++i) { if (visible_tables.get(i) == table) { return (IndexSet) table_indices.get(i); } } throw new RuntimeException( "MasterTableDataSource not found in this transaction."); } /** * Sets the IndexSet for the given MasterTableDataSource object in this * transaction. */ protected final void setIndexSetForTable(MasterTableDataSource table, IndexSet index_set) { int sz = table_indices.size(); for (int i = 0; i < sz; ++i) { if (visible_tables.get(i) == table) { table_indices.set(i, index_set); return; } } throw new RuntimeException( "MasterTableDataSource not found in this transaction."); } /** * Returns true if the given table name is a dynamically generated table and * is not a table that is found in the table list defined in this transaction * object. *

* It is intended this is implemented by derived classes to handle dynamically * generated tables (tables based on some function or from an external data * source) */ protected boolean isDynamicTable(TableName table_name) { // By default, dynamic tables are not implemented. return false; } /** * If this transaction implementation defines dynamic tables (tables whose * content is determined by some function), this should return the * table here as a MutableTableDataSource object. If the table is not * defined an exception is generated. *

* It is intended this is implemented by derived classes to handle dynamically * generated tables (tables based on some function or from an external data * source) */ protected MutableTableDataSource getDynamicTable(TableName table_name) { // By default, dynamic tables are not implemented. throw new StatementException("Table '" + table_name + "' not found."); } /** * Returns the DataTableDef for a dynamic table defined in this transaction. *

* It is intended this is implemented by derived classes to handle dynamically * generated tables (tables based on some function or from an external data * source) */ protected DataTableDef getDynamicDataTableDef(TableName table_name) { // By default, dynamic tables are not implemented. throw new StatementException("Table '" + table_name + "' not found."); } /** * Returns a string type describing the type of the dynamic table. *

* It is intended this is implemented by derived classes to handle dynamically * generated tables (tables based on some function or from an external data * source) */ protected String getDynamicTableType(TableName table_name) { // By default, dynamic tables are not implemented. throw new StatementException("Table '" + table_name + "' not found."); } /** * Returns a list of all dynamic table names. We can assume that the object * returned here is static so the content of this list should not be changed. *

* It is intended this is implemented by derived classes to handle dynamically * generated tables (tables based on some function or from an external data * source) */ protected TableName[] getDynamicTableList() { return new TableName[0]; } // ----- /** * Returns a new MutableTableDataSource for the view of the * MasterTableDataSource at the start of this transaction. Note that this is * called only once per table accessed in this transaction. */ abstract MutableTableDataSource createMutableTableDataSourceAtCommit( MasterTableDataSource master); // ----- /** * Flushes the table cache and purges the cache of the entry for the given * table name. */ protected void flushTableCache(TableName table_name) { table_cache.remove(table_name); } /** * Adds a MasterTableDataSource and IndexSet to this transaction view. */ void addVisibleTable(MasterTableDataSource table, IndexSet index_set) { if (isReadOnly()) { throw new RuntimeException("Transaction is read-only."); } visible_tables.add(table); table_indices.add(index_set); } /** * Removes a MasterTableDataSource (and its IndexSet) from this view and * puts the information on the cleanup queue. */ void removeVisibleTable(MasterTableDataSource table) { if (isReadOnly()) { throw new RuntimeException("Transaction is read-only."); } int i = visible_tables.indexOf(table); if (i != -1) { visible_tables.remove(i); IndexSet index_set = (IndexSet) table_indices.remove(i); if (cleanup_queue == null) { cleanup_queue = new ArrayList(); } cleanup_queue.add(table); cleanup_queue.add(index_set); // Remove from the table cache TableName table_name = table.getTableName(); table_cache.remove(table_name); } } /** * Updates a MastertableDataSource (and its IndexSet) for this view. The * existing IndexSet/MasterTableDataSource for this is put on the clean up * queue. 
*/ void updateVisibleTable(MasterTableDataSource table, IndexSet index_set) { if (isReadOnly()) { throw new RuntimeException("Transaction is read-only."); } removeVisibleTable(table); addVisibleTable(table, index_set); } /** * Disposes of all IndexSet objects currently accessed by this Transaction. * This includes IndexSet objects on tables that have been dropped by * operations on this transaction and are in the 'cleanup_queue' object. * Disposing of the IndexSet is a common cleanup practice and would typically * be used at the end of a transaction. */ protected void disposeAllIndices() { // Dispose all the IndexSet for each table try { for (int i = 0; i < table_indices.size(); ++i) { ((IndexSet) table_indices.get(i)).dispose(); } } catch (Throwable e) { Debug().writeException(e); } // Dispose all tables we dropped (they will be in the cleanup_queue. try { if (cleanup_queue != null) { for (int i = 0; i < cleanup_queue.size(); i += 2) { MasterTableDataSource master = (MasterTableDataSource) cleanup_queue.get(i); IndexSet index_set = (IndexSet) cleanup_queue.get(i + 1); index_set.dispose(); } cleanup_queue = null; } } catch (Throwable e) { Debug().writeException(e); } } // ----- /** * Returns a TableDataSource object that represents the table with the * given name within this transaction. This table is represented by an * immutable interface. */ public TableDataSource getTableDataSource(TableName table_name) { return getTable(table_name); } /** * Returns a MutableTableDataSource object that represents the table with * the given name within this transaction. Any changes made to this table * are only made within the context of this transaction. This means if a * row is added or removed, it is not made perminant until the transaction * is committed. *

* If the table does not exist then an exception is thrown. */ public MutableTableDataSource getTable(TableName table_name) { // If table is in the cache, return it MutableTableDataSource table = (MutableTableDataSource) table_cache.get(table_name); if (table != null) { return table; } // Is it represented as a master table? MasterTableDataSource master = findVisibleTable(table_name, false); // Not a master table, so see if it's a dynamic table instead, if (master == null) { // Is this a dynamic table? if (isDynamicTable(table_name)) { return getDynamicTable(table_name); } } else { // Otherwise make a view of tha master table data source and put it in // the cache. table = createMutableTableDataSourceAtCommit(master); // Put table name in the cache table_cache.put(table_name, table); } return table; } /** * Returns the DataTableDef for the table with the given name that is * visible within this transaction. *

* Returns null if table name doesn't refer to a table that exists. */ public DataTableDef getDataTableDef(TableName table_name) { // If this is a dynamic table then handle specially if (isDynamicTable(table_name)) { return getDynamicDataTableDef(table_name); } else { // Otherwise return from the pool of visible tables int sz = visible_tables.size(); for (int i = 0; i < sz; ++i) { MasterTableDataSource master = (MasterTableDataSource) visible_tables.get(i); DataTableDef table_def = master.getDataTableDef(); if (table_def.getTableName().equals(table_name)) { return table_def; } } return null; } } /** * Returns a list of table names that are visible within this transaction. */ public TableName[] getTableList() { TableName[] internal_tables = getDynamicTableList(); int sz = visible_tables.size(); // The result list TableName[] tables = new TableName[sz + internal_tables.length]; // Add the master tables for (int i = 0; i < sz; ++i) { MasterTableDataSource master = (MasterTableDataSource) visible_tables.get(i); DataTableDef table_def = master.getDataTableDef(); tables[i] = new TableName(table_def.getSchema(), table_def.getName()); } // Add any internal system tables to the list for (int i = 0; i < internal_tables.length; ++i) { tables[sz + i] = internal_tables[i]; } return tables; } /** * Returns true if the database table object with the given name exists * within this transaction. */ public boolean tableExists(TableName table_name) { // // NASTY HACK: This hack is to get around an annoying recursive problem // // when resolving views. We know this table can't possibly be an // // internal table. // boolean is_view_table = (table_name.getName().equals("sUSRView") && // table_name.getSchema().equals("SYS_INFO")); // if (is_view_table) { // return findVisibleTable(table_name, false) != null; // } // return isDynamicTable(table_name) || realTableExists(table_name); } /** * Returns true if the table with the given name exists within this * transaction. 
This is different from 'tableExists' because it does not try * to resolve against dynamic tables, and is therefore useful for quickly * checking if a system table exists or not. */ final boolean realTableExists(TableName table_name) { return findVisibleTable(table_name, false) != null; } /** * Attempts to resolve the given table name to its correct case assuming * the table name represents a case insensitive version of the name. For * example, "aPP.CuSTOMer" may resolve to "APP.Customer". If the table * name can not resolve to a valid identifier it returns the input table * name, therefore the actual presence of the table should always be * checked by calling 'tableExists' after this method returns. */ public TableName tryResolveCase(TableName table_name) { // Is it a visable table (match case insensitive) MasterTableDataSource table = findVisibleTable(table_name, true); if (table != null) { return table.getTableName(); } // Is it an internal table? String tschema = table_name.getSchema(); String tname = table_name.getName(); TableName[] list = getDynamicTableList(); for (int i = 0; i < list.length; ++i) { TableName ctable = list[i]; if (ctable.getSchema().equalsIgnoreCase(tschema) && ctable.getName().equalsIgnoreCase(tname)) { return ctable; } } // No matches so return the original object. return table_name; } /** * Returns the type of the table object with the given name. If the table * is a base table, this method returns "TABLE". If it is a virtual table, * it returns the type assigned to by the InternalTableInfo interface. */ public String getTableType(TableName table_name) { if (isDynamicTable(table_name)) { return getDynamicTableType(table_name); } else if (findVisibleTable(table_name, false) != null) { return "TABLE"; } // No table found so report the error. throw new RuntimeException("No table '" + table_name + "' to report type for."); } /** * Resolves the given string to a table name, throwing an exception if * the reference is ambiguous. 
This also generates an exception if the * table object is not found. */ public TableName resolveToTableName(String current_schema, String name, boolean case_insensitive) { TableName table_name = TableName.resolve(current_schema, name); TableName[] tables = getTableList(); TableName found = null; for (int i = 0; i < tables.length; ++i) { boolean match; if (case_insensitive) { match = tables[i].equalsIgnoreCase(table_name); } else { match = tables[i].equals(table_name); } if (match) { if (found != null) { throw new StatementException("Ambiguous reference: " + name); } else { found = tables[i]; } } } if (found == null) { throw new StatementException("Object not found: " + name); } return found; } // ---------- Sequence management ---------- /** * Flushes the sequence cache. This should be used whenever a sequence * is changed. */ void flushSequenceManager(TableName name) { sequence_manager.flushGenerator(name); } /** * Requests of the sequence generator the next value from the sequence. *

* NOTE: This does NOT check that the user owning this connection has the * correct privs to perform this operation. */ public long nextSequenceValue(TableName name) { if (isReadOnly()) { throw new RuntimeException( "Sequence operation not permitted for read only transaction."); } // Check: if null sequence manager then sequence ops not allowed. if (sequence_manager == null) { throw new RuntimeException("Sequence operations are not permitted."); } SequenceManager seq = sequence_manager; long val = seq.nextValue(this, name); // No synchronized because a DatabaseConnection should be single threaded // only. sequence_value_cache.put(name, new Long(val)); return val; } /** * Returns the sequence value for the given sequence generator that * was last returned by a call to 'nextSequenceValue'. If a value was not * last returned by a call to 'nextSequenceValue' then a statement exception * is generated. *

* NOTE: This does NOT check that the user owning this connection has the * correct privs to perform this operation. */ public long lastSequenceValue(TableName name) { // No synchronized because a DatabaseConnection should be single threaded // only. Long v = (Long) sequence_value_cache.get(name); if (v != null) { return v.longValue(); } else { throw new StatementException( "Current value for sequence generator " + name + " is not available."); } } /** * Sets the sequence value for the given sequence generator. If the generator * does not exist or it is not possible to set the value for the generator * then an exception is generated. *

* NOTE: This does NOT check that the user owning this connection has the * correct privs to perform this operation. */ public void setSequenceValue(TableName name, long value) { if (isReadOnly()) { throw new RuntimeException( "Sequence operation not permitted for read only transaction."); } // Check: if null sequence manager then sequence ops not allowed. if (sequence_manager == null) { throw new RuntimeException("Sequence operations are not permitted."); } SequenceManager seq = sequence_manager; seq.setValue(this, name, value); sequence_value_cache.put(name, new Long(value)); } /** * Returns the current unique id for the given table name. Note that this * is NOT a view of the ID, it is the actual ID value at this time regardless * of transaction. */ public long currentUniqueID(TableName table_name) { MasterTableDataSource master = findVisibleTable(table_name, false); if (master == null) { throw new StatementException( "Table with name '" + table_name + "' could not be " + "found to retrieve unique id."); } return master.currentUniqueID(); } /** * Atomically returns a unique id that can be used as a seed for a set of * unique identifiers for a table. Values returned by this method are * guarenteed unique within this table. This is true even across * transactions. *

* NOTE: This change can not be rolled back. */ public long nextUniqueID(TableName table_name) { if (isReadOnly()) { throw new RuntimeException( "Sequence operation not permitted for read only transaction."); } MasterTableDataSource master = findVisibleTable(table_name, false); if (master == null) { throw new StatementException( "Table with name '" + table_name + "' could not be " + "found to retrieve unique id."); } return master.nextUniqueID(); } /** * Sets the unique id for the given table name. This must only be called * under very controlled situations, such as when altering a table or when * we need to fix sequence corruption. */ public void setUniqueID(TableName table_name, long unique_id) { if (isReadOnly()) { throw new RuntimeException( "Sequence operation not permitted for read only transaction."); } MasterTableDataSource master = findVisibleTable(table_name, false); if (master == null) { throw new StatementException( "Table with name '" + table_name + "' could not be " + "found to set unique id."); } master.setUniqueID(unique_id); } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/StateStore.java000066400000000000000000000337601330501023400246660ustar00rootroot00000000000000/** * com.mckoi.database.StateStore 10 Feb 2003 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
 *
 */
package com.mckoi.database;
state_file.close(); // Update the table_id state header_area.position(8); header_area.putLong(conv_table_id); // Check out the change header_area.checkOut(); // Finally commit the changes commit(); // Return a pointer to the structure return header_p; } /** * Returns the next table id and increments the table_id counter. */ public synchronized int nextTableID() throws IOException { int cur_counter = table_id; ++table_id; try { store.lockForWrite(); // Update the state in the file header_area.position(8); header_area.putLong(table_id); // Check out the change header_area.checkOut(); } finally { store.unlockForWrite(); } return cur_counter; } /** * Returns a list of all table resources that are currently in the visible * list. */ public synchronized StateResource[] getVisibleList() { return (StateResource[]) visible_list.toArray(new StateResource[visible_list.size()]); } /** * Returns a list of all table resources that are currently in the deleted * list. */ public synchronized StateResource[] getDeleteList() { return (StateResource[]) delete_list.toArray(new StateResource[delete_list.size()]); } /** * Returns true if the visible list contains a state resource with the given * table id value. */ public synchronized boolean containsVisibleResource(int table_id) { int sz = visible_list.size(); for (int i = 0; i < sz; ++i) { if (((StateResource) visible_list.get(i)).table_id == table_id) { return true; } } return false; } /** * Adds the given StateResource to the visible table list. This does not * persist the state. To persist this change a call to 'commit' must be * called. */ public synchronized void addVisibleResource(StateResource resource) { visible_list.add(resource); vis_list_change = true; } /** * Adds the given StateResource to the deleted table list. This does not * persist the state. To persist this change a call to 'commit' must be * called. 
*/ public synchronized void addDeleteResource(StateResource resource) { delete_list.add(resource); del_list_change = true; } /** * Removes the resource with the given name from the visible list. This does * not persist the state. To persist this change a call to 'commit' must be * called. */ public synchronized void removeVisibleResource(String name) { removeResource(visible_list, name); vis_list_change = true; } /** * Removes the resource with the given name from the deleted list. This does * not persist the state. To persist this change a call to 'commit' must be * called. */ public synchronized void removeDeleteResource(String name) { removeResource(delete_list, name); del_list_change = true; } /** * Commits the current state to disk so that it makes a persistent change to * the state. A further call to 'synch()' will synchronize the file. This * will only commit changes if there were modifications to the state. * Returns true if this commit caused any changes to the persistant state. */ public synchronized boolean commit() throws IOException { boolean changes = false; long new_vis_p = vis_p; long new_del_p = del_p; try { store.lockForWrite(); // If the lists changed, then write new state areas to the store. if (vis_list_change) { new_vis_p = writeListToStore(visible_list); vis_list_change = false; changes = true; } if (del_list_change) { new_del_p = writeListToStore(delete_list); del_list_change = false; changes = true; } // Commit the changes, if (changes) { header_area.position(16); header_area.putLong(new_vis_p); header_area.putLong(new_del_p); // Check out the change. header_area.checkOut(); if (vis_p != new_vis_p) { store.deleteArea(vis_p); vis_p = new_vis_p; } if (del_p != new_del_p) { store.deleteArea(del_p); del_p = new_del_p; } } } finally { store.unlockForWrite(); } return changes; } // ---------- Inner classes ---------- /** * Represents a single StateResource in either a visible or delete list in * this state file. 
*/ static class StateResource { /** * The unique identifier for the resource. */ long table_id; /** * The unique name given to the resource to distinguish it from all other * resources. */ String name; public StateResource(long table_id, String name) { this.table_id = table_id; this.name = name; } } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/StatementCache.java000066400000000000000000000063421330501023400254550ustar00rootroot00000000000000/** * com.mckoi.database.StatementCache 15 Sep 2001 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import com.mckoi.util.Cache; import com.mckoi.database.global.ObjectTranslator; import com.mckoi.database.global.ByteLongObject; import com.mckoi.debug.*; /** * A cache that maintains a serialized set of StatementTree objects that can * be deserialized on demand. The purpose of this cache is to improve the * performance of queries that are run repeatedly (for example, multiple * INSERT statements). *

* SYNCHRONIZATION: This object is safe to use over multiple threads. * * @author Tobias Downer */ public final class StatementCache { /** * The DatabaseSystem of this cache. */ private DatabaseSystem system; /** * The internal cache representation. */ private Cache cache; /** * Constructs the cache. */ public StatementCache(DatabaseSystem system, int hash_size, int max_size, int clean_percentage) { this.system = system; cache = new Cache(hash_size, max_size, clean_percentage); } /** * Returns a DebugLogger object we can use to log debug messages. */ public final DebugLogger Debug() { return system.Debug(); } /** * Puts a new query string/StatementTree into the cache. */ public synchronized void put(String query_string, StatementTree statement_tree) { query_string = query_string.trim(); // Is this query string already in the cache? if (cache.get(query_string) == null) { try { Object cloned_tree = statement_tree.clone(); cache.put(query_string, cloned_tree); } catch (CloneNotSupportedException e) { Debug().writeException(e); throw new Error("Unable to clone statement tree: " + e.getMessage()); } } } /** * Gets a StatementTree for the query string if it is stored in the cache. * If it isn't stored in the cache returns null. */ public synchronized StatementTree get(String query_string) { query_string = query_string.trim(); Object ob = cache.get(query_string); if (ob != null) { try { // System.out.println("CACHE HIT!"); // We found a cached version of this query so deserialize and return // it. 
StatementTree cloned_tree = (StatementTree) ob; return (StatementTree) cloned_tree.clone(); } catch (CloneNotSupportedException e) { Debug().writeException(e); throw new Error("Unable to clone statement tree: " + e.getMessage()); } } // Not found so return null return null; } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/StatementException.java000066400000000000000000000017611330501023400264100ustar00rootroot00000000000000/** * com.mckoi.database.StatementException 14 Dec 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; /** * An error that is thrown when there is erronious information in a statement. * * @author Tobias Downer */ public class StatementException extends RuntimeException { public StatementException(String msg) { super(msg); } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/StatementTree.java000066400000000000000000000212521330501023400253460ustar00rootroot00000000000000/** * com.mckoi.database.StatementTree 08 Sep 2001 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import java.util.HashMap; import java.util.ArrayList; import java.util.Iterator; import java.util.List; import java.util.ListIterator; import java.math.BigDecimal; /** * A serializable container class for a parsed query language statement. The * structure of the tree is entirely dependant on the grammar that was used * to create the tree. This object is a convenient form that can be cached and * serialized to be stored. *

* Think of this as the model of a query after the grammar has been parsed * and before it is evaluated. * * @author Tobias Downer */ public final class StatementTree implements java.io.Serializable, Cloneable { static final long serialVersionUID = -5907058730080713004L; /** * The class of statement this is. This is set to one of the query objects * from the com.mckoi.database.interpret package. For example, if this is * a select statement then it points to * 'com.mckoi.database.interpret.Select'. */ private String statement_class; /** * A map that maps from the name of the tree element to the object * that contains information about. For example, if this is an SQL * SELECT statement then entries in this map may be; *

   *   "columns" -> sql.SelectColumn[]
   *   "distinct" -> new Boolean(true)
   * 
*/ private HashMap map; /** * Constructs the StatementTree. * * @param statement_class the name of the class that interpretes this * statement (eg. com.mckoi.database.interpret.Select). */ public StatementTree(String statement_class) { if (!statement_class.startsWith("com.mckoi.database.interpret.")) { throw new Error("statement_class must be in the " + "com.mckoi.database.interpret package."); } this.statement_class = statement_class; map = new HashMap(); } /** * Puts a new entry into the statement tree map. */ public void putObject(String entry_name, Object ob) { if (entry_name == null) { throw new NullPointerException("entry_name is null."); } // Check on is derived from a known class if (ob == null || ob instanceof Boolean || ob instanceof String || ob instanceof BigDecimal || ob instanceof Variable || ob instanceof Integer || ob instanceof TObject || ob instanceof TType || ob instanceof Expression || ob instanceof Expression[] || ob instanceof List || ob instanceof StatementTree || ob instanceof StatementTreeObject) { Object v = map.put(entry_name, ob); if (v != null) { throw new Error("Entry '" + entry_name + "' is already present in this tree."); } } else { throw new Error("ob of entry '" + entry_name + "' is not derived from a recognised class"); } } /** * Puts a boolean into the statement tree map. */ public void putBoolean(String entry_name, boolean b) { putObject(entry_name, b ? Boolean.TRUE : Boolean.FALSE); } /** * Puts an integer into the statement tree map. */ public void putInt(String entry_name, int v) { putObject(entry_name, new Integer(v)); } /** * Gets an object entry from the statement tree. */ public Object getObject(String entry_name) { return map.get(entry_name); } /** * Gets a boolean entry from the statement tree. */ public boolean getBoolean(String entry_name) { Object ob = map.get(entry_name); return ((Boolean) ob).booleanValue(); } /** * Gets an integer entry from the statement tree. 
*/ public int getInt(String entry_name) { Object ob = map.get(entry_name); return ((Integer) ob).intValue(); } /** * Gets the interpreter class that services this tree. */ public String getClassName() { return statement_class; } /** * For each expression in this StatementTree this method will call the * 'prepare' method in each expression. The prepare method is intended to * mutate each expression so that references can be qualified, sub-queries * can be resolved, and variable substitutions can be substituted. */ public void prepareAllExpressions(ExpressionPreparer preparer) throws DatabaseException { Iterator i = map.values().iterator(); while (i.hasNext()) { Object v = i.next(); if (v != null) { prepareExpressionsInObject(v, preparer); } } } private void prepareExpressionsInObject(Object v, ExpressionPreparer preparer) throws DatabaseException { // If expression if (v instanceof Expression) { ((Expression) v).prepare(preparer); } // If an array of expression else if (v instanceof Expression[]) { Expression[] exp_list = (Expression[]) v; for (int n = 0; n < exp_list.length; ++n) { exp_list[n].prepare(preparer); } } // If a StatementTreeObject then can use the 'prepareExpressions' method. else if (v instanceof StatementTreeObject) { StatementTreeObject stob = (StatementTreeObject) v; stob.prepareExpressions(preparer); } // If a StatementTree then can use the prepareAllExpressions method. else if (v instanceof StatementTree) { StatementTree st = (StatementTree) v; st.prepareAllExpressions(preparer); } // If a list of objects, else if (v instanceof List) { List list = (List) v; for (int n = 0; n < list.size(); ++n) { Object ob = list.get(n); prepareExpressionsInObject(ob, preparer); } } } /** * Clones a single object. 
*/ public static Object cloneSingleObject(Object entry) throws CloneNotSupportedException { // Immutable entries, if (entry == null || entry instanceof TObject || entry instanceof TType || entry instanceof Boolean || entry instanceof String || entry instanceof BigDecimal || entry instanceof Integer) { // Immutable entries } else if (entry instanceof Expression) { entry = ((Expression) entry).clone(); } else if (entry instanceof Expression[]) { Expression[] exps = (Expression[]) ((Expression[]) entry).clone(); // Clone each element of the array for (int n = 0; n < exps.length; ++n) { exps[n] = (Expression) exps[n].clone(); } entry = exps; } else if (entry instanceof Variable) { entry = ((Variable) entry).clone(); } else if (entry instanceof StatementTreeObject) { entry = ((StatementTreeObject) entry).clone(); } else if (entry instanceof StatementTree) { entry = ((StatementTree) entry).clone(); } else if (entry instanceof List) { // Clone the list by making a new ArrayList and adding a cloned version // of each element into it. List list = (List) entry; ArrayList cloned_list = new ArrayList(list.size()); Iterator i = list.iterator(); while (i.hasNext()) { cloned_list.add(cloneSingleObject(i.next())); } entry = cloned_list; } else { throw new CloneNotSupportedException(entry.getClass().toString()); } return entry; } /** * Performs a deep clone of this object, calling 'clone' on any elements * that are mutable or shallow copying immutable members. */ public Object clone() throws CloneNotSupportedException { // Shallow clone first StatementTree v = (StatementTree) super.clone(); // Clone the map HashMap cloned_map = new HashMap(); v.map = cloned_map; // For each key, clone the entry Iterator i = map.keySet().iterator(); while (i.hasNext()) { Object key = i.next(); Object entry = map.get(key); entry = cloneSingleObject(entry); cloned_map.put(key, entry); } return v; } /** * For diagnostic. 
*/ public String toString() { return "[ " + getClassName() + " [ " + map + " ] ]"; } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/StatementTreeObject.java000066400000000000000000000030631330501023400264750ustar00rootroot00000000000000/** * com.mckoi.database.StatementTreeObject 09 Sep 2001 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; /** * An complex object that is to be contained within a StatementTree object. * A statement tree object must be serializable, and it must be able to * reference all Expression objects so that they may be prepared. * * @author Tobias Downer */ public interface StatementTreeObject { /** * Prepares all expressions in this statement tree object by passing the * ExpressionPreparer object to the 'prepare' method of the expression. */ void prepareExpressions(ExpressionPreparer preparer) throws DatabaseException; /** * Performs a DEEP clone of this object if it is mutable, or a deep clone * of its mutable members. If the object is immutable then it may return * 'this'. 
*/ Object clone() throws CloneNotSupportedException; } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/StoreSystem.java000066400000000000000000000104761330501023400250710ustar00rootroot00000000000000/** * com.mckoi.database.StoreSystem 03 Feb 2003 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import com.mckoi.store.Store; import java.io.IOException; /** * An object that creates and manages the Store objects that the database engine * uses to represent itself on an external medium such as a disk, and that * constitute the low level persistent data format. *

* This interface is an abstraction of the database persistence layer. For * example, an implementation could represent itself as 1 file per store on a * disk, or as a number of stores in a single file, or as an entirely in-memory * database. * * @author Tobias Downer */ interface StoreSystem { /** * Returns true if the store with the given name exists within the system, * or false otherwise. */ boolean storeExists(String name); /** * Creates and returns a new persistent Store object given the unique name of * the store. If the system is read-only or the table otherwise can not be * created then an exception is thrown. *

* At the most, you should assume that this will return an implementation of * AbstractStore but you should not be assured of this fact. * * @param name a unique identifier string representing the name of the store. */ Store createStore(String name); /** * Opens an existing persistent Store object in the system and returns the * Store object that contains its data. An exception is thrown if the store * can not be opened. *

* At the most, you should assume that this will return an implementation of * AbstractStore but you should not be assured of this fact. * * @param name a unique identifier string representing the name of the store. */ Store openStore(String name); /** * Closes a store that has been either created or opened with the * 'createStore' or 'openStore' methods. Returns true if the * store was successfully closed. */ boolean closeStore(Store store); /** * Permanently deletes a store from the system - use with care! Returns * true if the store was successfully deleted and the resources associated * with it were freed. Returns false if the store could not be deleted. Note * that it is quite likely that a store may fail to delete in which case the * delete operation should be re-tried after a short timeout. */ boolean deleteStore(Store store); /** * Sets a new check point at the current state of this store system. This is * intended to help journalling check point and recovery systems. A check * point is set whenever data is committed to the database. Some systems * can be designed to be able to roll forward or backward to different * check points. Each check point represents a stable state in the database * life cycle. *

* A checkpoint based system greatly improves stability because if a crash * occurs in an intermediate state the changes can simply be rolled back to * the last stable state. *

* An implementation may choose not to implement check points in which case * this would be a no-op. */ void setCheckPoint(); // ---------- Locking ---------- /** * Attempts to lock this store system exclusively so that no other process * may access or change the persistent data in the store. If this fails to * lock, an IOException is generated, otherwise the lock is obtained and the * method returns. */ void lock(String lock_name) throws IOException; /** * Unlocks the exclusive access to the persistent store objects. After this * method completes, access to the store system by other processes is allowed. */ void unlock(String lock_name) throws IOException; } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/SubsetColumnTable.java000066400000000000000000000157061330501023400261640ustar00rootroot00000000000000/** * com.mckoi.database.SubsetColumnTable 06 Apr 1998 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import com.mckoi.util.IntegerVector; /** * This object is a filter that sits atop a Table object. Its purpose is to * only provide a view of the columns that are required. In a Select * query we may create a query with only the subset of columns that were * originally in the table set. This object allows us to provide an * interface to only the columns that the Table is allowed to access. *

* This method implements RootTable which means a union operation will not * decend further past this table when searching for the roots. * * @author Tobias Downer */ public final class SubsetColumnTable extends FilterTable implements RootTable { /** * Maps from the column in this table to the column in the parent table. * The number of entries of this should match the number of columns in this * table. */ private int[] column_map; /** * Maps from the column in the parent table, to the column in this table. * The size of this should match the number of columns in the parent * table. */ private int[] reverse_column_map; /** * The DataTableDef object that describes the subset column of this * table. */ private DataTableDef subset_table_def; /** * The resolved Variable aliases for this subset. These are returned by * getResolvedVariable and used in searches for findResolvedVariable. This * can be used to remap the variable names used to match the columns. */ private Variable[] aliases; /** * The Constructor. */ public SubsetColumnTable(Table parent) { super(parent); } /** * Adds a column map into this table. The int array contains a map to the * column in the parent object that we want the column number to reference. * For example, to select columns 4, 8, 1, 2 into this new table, the * array would be { 4, 8, 1, 2 }. 
*/ public void setColumnMap(int[] mapping, Variable[] aliases) { reverse_column_map = new int[parent.getColumnCount()]; for (int i = 0; i < reverse_column_map.length; ++i) { reverse_column_map[i] = -1; } column_map = mapping; this.aliases = aliases; subset_table_def = new DataTableDef(); DataTableDef parent_def = parent.getDataTableDef(); subset_table_def.setTableName(parent_def.getTableName()); for (int i = 0; i < mapping.length; ++i) { int map_to = mapping[i]; DataTableColumnDef col_def = new DataTableColumnDef(parent.getColumnDefAt(map_to)); col_def.setName(aliases[i].getName()); subset_table_def.addVirtualColumn(col_def); reverse_column_map[map_to] = i; } subset_table_def.setImmutable(); } /** * Returns the number of columns in the table. */ public int getColumnCount() { return aliases.length; } /** * Given a fully qualified variable field name, ie. 'APP.CUSTOMER.CUSTOMERID' * this will return the column number the field is at. Returns -1 if the * field does not exist in the table. */ public int findFieldName(Variable v) { for (int i = 0; i < aliases.length; ++i) { if (v.equals(aliases[i])) { return i; } } return -1; } /** * Returns the DataTableDef object that describes the columns and name * of this table. For a SubsetColumnTable object, this returns the * columns that were mapped via the 'setColumnMap' method. */ public DataTableDef getDataTableDef() { return subset_table_def; } /** * Returns a fully qualified Variable object that represents the name of * the column at the given index. For example, * new Variable(new TableName("APP", "CUSTOMER"), "ID") */ public Variable getResolvedVariable(int column) { return aliases[column]; } /** * Returns a SelectableScheme for the given column in the given VirtualTable * row domain. */ final SelectableScheme getSelectableSchemeFor(int column, int original_column, Table table) { // We need to map the original_column if the original column is a reference // in this subset column table. Otherwise we leave as is. 
// The reason is because FilterTable pretends the call came from its // parent if a request is made on this table. int mapped_original_column = original_column; if (table == this) { mapped_original_column = column_map[original_column]; } return super.getSelectableSchemeFor(column_map[column], mapped_original_column, table); } /** * Given a set, this trickles down through the Table hierarchy resolving * the given row_set to a form that the given ancestor understands. * Say you give the set { 0, 1, 2, 3, 4, 5, 6 }, this function may check * down three levels and return a new 7 element set with the rows fully * resolved to the given ancestors domain. */ final void setToRowTableDomain(int column, IntegerVector row_set, TableDataSource ancestor) { super.setToRowTableDomain(column_map[column], row_set, ancestor); } /** * Return the list of DataTable and row sets that make up the raw information * in this table. */ final RawTableInformation resolveToRawTable(RawTableInformation info) { throw new Error("Tricky to implement this method!"); // ( for a SubsetColumnTable that is ) } /** * Returns an object that represents the information in the given cell * in the table. This will generally be an expensive algorithm, so calls * to it should be kept to a minimum. Note that the offset between two * rows is not necessarily 1. */ public final TObject getCellContents(int column, int row) { return parent.getCellContents(column_map[column], row); } // ---------- Implemented from RootTable ---------- /** * This function is used to check that two tables are identical. This * is used in operations like 'union' that need to determine that the * roots are infact of the same type. */ public boolean typeEquals(RootTable table) { return (this == table); } /** * Returns a string that represents this table. 
*/ public String toString() { String name = "SCT" + hashCode(); return name + "[" + getRowCount() + "]"; } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/SystemQueryContext.java000066400000000000000000000065711330501023400264500ustar00rootroot00000000000000/** * com.mckoi.database.SystemQueryContext 25 Mar 2002 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; /** * A QueryContext that only wraps around a TransactionSystem and does not * provide implementations for the 'getTable', and 'getDatabase' methods. * * @author Tobias Downer */ final class SystemQueryContext extends AbstractQueryContext { /** * The wrapped TransactionSystem object. */ private TransactionSystem system; /** * The Transaction this is a part of. */ private SimpleTransaction transaction; /** * The context schema of this context. */ private String current_schema; /** * Constructs the QueryContext. */ SystemQueryContext(SimpleTransaction transaction, String current_schema) { this.transaction = transaction; this.system = transaction.getSystem(); this.current_schema = current_schema; } /** * Returns a TransactionSystem object that is used to determine information * about the transactional system. */ public TransactionSystem getSystem() { return system; } /** * Returns the system FunctionLookup object. 
*/ public FunctionLookup getFunctionLookup() { return getSystem().getFunctionLookup(); } /** * Increments the sequence generator and returns the next unique key. */ public long nextSequenceValue(String name) { TableName tn = transaction.resolveToTableName(current_schema, name, system.ignoreIdentifierCase()); return transaction.nextSequenceValue(tn); } /** * Returns the current sequence value returned for the given sequence * generator within the connection defined by this context. If a value was * not returned for this connection then a statement exception is generated. */ public long currentSequenceValue(String name) { TableName tn = transaction.resolveToTableName(current_schema, name, system.ignoreIdentifierCase()); return transaction.lastSequenceValue(tn); } /** * Sets the current sequence value for the given sequence generator. */ public void setSequenceValue(String name, long value) { TableName tn = transaction.resolveToTableName(current_schema, name, system.ignoreIdentifierCase()); transaction.setSequenceValue(tn, value); } /** * Returns a unique key for the given table source in the database. */ public long nextUniqueID(String table_name) { TableName tname = TableName.resolve(current_schema, table_name); return transaction.nextUniqueID(tname); } /** * Returns the user name of the connection. */ public String getUserName() { return "@SYSTEM"; } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/TArrayType.java000066400000000000000000000030221330501023400246210ustar00rootroot00000000000000/** * com.mckoi.database.TArrayType 26 Jul 2002 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import com.mckoi.database.global.SQLTypes; /** * An implementation of TType for an expression array. * * @author Tobias Downer */ public class TArrayType extends TType { static final long serialVersionUID = 6551509064212831922L; /** * Constructs the type. */ public TArrayType() { // There is no SQL type for a query plan node so we make one up here super(SQLTypes.ARRAY); } public boolean comparableTypes(TType type) { throw new Error("Query Plan types should not be compared."); } public int compareObs(Object ob1, Object ob2) { throw new Error("Query Plan types should not be compared."); } public int calculateApproximateMemoryUse(Object ob) { return 5000; } public Class javaClass() { return Expression[].class; } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/TBinaryType.java000066400000000000000000000071251330501023400247770ustar00rootroot00000000000000/** * com.mckoi.database.TBinaryType 31 Jul 2002 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import java.io.InputStream; import java.io.BufferedInputStream; import java.io.IOException; import com.mckoi.database.global.SQLTypes; import com.mckoi.database.global.ByteLongObject; import com.mckoi.database.global.BlobRef; import com.mckoi.database.global.BlobAccessor; /** * An implementation of TType for a binary block of data. * * @author Tobias Downer */ public class TBinaryType extends TType { static final long serialVersionUID = 5141996433600529406L; /** * This constrained size of the binary block of data or -1 if there is no * size limit. */ private int max_size; /** * Constructs the type. */ public TBinaryType(int sql_type, int max_size) { super(sql_type); this.max_size = max_size; } /** * Returns the maximum size of this binary type. */ public int getMaximumSize() { return max_size; } // ---------- Static utility method for comparing blobs ---------- /** * Utility method for comparing one blob with another. Uses the * BlobAccessor interface to compare the blobs. This will collate larger * blobs higher than smaller blobs. */ static int compareBlobs(BlobAccessor blob1, BlobAccessor blob2) { // We compare smaller sized blobs before larger sized blobs int c = blob1.length() - blob2.length(); if (c != 0) { return c; } else { // Size of the blobs are the same, so find the first non equal byte in // the byte array and return the difference between the two. eg. 
// compareTo({ 0, 0, 0, 1 }, { 0, 0, 0, 3 }) == -3 int len = blob1.length(); InputStream b1 = blob1.getInputStream(); InputStream b2 = blob2.getInputStream(); try { BufferedInputStream bin1 = new BufferedInputStream(b1); BufferedInputStream bin2 = new BufferedInputStream(b2); while (len > 0) { c = bin1.read() - bin2.read(); if (c != 0) { return c; } --len; } return 0; } catch (IOException e) { throw new RuntimeException("IO Error when comparing blobs: " + e.getMessage()); } } } // ---------- Implemented from TType ---------- public boolean comparableTypes(TType type) { return (type instanceof BlobAccessor); } public int compareObs(Object ob1, Object ob2) { if (ob1 == ob2) { return 0; } BlobAccessor blob1 = (BlobAccessor) ob1; BlobAccessor blob2 = (BlobAccessor) ob2; return compareBlobs(blob1, blob2); } public int calculateApproximateMemoryUse(Object ob) { if (ob != null) { if (ob instanceof BlobRef) { return 256; } else { return ((ByteLongObject) ob).length() + 24; } } else { return 32; } } public Class javaClass() { return BlobAccessor.class; } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/TBooleanType.java000066400000000000000000000034571330501023400251360ustar00rootroot00000000000000/** * com.mckoi.database.TBooleanType 26 Jul 2002 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package com.mckoi.database; import com.mckoi.util.BigNumber; /** * An implementation of TType for a boolean value. * * @author Tobias Downer */ public final class TBooleanType extends TType { static final long serialVersionUID = 5602396246537490259L; /** * Constructs the type. */ public TBooleanType(int sql_type) { super(sql_type); } public boolean comparableTypes(TType type) { return (type instanceof TBooleanType || type instanceof TNumericType); } public int compareObs(Object ob1, Object ob2) { if (ob2 instanceof BigNumber) { BigNumber n2 = (BigNumber) ob2; BigNumber n1 = ob1.equals(Boolean.FALSE) ? BigNumber.BIG_NUMBER_ZERO : BigNumber.BIG_NUMBER_ONE; return n1.compareTo(n2); } if (ob1 == ob2 || ob1.equals(ob2)) { return 0; } else if (ob1.equals(Boolean.TRUE)) { return 1; } else { return -1; } } public int calculateApproximateMemoryUse(Object ob) { return 5; } public Class javaClass() { return Boolean.class; } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/TDateType.java000066400000000000000000000025731330501023400244320ustar00rootroot00000000000000/** * com.mckoi.database.TDateType 31 Jul 2002 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import java.util.Date; /** * An implementation of TType for date objects. 
* * @author Tobias Downer */ public class TDateType extends TType { static final long serialVersionUID = 1494137367081481985L; /** * Constructs the type. */ public TDateType(int sql_type) { super(sql_type); } public boolean comparableTypes(TType type) { return (type instanceof TDateType); } public int compareObs(Object ob1, Object ob2) { return ((Date) ob1).compareTo((Date) ob2); } public int calculateApproximateMemoryUse(Object ob) { return 4 + 8; } public Class javaClass() { return Date.class; } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/TJavaObjectType.java000066400000000000000000000040121330501023400255530ustar00rootroot00000000000000/** * com.mckoi.database.TJavaObjectType 31 Jul 2002 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import com.mckoi.database.global.SQLTypes; import com.mckoi.database.global.ByteLongObject; /** * An implementation of TType for a java object of possibly defined type. * * @author Tobias Downer */ public class TJavaObjectType extends TType { static final long serialVersionUID = -4413863997719593305L; /** * The type of class this is contrained to or null if it is not constrained * to a java class. */ private String class_type; /** * Constructs the type. 
*/ public TJavaObjectType(String class_type) { super(SQLTypes.JAVA_OBJECT); this.class_type = class_type; } /** * Returns the java class type of this type. For example, "java.net.URL" if * this type is constrained to a java.net.URL object. */ public String getJavaClassTypeString() { return class_type; } public boolean comparableTypes(TType type) { return (type instanceof TJavaObjectType); } public int compareObs(Object ob1, Object ob2) { throw new Error("Java object types can not be compared."); } public int calculateApproximateMemoryUse(Object ob) { if (ob != null) { return ((ByteLongObject) ob).length() + 4; } else { return 4 + 8; } } public Class javaClass() { return ByteLongObject.class; } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/TNullType.java000066400000000000000000000032611330501023400244620ustar00rootroot00000000000000/** * com.mckoi.database.TNullType 02 Aug 2002 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import com.mckoi.database.global.SQLTypes; /** * An implementation of TType that represents a NULL type. A Null type is * an object that can't be anything else except null. * * @author Tobias Downer */ public class TNullType extends TType { static final long serialVersionUID = -271824967935043427L; /** * Constructs the type. 
*/ public TNullType() { // There is no SQL type for a query plan node so we make one up here super(SQLTypes.NULL); } public boolean comparableTypes(TType type) { return (type instanceof TNullType); } public int compareObs(Object ob1, Object ob2) { // It's illegal to compare NULL types with this method so we throw an // exception here (see method specification). throw new Error("compareObs can not compare NULL types."); } public int calculateApproximateMemoryUse(Object ob) { return 16; } public Class javaClass() { return Object.class; } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/TNumericType.java000066400000000000000000000046331330501023400251560ustar00rootroot00000000000000/** * com.mckoi.database.TNumericType 26 Jul 2002 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import com.mckoi.util.BigNumber; /** * An implementation of TType for a number. * * @author Tobias Downer */ public final class TNumericType extends TType { static final long serialVersionUID = -5133489773377747175L; /** * The size of the number. */ private int size; /** * The scale of the number. */ private int scale; /** * Constructs a type with the given sql_type value, the size, * and the scale of the number. Note that the 'sql_type' MUST be a numeric * SQL type (FLOAT, INTEGER, DOUBLE, etc). 
*/ public TNumericType(int sql_type, int size, int scale) { super(sql_type); this.size = size; this.scale = scale; } /** * Returns the size of the number (-1 is don't care). */ public int getSize() { return size; } /** * Returns the scale of the number (-1 is don't care). */ public int getScale() { return scale; } // ---------- Implemented from TType ---------- public boolean comparableTypes(TType type) { return (type instanceof TNumericType || type instanceof TBooleanType); } public int compareObs(Object ob1, Object ob2) { BigNumber n1 = (BigNumber) ob1; BigNumber n2; if (ob2 instanceof BigNumber) { n2 = (BigNumber) ob2; } else { n2 = ob2.equals(Boolean.TRUE) ? BigNumber.BIG_NUMBER_ONE : BigNumber.BIG_NUMBER_ZERO; } return n1.compareTo(n2); } public int calculateApproximateMemoryUse(Object ob) { // A heuristic - it's difficult to come up with an accurate number // for this. return 25 + 16; } public Class javaClass() { return BigNumber.class; } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/TObject.java000066400000000000000000000514731330501023400241240ustar00rootroot00000000000000/** * com.mckoi.database.TObject 26 Jul 2002 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package com.mckoi.database; import java.io.IOException; import java.io.ObjectOutputStream; import java.util.Locale; import com.mckoi.util.BigNumber; import com.mckoi.database.global.ByteLongObject; import com.mckoi.database.global.BlobRef; import com.mckoi.database.global.ClobRef; import com.mckoi.database.global.SQLTypes; import com.mckoi.database.global.StringObject; /** * A TObject is a strongly typed object in a database engine. A TObject must * maintain type information (eg. STRING, NUMBER, etc) along with the * object value being represented itself. * * @author Tobias Downer */ public final class TObject implements java.io.Serializable { static final long serialVersionUID = -5129157457207765079L; /** * The type of this object. */ private TType type; /** * The Java representation of the object. */ private Object ob; /** * Constructs the TObject as the given type. */ public TObject(TType type, Object ob) { this.type = type; if (ob instanceof String) { this.ob = StringObject.fromString((String) ob); } else { this.ob = ob; } } /** * Returns the type of this object. */ public TType getTType() { return type; } /** * Returns true if the object is null. Note that we must still be able to * determine type information for an object that is NULL. */ public boolean isNull() { return (getObject() == null); } /** * Returns a java.lang.Object that is the data behind this object. */ public Object getObject() { return ob; } /** * Returns the approximate memory use of this object in bytes. This is used * when the engine is caching objects and we need a general indication of how * much space it takes up in memory. */ public int approximateMemoryUse() { return getTType().calculateApproximateMemoryUse(getObject()); } /** * Returns true if the type of this object is logically comparable to the * type of the given object. For example, VARCHAR and LONGVARCHAR are * comparable types. DOUBLE and FLOAT are comparable types. DOUBLE and * VARCHAR are not comparable types. 
*/ public boolean comparableTypes(TObject ob) { return getTType().comparableTypes(ob.getTType()); } /** * Returns the BigNumber of this object if this object is a numeric type. If * the object is not a numeric type or is NULL then a null object is * returned. This method can not be used to cast from a type to a number. */ public BigNumber toBigNumber() { if (getTType() instanceof TNumericType) { return (BigNumber) getObject(); } return null; } /** * Returns the Boolean of this object if this object is a boolean type. If * the object is not a boolean type or is NULL then a null object is * returned. This method must not be used to cast from a type to a boolean. */ public Boolean toBoolean() { if (getTType() instanceof TBooleanType) { return (Boolean) getObject(); } return null; } /** * Returns the String of this object if this object is a string type. If * the object is not a string type or is NULL then a null object is * returned. This method must not be used to cast from a type to a string. */ public String toStringValue() { if (getTType() instanceof TStringType) { return getObject().toString(); } return null; } public static final TObject BOOLEAN_TRUE = new TObject(TType.BOOLEAN_TYPE, Boolean.TRUE); public static final TObject BOOLEAN_FALSE = new TObject(TType.BOOLEAN_TYPE, Boolean.FALSE); public static final TObject BOOLEAN_NULL = new TObject(TType.BOOLEAN_TYPE, null); public static final TObject NULL_OBJECT = new TObject(TType.NULL_TYPE, null); /** * Returns a TObject of boolean type that is either true or false. */ public static TObject booleanVal(boolean b) { if (b) { return BOOLEAN_TRUE; } return BOOLEAN_FALSE; } /** * Returns a TObject of numeric type that represents the given int value. */ public static TObject intVal(int val) { return bigNumberVal(BigNumber.fromLong(val)); } /** * Returns a TObject of numeric type that represents the given long value. 
*/ public static TObject longVal(long val) { return bigNumberVal(BigNumber.fromLong(val)); } /** * Returns a TObject of numeric type that represents the given double value. */ public static TObject doubleVal(double val) { return bigNumberVal(BigNumber.fromDouble(val)); } /** * Returns a TObject of numeric type that represents the given BigNumber * value. */ public static TObject bigNumberVal(BigNumber val) { return new TObject(TType.NUMERIC_TYPE, val); } /** * Returns a TObject of VARCHAR type that represents the given StringObject * value. */ public static TObject stringVal(StringObject str) { return new TObject(TType.STRING_TYPE, str); } /** * Returns a TObject of VARCHAR type that represents the given String value. */ public static TObject stringVal(String str) { return new TObject(TType.STRING_TYPE, StringObject.fromString(str)); } /** * Returns a TObject of DATE type that represents the given time value. */ public static TObject dateVal(java.util.Date d) { return new TObject(TType.DATE_TYPE, d); } /** * Returns a TObject of NULL type that represents a null value. */ public static TObject nullVal() { return NULL_OBJECT; } /** * Returns a TObject from the given Java value. 
*/ public static TObject objectVal(Object ob) { if (ob == null) { return nullVal(); } else if (ob instanceof BigNumber) { return bigNumberVal((BigNumber) ob); } else if (ob instanceof StringObject) { return stringVal((StringObject) ob); } else if (ob instanceof Boolean) { return booleanVal(((Boolean) ob).booleanValue()); } else if (ob instanceof java.util.Date) { return dateVal((java.util.Date) ob); } else if (ob instanceof ByteLongObject) { return new TObject(TType.BINARY_TYPE, (ByteLongObject) ob); } else if (ob instanceof byte[]) { return new TObject(TType.BINARY_TYPE, new ByteLongObject((byte[]) ob)); } else if (ob instanceof BlobRef) { return new TObject(TType.BINARY_TYPE, (BlobRef) ob); } else if (ob instanceof ClobRef) { return new TObject(TType.STRING_TYPE, (ClobRef) ob); } else { throw new Error("Don't know how to convert object type " + ob.getClass()); } } /** * Compares this object with the given object (which is of a logically * comparable type). Returns 0 if the value of the objects are equal, < 0 * if this object is smaller than the given object, and > 0 if this object * is greater than the given object. *

* This can not be used to compare null values so it assumes that checks * for null have already been made. */ public int compareToNoNulls(TObject tob) { TType type = getTType(); // Strings must be handled as a special case. if (type instanceof TStringType) { // We must determine the locale to compare against and use that. TStringType stype = (TStringType) type; // If there is no locale defined for this type we use the locale in the // given type. if (stype.getLocale() == null) { type = tob.getTType(); } } return type.compareObs(getObject(), tob.getObject()); } /** * Compares this object with the given object (which is of a logically * comparable type). Returns 0 if the value of the objects are equal, < 0 * if this object is smaller than the given object, and > 0 if this object * is greater than the given object. *

* This compares NULL values before non null values, and null values are * equal. */ public int compareTo(TObject tob) { // If this is null if (isNull()) { // and value is null return 0 return less if (tob.isNull()) { return 0; } else { return -1; } } else { // If this is not null and value is null return +1 if (tob.isNull()) { return 1; } else { // otherwise both are non null so compare normally. return compareToNoNulls(tob); } } } /** * Equality test. This will throw an exception if it is used. The reason * for this is because it's not clear what we would be testing the equality * of with this method. Equality of the object + the type or equality of the * objects only? */ public boolean equals(Object ob) { throw new Error("equals method should not be used."); } /** * Equality test. Returns true if this object is equivalent to the given * TObject. This means the types are the same, and the object itself is the * same. */ public boolean valuesEqual(TObject ob) { if (this == ob) { return true; } if (getTType().comparableTypes(ob.getTType())) { return compareTo(ob) == 0; } return false; } // ---------- Object operators ---------- /** * Bitwise OR operation of this object with the given object. If either * numeric value has a scale of 1 or greater then it returns null. If this * or the given object is not a numeric type then it returns null. If either * this object or the given object is NULL, then the NULL object is returned. */ public TObject operatorOr(TObject val) { BigNumber v1 = toBigNumber(); BigNumber v2 = val.toBigNumber(); TType result_type = TType.getWidestType(getTType(), val.getTType()); if (v1 == null || v2 == null) { return new TObject(result_type, null); } return new TObject(result_type, v1.bitWiseOr(v2)); } /** * Mathematical addition of this object to the given object. If this or * the given object is not a numeric type then it returns null. * If either this object or the given object is NULL, then the NULL object * is returned. 
*/ public TObject operatorAdd(TObject val) { BigNumber v1 = toBigNumber(); BigNumber v2 = val.toBigNumber(); TType result_type = TType.getWidestType(getTType(), val.getTType()); if (v1 == null || v2 == null) { return new TObject(result_type, null); } return new TObject(result_type, v1.add(v2)); } /** * Mathematical subtraction of this object to the given object. If this or * the given object is not a numeric type then it returns null. * If either this object or the given object is NULL, then the NULL object * is returned. */ public TObject operatorSubtract(TObject val) { BigNumber v1 = toBigNumber(); BigNumber v2 = val.toBigNumber(); TType result_type = TType.getWidestType(getTType(), val.getTType()); if (v1 == null || v2 == null) { return new TObject(result_type, null); } return new TObject(result_type, v1.subtract(v2)); } /** * Mathematical multiply of this object to the given object. If this or * the given object is not a numeric type then it returns null. * If either this object or the given object is NULL, then the NULL object * is returned. */ public TObject operatorMultiply(TObject val) { BigNumber v1 = toBigNumber(); BigNumber v2 = val.toBigNumber(); TType result_type = TType.getWidestType(getTType(), val.getTType()); if (v1 == null || v2 == null) { return new TObject(result_type, null); } return new TObject(result_type, v1.multiply(v2)); } /** * Mathematical division of this object to the given object. If this or * the given object is not a numeric type then it returns null. * If either this object or the given object is NULL, then the NULL object * is returned. */ public TObject operatorDivide(TObject val) { BigNumber v1 = toBigNumber(); BigNumber v2 = val.toBigNumber(); TType result_type = TType.getWidestType(getTType(), val.getTType()); if (v1 == null || v2 == null) { return new TObject(result_type, null); } return new TObject(result_type, v1.divide(v2)); } /** * String concat of this object to the given object. 
If this or the given * object is not a string type then it returns null. If either this object * or the given object is NULL, then the NULL object is returned. *

* This operator always returns an object that is a VARCHAR string type of * unlimited size with locale inherited from either this or val depending * on whether the locale information is defined or not. */ public TObject operatorConcat(TObject val) { // If this or val is null then return the null value if (isNull()) { return this; } else if (val.isNull()) { return val; } TType tt1 = getTType(); TType tt2 = val.getTType(); if (tt1 instanceof TStringType && tt2 instanceof TStringType) { // Pick the first locale, TStringType st1 = (TStringType) tt1; TStringType st2 = (TStringType) tt2; Locale str_locale = null; int str_strength = 0; int str_decomposition = 0; if (st1.getLocale() != null) { str_locale = st1.getLocale(); str_strength = st1.getStrength(); str_decomposition = st1.getDecomposition(); } else if (st2.getLocale() != null) { str_locale = st2.getLocale(); str_strength = st2.getStrength(); str_decomposition = st2.getDecomposition(); } TStringType dest_type = st1; if (str_locale != null) { dest_type = new TStringType(SQLTypes.VARCHAR, -1, str_locale, str_strength, str_decomposition); } return new TObject(dest_type, StringObject.fromString(toStringValue() + val.toStringValue())); } // Return null if LHS or RHS are not strings return new TObject(tt1, null); } /** * Comparison of this object and the given object. The compared objects * must be the same type otherwise it returns false. This * is able to compare null values. */ public TObject operatorIs(TObject val) { if (isNull() && val.isNull()) { return BOOLEAN_TRUE; } if (comparableTypes(val)) { return booleanVal(compareTo(val) == 0); } // Not comparable types so return false return BOOLEAN_FALSE; } /** * Comparison of this object and the given object. The compared objects * must be the same type otherwise it returns null (doesn't know). If either * this object or the given object is NULL then NULL is returned. 
*/ public TObject operatorEquals(TObject val) { // Check the types are comparable if (comparableTypes(val) && !isNull() && !val.isNull()) { return booleanVal(compareToNoNulls(val) == 0); } // Not comparable types so return null return BOOLEAN_NULL; } /** * Comparison of this object and the given object. The compared objects * must be the same type otherwise it returns null (doesn't know). If either * this object or the given object is NULL then NULL is returned. */ public TObject operatorNotEquals(TObject val) { // Check the types are comparable if (comparableTypes(val) && !isNull() && !val.isNull()) { return booleanVal(compareToNoNulls(val) != 0); } // Not comparable types so return null return BOOLEAN_NULL; } /** * Comparison of this object and the given object. The compared objects * must be the same type otherwise it returns null (doesn't know). If either * this object or the given object is NULL then NULL is returned. */ public TObject operatorGreater(TObject val) { // Check the types are comparable if (comparableTypes(val) && !isNull() && !val.isNull()) { return booleanVal(compareToNoNulls(val) > 0); } // Not comparable types so return null return BOOLEAN_NULL; } /** * Comparison of this object and the given object. The compared objects * must be the same type otherwise it returns null (doesn't know). If either * this object or the given object is NULL then NULL is returned. */ public TObject operatorGreaterEquals(TObject val) { // Check the types are comparable if (comparableTypes(val) && !isNull() && !val.isNull()) { return booleanVal(compareToNoNulls(val) >= 0); } // Not comparable types so return null return BOOLEAN_NULL; } /** * Comparison of this object and the given object. The compared objects * must be the same type otherwise it returns null (doesn't know). If either * this object or the given object is NULL then NULL is returned. 
*/ public TObject operatorLess(TObject val) { // Check the types are comparable if (comparableTypes(val) && !isNull() && !val.isNull()) { return booleanVal(compareToNoNulls(val) < 0); } // Not comparable types so return null return BOOLEAN_NULL; } /** * Comparison of this object and the given object. The compared objects * must be the same type otherwise it returns null (doesn't know). If either * this object or the given object is NULL then NULL is returned. */ public TObject operatorLessEquals(TObject val) { // Check the types are comparable if (comparableTypes(val) && !isNull() && !val.isNull()) { return booleanVal(compareToNoNulls(val) <= 0); } // Not comparable types so return null return BOOLEAN_NULL; } /** * Performs a logical NOT on this value. */ public TObject operatorNot() { // If type is null if (isNull()) { return this; } Boolean b = toBoolean(); if (b != null) { return booleanVal(!b.booleanValue()); } return BOOLEAN_NULL; } // ---------- Casting methods ----------- /** * Returns a TObject of the given type and with the given Java object. If * the object is not of the right type then it is cast to the correct type. */ public static TObject createAndCastFromObject(TType type, Object ob) { return new TObject(type, TType.castObjectToTType(ob, type)); } /** * Casts this object to the given type and returns a new TObject. */ public TObject castTo(TType cast_to_type) { Object ob = getObject(); return createAndCastFromObject(cast_to_type, ob); } public String toString() { if (isNull()) { return "NULL"; } else { return getObject().toString(); } } // // ------ Default casting objects ---------- // // /** // * Casts this object to a number. If this object is NULL then the returned // * object is a numeric typed NULL. 
// */ // public TObject castToNumber() { // if (getTType().isString()) { // try { // return new BigDecimal((String) ob); // } // catch (Throwable e) { // return BD_ZERO; // } // } // if (getTType().isBoolean()) { // if (((Boolean) ob).booleanValue() == true) { // return BD_ONE; // } // else { // return BD_ZERO; // } // } // if (getTType().isDate()) { // return new BigDecimal(((Date) ob).getTime()); // } // return (BigDecimal) ob; // } // // // // ---------- Convenience statics ---------- // // private final static BigDecimal BD_ZERO = new BigDecimal(0); // private final static BigDecimal BD_ONE = new BigDecimal(1); /** * Writes the state of this object to the object stream. This method is * implemented because GCJ doesn't like it if you implement readObject * without writeObject. */ private void writeObject(ObjectOutputStream out) throws IOException { out.defaultWriteObject(); } /** * Serialization overwritten method. We overwrite this method because of a * change with how strings are stored. In 0.93 we stored strings in this * object as java.lang.String and in 0.94 we stored strings as * java.lang.StringObject. This performs a conversion between the old and * new format. */ private void readObject(java.io.ObjectInputStream in) throws IOException, ClassNotFoundException { in.defaultReadObject(); // HACK: We convert old TObject that used String to represent a string object // to StringObject if (ob instanceof String) { ob = StringObject.fromString((String) ob); } } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/TQueryPlanType.java000066400000000000000000000027471330501023400255000ustar00rootroot00000000000000/** * com.mckoi.database.TQueryPlanType 26 Jul 2002 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; /** * An implementation of TType for a query plan value. * * @author Tobias Downer */ public class TQueryPlanType extends TType { static final long serialVersionUID = -1122548450083929179L; /** * Constructs the type. */ public TQueryPlanType() { // There is no SQL type for a query plan node so we make one up here super(-19443); } public boolean comparableTypes(TType type) { throw new Error("Query Plan types should not be compared."); } public int compareObs(Object ob1, Object ob2) { throw new Error("Query Plan types should not be compared."); } public int calculateApproximateMemoryUse(Object ob) { return 5000; } public Class javaClass() { return QueryPlanNode.class; } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/TStringType.java000066400000000000000000000213011330501023400250110ustar00rootroot00000000000000/** * com.mckoi.database.TStringType 26 Jul 2002 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package com.mckoi.database; import java.util.Locale; import java.text.Collator; import com.mckoi.database.global.StringAccessor; import java.io.Reader; import java.io.IOException; /** * An implementation of TType for a String. * * @author Tobias Downer */ public final class TStringType extends TType { static final long serialVersionUID = -4189752898050725908L; /** * The maximum allowed size for the string. */ private int max_size; /** * The locale of the string. */ private Locale locale; /** * The strength of the collator for this string (as defined in * java.text.Collator). */ private int strength; /** * The decomposition mode of the collator for this string type (as defined in * java.text.Collator). */ private int decomposition; /** * The Collator object for this type, created when we first compare objects. */ private transient Collator collator; /** * Constructs a type with the given sql_type value, the maximum size, * and the locale of the string. Note that the 'sql_type' MUST be a string * SQL type. *

* Note that a string type may be constructed with a NULL locale which * means strings are compared lexicographically. */ public TStringType(int sql_type, int max_size, Locale locale, int strength, int decomposition) { super(sql_type); this.max_size = max_size; this.strength = strength; this.decomposition = decomposition; this.locale = locale; } /** * Constructs a type with the given sql_type value, the maximum size, * and the locale of the string. Note that the 'sql_type' MUST be a string * SQL type. *

* Note that a string type may be constructed with a NULL locale which * means strings are compared lexicographically. The string locale is * formated as [2 char language][2 char country][rest is variant]. For * example, US english would be 'enUS', French would be 'fr' and Germany * would be 'deDE'. */ public TStringType(int sql_type, int max_size, String locale_str, int strength, int decomposition) { super(sql_type); this.max_size = max_size; this.strength = strength; this.decomposition = decomposition; if (locale_str != null && locale_str.length() >= 2) { String language = locale_str.substring(0, 2); String country = ""; String variant = ""; if (locale_str.length() > 2) { country = locale_str.substring(2, 4); if (locale_str.length() > 4) { variant = locale_str.substring(4); } } locale = new Locale(language, country, variant); } } /** * Constructor without strength and decomposition that sets to default * levels. */ public TStringType(int sql_type, int max_size, String locale_str) { this(sql_type, max_size, locale_str, -1, -1); } /** * Returns the maximum size of the string (-1 is don't care). */ public int getMaximumSize() { return max_size; } /** * Returns the strength of this string type as defined in java.text.Collator. */ public int getStrength() { return strength; } /** * Returns the decomposition of this string type as defined in * java.text.Collator. */ public int getDecomposition() { return decomposition; } /** * Returns the locale of the string. */ public Locale getLocale() { return locale; } /** * Returns the locale information as a formatted string. *

* Note that a string type may be constructed with a NULL locale which * means strings are compared lexicographically. The string locale is * formated as [2 char language][2 char country][rest is variant]. For * example, US english would be 'enUS', French would be 'fr' and Germany * would be 'deDE'. */ public String getLocaleString() { if (locale == null) { return ""; } else { StringBuffer locale_str = new StringBuffer(); locale_str.append(locale.getLanguage()); locale_str.append(locale.getCountry()); locale_str.append(locale.getVariant()); return new String(locale_str); } } /** * An implementation of a lexicographical compareTo operation on a * StringAccessor object. This uses the Reader object to compare the strings * over a stream if the size is such that it is more efficient to do so. */ private int lexicographicalOrder(StringAccessor str1, StringAccessor str2) { // If both strings are small use the 'toString' method to compare the // strings. This saves the overhead of having to store very large string // objects in memory for all comparisons. long str1_size = str1.length(); long str2_size = str2.length(); if (str1_size < 32 * 1024 && str2_size < 32 * 1024) { return str1.toString().compareTo(str2.toString()); } // The minimum size long size = Math.min(str1_size, str2_size); Reader r1 = str1.getReader(); Reader r2 = str2.getReader(); try { try { while (size > 0) { int c1 = r1.read(); int c2 = r2.read(); if (c1 != c2) { return c1 - c2; } --size; } // They compare equally up to the limit, so now compare sizes, if (str1_size > str2_size) { // If str1 is larger return 1; } else if (str1_size < str2_size) { // If str1 is smaller return -1; } // Must be equal return 0; } finally { r1.close(); r2.close(); } } catch (IOException e) { throw new RuntimeException("IO Error: " + e.getMessage()); } } /** * Returns the java.text.Collator object for this string type. This collator * is used to compare strings of this locale. *

* This method is synchronized because a side effect of this method is to * store the collator object instance in a local variable. */ private synchronized Collator getCollator() { if (collator != null) { return collator; } else { // NOTE: Even if we are creating a lot of these objects, it shouldn't // be too bad on memory usage because Collator.getInstance caches // collation information behind the scenes. collator = Collator.getInstance(locale); int strength = getStrength(); int decomposition = getStrength(); if (strength >= 0) { collator.setStrength(strength); } if (decomposition >= 0) { collator.setDecomposition(decomposition); } return collator; } } // ---------- Overwritten from TType ---------- /** * For strings, the locale must be the same for the types to be comparable. * If the locale is not the same then they are not comparable. Note that * strings with a locale of null can be compared with any other locale. So * this will only return false if both types have different (but defined) * locales. */ public boolean comparableTypes(TType type) { // Are we comparing with another string type? 
if (type instanceof TStringType) { TStringType s_type = (TStringType) type; // If either locale is null return true if (getLocale() == null || s_type.getLocale() == null) { return true; } // If the locales are the same return true return getLocale().equals(s_type.getLocale()); } return false; } public int compareObs(Object ob1, Object ob2) { if (ob1 == ob2) { return 0; } // If lexicographical ordering, if (locale == null) { return lexicographicalOrder((StringAccessor) ob1, (StringAccessor) ob2); // return ob1.toString().compareTo(ob2.toString()); } else { return getCollator().compare(ob1.toString(), ob2.toString()); } } public int calculateApproximateMemoryUse(Object ob) { if (ob != null) { return (((StringAccessor) ob).length() * 2) + 24; } else { return 32; } } public Class javaClass() { return StringAccessor.class; } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/TType.java000066400000000000000000000367631330501023400236440ustar00rootroot00000000000000/** * com.mckoi.database.TType 26 Jul 2002 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
 *
 */

package com.mckoi.database;

import com.mckoi.database.global.SQLTypes;
import com.mckoi.database.global.ByteLongObject;
import com.mckoi.database.global.CastHelper;
import com.mckoi.util.BigNumber;
import com.mckoi.util.StringUtil;
import java.util.Date;
import java.util.List;

/**
 * A TType object represents a type in a database engine. For example, an
 * implementation might represent a STRING or a NUMBER. This is an
 * immutable class. See implementations of this object for further examples.
 *
 * @author Tobias Downer
 */
public abstract class TType implements java.io.Serializable {

  static final long serialVersionUID = 5866230818579853961L;

  /**
   * The type as an SQL identifier from com.mckoi.database.global.SQLTypes.
   */
  private int sql_type;

  /**
   * Constructs the type object.
   */
  protected TType(int sql_type) {
    this.sql_type = sql_type;
  }

  /**
   * Returns the SQL type of this.
   */
  public int getSQLType() {
    return sql_type;
  }

  /**
   * Returns this TType as a fully parsable declared SQL type. For example,
   * if this represents a string we might return "VARCHAR(30) COLLATE 'jpJP'"
   * This method is used for debugging and display purposes only and we would
   * not expect to actually feed this back into an SQL parser.
   */
  public String asSQLString() {
    return DataTableColumnDef.sqlTypeToString(getSQLType());
  }

  /**
   * Returns true if the type of this object is logically comparable to the
   * type of the given object. For example, VARCHAR and LONGVARCHAR are
   * comparable types.  DOUBLE and FLOAT are comparable types.  DOUBLE and
   * VARCHAR are not comparable types.
   */
  public abstract boolean comparableTypes(TType type);

  /**
   * Compares two objects that are logically comparable under this
   * type. Returns 0 if the values are equal, a value greater than 0 if ob1
   * is greater than ob2, and a value less than 0 if ob1 is less than ob2.
   * It is illegal to pass NULL values for ob1 or ob2 into this method.
   */
  public abstract int compareObs(Object ob1, Object ob2);

  /**
   * Calculates the approximate memory usage of an object of this type in
   * bytes.
   */
  public abstract int calculateApproximateMemoryUse(Object ob);

  /**
   * Returns the Java Class that is used to represent this type of object.
   * For example, string types would return String.class.
   */
  public abstract Class javaClass();

  // ----- Static methods for Encoding/Decoding TType to strings -----

  /**
   * Returns the value of a string that is quoted. For example, 'test' becomes
   * test.  Throws if the string is not surrounded by single quotes.
   */
  private static String parseQuotedString(String str) {
    if (str.startsWith("'") && str.endsWith("'")) {
      return str.substring(1, str.length() - 1);
    }
    else {
      throw new RuntimeException("String is not quoted: " + str);
    }
  }

  /**
   * Encodes a TType into a string which is a useful way to serialize a TType.
   * The encoded string should be understandable when read.
   * The format is NAME(param,param,...); see 'decodeString' for the inverse.
   */
  public static String asEncodedString(TType type) {
    StringBuffer buf = new StringBuffer();
    if (type instanceof TBooleanType) {
      buf.append("BOOLEAN(");
      buf.append(type.getSQLType());
      buf.append(')');
    }
    else if (type instanceof TStringType) {
      TStringType str_type = (TStringType) type;
      buf.append("STRING(");
      buf.append(type.getSQLType());
      buf.append(',');
      buf.append(str_type.getMaximumSize());
      buf.append(",'");
      buf.append(str_type.getLocaleString());
      buf.append("',");
      buf.append(str_type.getStrength());
      buf.append(',');
      buf.append(str_type.getDecomposition());
      buf.append(')');
    }
    else if (type instanceof TNumericType) {
      TNumericType num_type = (TNumericType) type;
      buf.append("NUMERIC(");
      buf.append(type.getSQLType());
      buf.append(',');
      buf.append(num_type.getSize());
      buf.append(',');
      buf.append(num_type.getScale());
      buf.append(')');
    }
    else if (type instanceof TBinaryType) {
      TBinaryType bin_type = (TBinaryType) type;
      buf.append("BINARY(");
      buf.append(type.getSQLType());
      buf.append(',');
      buf.append(bin_type.getMaximumSize());
      buf.append(')');
    }
    else if (type instanceof TDateType) {
      buf.append("DATE(");
      buf.append(type.getSQLType());
      buf.append(')');
    }
    else if (type instanceof TNullType) {
      buf.append("NULL(");
      buf.append(type.getSQLType());
      buf.append(')');
    }
    else if (type instanceof TJavaObjectType) {
      buf.append("JAVAOBJECT(");
      buf.append(type.getSQLType());
      buf.append(",'");
      buf.append(((TJavaObjectType) type).getJavaClassTypeString());
      buf.append("')");
    }
    else {
      throw new RuntimeException("Can not encode type: " + type);
    }
    return new String(buf);
  }

  /**
   * Given an array of TType, returns a String that that is the encoded form
   * of the array and that can be later decoded back into an array of TType.
   * Useful for serializing a list of TType information.  Entries are
   * separated by the "!|" token.
   */
  public static String asEncodedString(TType[] types) {
    StringBuffer buf = new StringBuffer();
    for (int i = 0; i < types.length; ++i) {
      buf.append(asEncodedString(types[i]));
      if (i < types.length - 1) {
        buf.append("!|");
      }
    }
    return new String(buf);
  }

  /**
   * Decodes a String that has been encoded with the 'asEncodedString' method
   * and returns a TType that represented the type.
   * NOTE(review): assumes well-formed input produced by 'asEncodedString';
   * malformed input surfaces as RuntimeException/NumberFormatException.
   */
  public static TType decodeString(String encoded_str) {
    int param_s = encoded_str.indexOf('(');
    int param_e = encoded_str.lastIndexOf(')');
    String params = encoded_str.substring(param_s + 1, param_e);
    List param_list = StringUtil.explode(params, ",");
    // First parameter is always the SQL type code.
    int sql_type = Integer.parseInt((String) param_list.get(0));

    if (encoded_str.startsWith("BOOLEAN(")) {
      return new TBooleanType(sql_type);
    }
    else if (encoded_str.startsWith("STRING(")) {
      int size = Integer.parseInt((String) param_list.get(1));
      String locale_str = parseQuotedString((String) param_list.get(2));
      // An empty locale string means lexicographical ordering (null locale).
      if (locale_str.length() == 0) {
        locale_str = null;
      }
      int strength = Integer.parseInt((String) param_list.get(3));
      int decomposition = Integer.parseInt((String) param_list.get(4));
      return new TStringType(sql_type, size, locale_str,
                             strength, decomposition);
    }
    else if (encoded_str.startsWith("NUMERIC(")) {
      int size = Integer.parseInt((String) param_list.get(1));
      int scale = Integer.parseInt((String) param_list.get(2));
      return new TNumericType(sql_type, size, scale);
    }
    else if (encoded_str.startsWith("BINARY(")) {
      int size = Integer.parseInt((String) param_list.get(1));
      return new TBinaryType(sql_type, size);
    }
    else if (encoded_str.startsWith("DATE(")) {
      return new TDateType(sql_type);
    }
    else if (encoded_str.startsWith("NULL(")) {
      return new TNullType();
    }
    else if (encoded_str.startsWith("JAVAOBJECT(")) {
      String class_str = parseQuotedString((String) param_list.get(1));
      return new TJavaObjectType(class_str);
    }
    else {
      throw new RuntimeException("Can not parse encoded string: " +
                                 encoded_str);
    }
  }

  /**
   * Decodes a list (or array) of TType objects that was previously encoded
   * with the 'asEncodedString(Type[])' method.
   */
  public static TType[] decodeTypes(String encoded_str) {
    List items = StringUtil.explode(encoded_str, "!|");
    // Handle the empty string (no args)
    if (items.size() == 1) {
      if (items.get(0).equals("")) {
        return new TType[0];
      }
    }
    int sz = items.size();
    TType[] return_types = new TType[sz];
    for (int i = 0; i < sz; ++i) {
      String str = (String) items.get(i);
      return_types[i] = decodeString(str);
    }
    return return_types;
  }

  // -----

  /**
   * Returns a TBinaryType constrained for the given class.
   */
  public static TType javaObjectType(String class_name) {
    return new TJavaObjectType(class_name);
  }

  /**
   * Returns a TStringType object of the given size and locale information.
   * If locale is null then collation is lexicographical.
   */
  public static TType stringType(int sql_type, int size,
                                 String locale, int strength,
                                 int decomposition) {
    return new TStringType(sql_type, size, locale, strength, decomposition);
  }

  /**
   * Returns a TNumericType object of the given size and scale.
   */
  public static TType numericType(int sql_type, int size, int scale) {
    return new TNumericType(sql_type, size, scale);
  }

  /**
   * Returns a TBooleanType object.
   */
  public static TType booleanType(int sql_type) {
    return new TBooleanType(sql_type);
  }

  /**
   * Returns a TDateType object.
   */
  public static TType dateType(int sql_type) {
    return new TDateType(sql_type);
  }

  /**
   * Returns a TBinaryType object.
   */
  public static TType binaryType(int sql_type, int size) {
    return new TBinaryType(sql_type, size);
  }

  // -----

  /**
   * Casts the given Java object to the given type. For example, given
   * a BigNumber object and STRING_TYPE, this would return the number as a
   * string.
   */
  public static Object castObjectToTType(Object ob, TType type) {
    // Handle the null case
    if (ob == null) {
      return null;
    }
    // Extract size/scale constraints from the destination type where they
    // apply; -1 means unconstrained.
    int size = -1;
    int scale = -1;
    int sql_type = type.getSQLType();
    if (type instanceof TStringType) {
      size = ((TStringType) type).getMaximumSize();
    }
    else if (type instanceof TNumericType) {
      TNumericType num_type = (TNumericType) type;
      size = num_type.getSize();
      scale = num_type.getScale();
    }
    else if (type instanceof TBinaryType) {
      size = ((TBinaryType) type).getMaximumSize();
    }
    ob = CastHelper.castObjectToSQLType(ob, type.getSQLType(), size, scale,
                                DataTableColumnDef.sqlTypeToString(sql_type));
    return ob;
  }

  /**
   * Given a java class, this will return a default TType object that can
   * encapsulate Java objects of this type. For example, given
   * java.lang.String, this will return a TStringType with no locale and
   * maximum size.
   * <p>
   * Note that using this method is generally not recommended unless you
   * really can't determine more type information than from the Java object
   * itself.
   */
  public static TType fromClass(Class c) {
    if (c == String.class) {
      return STRING_TYPE;
    }
    else if (c == BigNumber.class) {
      return NUMERIC_TYPE;
    }
    else if (c == java.util.Date.class) {
      return DATE_TYPE;
    }
    else if (c == Boolean.class) {
      return BOOLEAN_TYPE;
    }
    else if (c == ByteLongObject.class) {
      return BINARY_TYPE;
    }
    else {
      throw new Error("Don't know how to convert " + c + " to a TType.");
    }
  }

  /**
   * Assuming that the two types are numeric types, this will return the
   * 'widest' of the two types. For example, an INTEGER is a wider type than a
   * SHORT, and a FLOAT is wider than an INTEGER.
   * <p>
   * Code by Jim McBeath.
   */
  public static TType getWidestType(TType t1, TType t2) {
    int t1SQLType = t1.getSQLType();
    int t2SQLType = t2.getSQLType();
    // DECIMAL and NUMERIC are arbitrary precision and always win.
    if (t1SQLType == SQLTypes.DECIMAL) {
      return t1;
    }
    if (t2SQLType == SQLTypes.DECIMAL) {
      return t2;
    }
    if (t1SQLType == SQLTypes.NUMERIC) {
      return t1;
    }
    if (t2SQLType == SQLTypes.NUMERIC) {
      return t2;
    }

    if (t1SQLType == SQLTypes.BIT) {
      return t2;  // It can't be any smaller than a BIT
    }
    if (t2SQLType == SQLTypes.BIT) {
      return t1;
    }

    int t1IntSize = getIntSize(t1SQLType);
    int t2IntSize = getIntSize(t2SQLType);
    if (t1IntSize > 0 && t2IntSize > 0) {
      // Both are int types, use the largest size
      return (t1IntSize > t2IntSize)?t1:t2;
    }

    int t1FloatSize = getFloatSize(t1SQLType);
    int t2FloatSize = getFloatSize(t2SQLType);
    if (t1FloatSize > 0 && t2FloatSize > 0) {
      // Both are floating types, use the largest size
      return (t1FloatSize > t2FloatSize)?t1:t2;
    }

    // At this point one operand is an int type and the other a float type
    // (a non-int non-float type would report size 0 for both).
    if (t1FloatSize > t2IntSize) {
      return t1;
    }
    if (t2FloatSize > t1IntSize) {
      return t2;
    }
    if (t1IntSize >= t2FloatSize || t2IntSize >= t1FloatSize) {
      // Must be a long (8 bytes) and a real (4 bytes), widen to a double
      return new TNumericType(SQLTypes.DOUBLE,8,-1);
    }
    // NOTREACHED - can't get here, the last three if statements cover
    // all possibilities.
    throw new Error("Widest type error.");
  }

  /**
   * Get the number of bytes used by an integer type.
   * <p>
   * Code by Jim McBeath.
   *
   * @param sqlType The SQL type.
   * @return The number of bytes required for data of that type, or 0
   *         if not an int type.
   */
  private static int getIntSize(int sqlType) {
    switch (sqlType) {
      case SQLTypes.TINYINT:
        return 1;
      case SQLTypes.SMALLINT:
        return 2;
      case SQLTypes.INTEGER:
        return 4;
      case SQLTypes.BIGINT:
        return 8;
      default:
        return 0;
    }
  }

  /**
   * Get the number of bytes used by a floating type.
   * <p>
   * Code by Jim McBeath.
   *
   * @param sqlType The SQL type.
   * @return The number of bytes required for data of that type, or 0
   *         if not a floating type.
   */
  private static int getFloatSize(int sqlType) {
    switch (sqlType) {
      default:
        return 0;
      case SQLTypes.REAL:
        return 4;
      case SQLTypes.FLOAT:
      case SQLTypes.DOUBLE:
        return 8;
    }
  }

  // ------ Useful convenience statics ------

  /**
   * A default boolean (SQL BIT) type.
   */
  public static final TBooleanType BOOLEAN_TYPE =
                                             new TBooleanType(SQLTypes.BIT);

  /**
   * A default string (SQL VARCHAR) type of unlimited maximum size and null
   * locale.
   */
  public static final TStringType STRING_TYPE = new TStringType(
                                      SQLTypes.VARCHAR, -1, (String) null);

  /**
   * A default numeric (SQL NUMERIC) type of unlimited size and scale.
   */
  public static final TNumericType NUMERIC_TYPE =
                                   new TNumericType(SQLTypes.NUMERIC, -1, -1);

  /**
   * A default date (SQL TIMESTAMP) type.
   */
  public static final TDateType DATE_TYPE = new TDateType(SQLTypes.TIMESTAMP);

  /**
   * A default binary (SQL BLOB) type of unlimited maximum size.
   */
  public static final TBinaryType BINARY_TYPE =
                                         new TBinaryType(SQLTypes.BLOB, -1);

  /**
   * A default NULL type.
   */
  public static final TNullType NULL_TYPE = new TNullType();

  /**
   * A type that represents a query plan (sub-select).
   */
  public static final TQueryPlanType QUERY_PLAN_TYPE = new TQueryPlanType();

  /**
   * A type that represents an array.
   */
  public static final TArrayType ARRAY_TYPE = new TArrayType();

}
mckoisqldb-1.0.6/src/main/java/com/mckoi/database/Table.java000066400000000000000000001711161330501023400236160ustar00rootroot00000000000000/**
 * com.mckoi.database.Table 02 Mar 1998
 *
 * Mckoi SQL Database ( http://www.mckoi.com/database )
 * Copyright (C) 2000-2018 Diehl and Associates, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import com.mckoi.util.IntegerVector; import com.mckoi.debug.*; //import com.mckoi.database.sql.SelectStatement; // Evaluating sub-selects import java.util.HashMap; import java.util.Map; import java.io.IOException; import java.io.PrintStream; /** * This is a definition for a table in the database. It stores the name of * the table, and the fields (columns) in the table. A table represents either * a 'core' DataTable that directly maps to the information stored in the * database, or a temporary table generated on the fly. *

* It is an abstract class, because it does not implement the methods to add, * remove or access row data in the table. * * @author Tobias Downer */ public abstract class Table implements TableDataSource { // Set to true to output query debugging information. All table operation // commands will be output. protected static boolean DEBUG_QUERY = true; /** * The Constructor. Requires a name and the fields in the table. */ protected Table() { } /** * Returns the Database object that this table is derived from. */ public abstract Database getDatabase(); /** * Returns the TransactionSystem object that this table is part of. */ public final TransactionSystem getSystem() { return getDatabase().getSystem(); } /** * Returns a DebugLogger object that we can use to log debug messages to. */ public DebugLogger Debug() { return getSystem().Debug(); } /** * Returns the number of columns in the table. */ public abstract int getColumnCount(); /** * Returns the number of rows stored in the table. */ public abstract int getRowCount(); /** * Returns a TType object that would represent values at the given * column index. Throws an error if the column can't be found. */ public TType getTTypeForColumn(int column) { return getDataTableDef().columnAt(column).getTType(); } /** * Returns a TType object that would represent values in the given * column. Throws an error if the column can't be found. */ public TType getTTypeForColumn(Variable v) { return getTTypeForColumn(findFieldName(v)); } /** * Given a fully qualified variable field name, ie. 'APP.CUSTOMER.CUSTOMERID' * this will return the column number the field is at. Returns -1 if the * field does not exist in the table. */ public abstract int findFieldName(Variable v); /** * Returns a fully qualified Variable object that represents the name of * the column at the given index. 
For example, * new Variable(new TableName("APP", "CUSTOMER"), "ID") */ public abstract Variable getResolvedVariable(int column); /** * Returns a SelectableScheme for the given column in the given VirtualTable * row domain. The 'column' variable may be modified as it traverses through * the tables, however the 'original_column' retains the link to the column * in 'table'. */ abstract SelectableScheme getSelectableSchemeFor(int column, int original_column, Table table); /** * Given a set, this trickles down through the Table hierarchy resolving * the given row_set to a form that the given ancestor understands. * Say you give the set { 0, 1, 2, 3, 4, 5, 6 }, this function may check * down three levels and return a new 7 element set with the rows fully * resolved to the given ancestors domain. */ abstract void setToRowTableDomain(int column, IntegerVector row_set, TableDataSource ancestor); /** * Return the list of DataTable and row sets that make up the raw information * in this table. */ abstract RawTableInformation resolveToRawTable(RawTableInformation info); /** * Returns an object that represents the information in the given cell * in the table. This will generally be an expensive algorithm, so calls * to it should be kept to a minimum. Note that the offset between two * rows is not necessarily 1. Use 'rowEnumeration' to get the contents * of a set. */ public abstract TObject getCellContents(int column, int row); /** * Returns an Enumeration of the rows in this table. Each call to * 'RowEnumeration.nextRowIndex()' returns the next valid row in the table. * Note that the order that rows are retreived depend on a number of factors. * For a DataTable the rows are accessed in the order they are in the data * file. For a VirtualTable, the rows are accessed in the order of the last * select operation. *

* If you want the rows to be returned by a specific column order then use * the 'selectxxx' methods. */ public abstract RowEnumeration rowEnumeration(); /** * Returns a DataTableDef object that defines the name of the table and the * layout of the columns of the table. Note that for tables that are joined * with other tables, the table name and schema for this object become * mangled. For example, a table called 'PERSON' joined with a table called * 'MUSIC' becomes a table called 'PERSON#MUSIC' in a null schema. */ public abstract DataTableDef getDataTableDef(); /** * Adds a DataTableListener to the DataTable objects at the root of this * table tree hierarchy. If this table represents the join of a number of * tables then the DataTableListener is added to all the DataTable objects * at the root. *

* A DataTableListener is notified of all modifications to the raw entries * of the table. This listener can be used for detecting changes in VIEWs, * for triggers or for caching of common queries. */ abstract void addDataTableListener(DataTableListener listener); /** * Removes a DataTableListener from the DataTable objects at the root of * this table tree hierarchy. If this table represents the join of a * number of tables, then the DataTableListener is removed from all the * DataTable objects at the root. */ abstract void removeDataTableListener(DataTableListener listener); /** * Locks the root table(s) of this table so that it is impossible to * overwrite the underlying rows that may appear in this table. * This is used when cells in the table need to be accessed 'outside' the * lock. So we may have late access to cells in the table. * 'lock_key' is a given key that will also unlock the root table(s). * NOTE: This is nothing to do with the 'LockingMechanism' object. */ public abstract void lockRoot(int lock_key); /** * Unlocks the root tables so that the underlying rows may * once again be used if they are not locked and have been removed. This * should be called some time after the rows have been locked. */ public abstract void unlockRoot(int lock_key); /** * Returns true if the table has its row roots locked (via the lockRoot(int) * method. */ public abstract boolean hasRootsLocked(); // ---------- Implemented from TableDataSource ---------- /** * Returns the SelectableScheme that indexes the given column in this table. */ public SelectableScheme getColumnScheme(int column) { return getSelectableSchemeFor(column, column, this); } // ---------- Convenience methods ---------- /** * Returns the DataTableColumnDef object for the given column index. 
   */
  public DataTableColumnDef getColumnDefAt(int col_index) {
    return getDataTableDef().columnAt(col_index);
  }


  /** ======================= Table Operations ========================= */

  /**
   * Dumps the contents of the table in a human readable form to the given
   * output stream.
   * This should only be used for debugging the database.
   */
  public final void dumpTo(PrintStream out) throws IOException {
    DumpHelper.dump(this, out);
  }

  /**
   * Returns a new Table based on this table with no rows in it.
   */
  public final Table emptySelect() {
    if (getRowCount() == 0) {
      return this;
    }
    else {
      // An empty row set over this table.
      VirtualTable table = new VirtualTable(this);
      table.set(this, new IntegerVector(0));
      return table;
    }
  }

  /**
   * Selects a single row at the given index from this table.
   */
  public final Table singleRowSelect(int row_index) {
    VirtualTable table = new VirtualTable(this);
    IntegerVector ivec = new IntegerVector(1);
    ivec.addInt(row_index);
    table.set(this, ivec);
    return table;
  }

  /**
   * Returns a Table that is a merge of this table and the destination table.
   * The rows that are in the destination table are included in this table.
   * The tables must have the same number of rows.
   */
  public final Table columnMerge(Table table) {
    if (getRowCount() != table.getRowCount()) {
      throw new Error("Tables have different row counts.");
    }
    // Create the new VirtualTable with the joined tables.
    // Both tables contribute the full (identical) row list 0..rcount-1.

    IntegerVector all_row_set = new IntegerVector();
    int rcount = getRowCount();
    for (int i = 0; i < rcount; ++i) {
      all_row_set.addInt(i);
    }
    Table[] tabs = new Table[] { this, table };
    IntegerVector[] row_sets = new IntegerVector[]
                                       { all_row_set, all_row_set };

    VirtualTable out_table = new VirtualTable(tabs);
    out_table.set(tabs, row_sets);

    return out_table;
  }


  // ---------- Queries using Expression class ----------

  /**
   * A single column range select on this table.  This can often be solved
   * very quickly especially if there is an index on the column.  The
   * SelectableRange array represents a set of ranges that are returned that
   * meet the given criteria.
   *
   * @param col_var the column variable in this table (eg. Part.id)
   * @param ranges the normalized (no overlapping) set of ranges to find.
   */
  public final Table rangeSelect(Variable col_var, SelectableRange[] ranges) {
    // If this table is empty then there is no range to select so
    // trivially return this object.
    if (getRowCount() == 0) {
      return this;
    }
    // Are we selecting a blank or null range?
    if (ranges == null || ranges.length == 0) {
      // Yes, so return an empty table
      return emptySelect();
    }
    // Are we selecting the entire range?
    if (ranges.length == 1 &&
        ranges[0].equals(SelectableRange.FULL_RANGE)) {
      // Yes, so return this table.
      return this;
    }

    // Must be a non-trivial range selection.

    // Find the column index of the column selected
    int column = findFieldName(col_var);

    if (column == -1) {
      throw new RuntimeException(
         "Unable to find the column given to select the range of: " +
         col_var.getName());
    }

    // Select the range
    IntegerVector rows;
    rows = selectRange(column, ranges);

    // Make a new table with the range selected
    VirtualTable table = new VirtualTable(this);
    table.set(this, rows);

    // We know the new set is ordered by the column.
    table.optimisedPostSet(column);

    if (DEBUG_QUERY) {
      if (Debug().isInterestedIn(Lvl.INFORMATION)) {
        Debug().write(Lvl.INFORMATION, this,
                      table + " = " + this + ".rangeSelect(" +
                      col_var + ", " + ranges + " )");
      }
    }

    return table;
  }

  /**
   * A simple select on this table.  We select against a column, with an
   * Operator and a rhs Expression that is constant (only needs to be
   * evaluated once).
   *
   * @param context the context of the query.
   * @param lhs_var the left hand side column reference.
   * @param op the operator.
   * @param rhs the expression to select against (the expression must
   *   be a constant).
   */
  public final Table simpleSelect(QueryContext context, Variable lhs_var,
                                  Operator op, Expression rhs) {

    String DEBUG_SELECT_WITH = null;

    // Find the row with the name given in the condition.
    int column = findFieldName(lhs_var);
    if (column == -1) {
      throw new RuntimeException(
         "Unable to find the LHS column specified in the condition: " +
         lhs_var.getName());
    }

    IntegerVector rows;

    boolean ordered_by_select_column = false;

    // If we are doing a sub-query search
    if (op.isSubQuery()) {

      // We can only handle constant expressions in the RHS expression, and
      // we must assume that the RHS is a Expression[] array.
      Object ob = rhs.last();
      if (!(ob instanceof TObject)) {
        throw new RuntimeException("Sub-query not a TObject");
      }
      TObject tob = (TObject) ob;
      if (tob.getTType() instanceof TArrayType) {
        Expression[] list = (Expression[]) tob.getObject();

        // Construct a temporary table with a single column that we are
        // comparing to.
        TemporaryTable table;
        DataTableColumnDef col = getColumnDefAt(findFieldName(lhs_var));
        DatabaseQueryContext db_context = (DatabaseQueryContext) context;
        table = new TemporaryTable(db_context.getDatabase(),
                                "single", new DataTableColumnDef[] { col } );

        for (int i = 0; i < list.length; ++i) {
          table.newRow();
          table.setRowObject(list[i].evaluate(null, null, context), 0);
        }
        table.setupAllSelectableSchemes();

        // Perform the any/all sub-query on the constant table.

        return TableFunctions.anyAllNonCorrelated(
                            this, new Variable[] { lhs_var }, op, table);

      }
      else {
        throw new RuntimeException("Error with format or RHS expression.");
      }

    }
    // If we are doing a LIKE or REGEX pattern search
    else if (op.is("like") || op.is("not like") || op.is("regex")) {

      // Evaluate the right hand side.  We know rhs is constant so don't
      // bother passing a VariableResolver object.
      TObject rhs_const = rhs.evaluate(null, context);

      if (op.is("regex")) {
        // Use the regular expression search to determine matching rows.
        rows = selectFromRegex(column, op, rhs_const);
      }
      else {
        // Use the standard SQL pattern matching engine to determine
        // matching rows.
        rows = selectFromPattern(column, op, rhs_const);
      }
      // These searches guarantee result is ordered by the column
      ordered_by_select_column = true;

      // Describe the 'LIKE' select
      if (DEBUG_QUERY) {
        DEBUG_SELECT_WITH = op.toString() + " " + rhs_const;
      }

    }
    // Otherwise, we doing an index based comparison.
    else {

      // Is the column we are searching on indexable?
      DataTableColumnDef col_def = getColumnDefAt(column);
      if (!col_def.isIndexableType()) {
        throw new StatementException("Can not search on field type " +
                                     col_def.getSQLTypeString() +
                                     " in '" + col_def.getName() + "'");
      }

      // Evaluate the right hand side.  We know rhs is constant so don't
      // bother passing a VariableResolver object.
      TObject rhs_const = rhs.evaluate(null, context);

      // Get the rows of the selected set that match the given condition.
      rows = selectRows(column, op, rhs_const);
      ordered_by_select_column = true;

      // Describe the select
      if (DEBUG_QUERY) {
        DEBUG_SELECT_WITH = op.toString() + " " + rhs_const;
      }

    }

    // We now have a set of rows from this table to make into a
    // new table.

    VirtualTable table = new VirtualTable(this);
    table.set(this, rows);

    // OPTIMIZATION: Since we know that the 'select' return is ordered by the
    //   LHS column, we can easily generate a SelectableScheme for the given
    //   column.  This doesn't work for the non-constant set.

    if (ordered_by_select_column) {
      table.optimisedPostSet(column);
    }

    if (DEBUG_QUERY) {
      if (Debug().isInterestedIn(Lvl.INFORMATION)) {
        Debug().write(Lvl.INFORMATION, this,
                      table + " = " + this + ".simpleSelect(" +
                      lhs_var + " " + DEBUG_SELECT_WITH + " )");
      }
    }

    return table;

  }

  /**
   * A simple join operation.  A simple join operation is one that has a
   * single joining operator, a Variable on the lhs and a simple expression on
   * the rhs that includes only columns in the rhs table.  For example,
   * 'id = part_id' or 'id == part_id * 2' or 'id == part_id + vendor_id * 2'
   *
   * It is important to understand how this algorithm works because all
   * optimization of the expression must happen before the method starts.
   * <p>
   * The simple join algorithm works as follows:  Every row of the right hand
   * side table 'table' is iterated through.  The select operation is applied
   * to this table given the result evaluation.  Each row that matches is
   * included in the result table.
   * <p>
   * For optimal performance, the expression should be arranged so that the
   * rhs table is the smallest of the two tables (because we must iterate
   * through all rows of this table).  This table should be the largest.
   */
  public final Table simpleJoin(QueryContext context, Table table,
                       Variable lhs_var, Operator op, Expression rhs) {

    // Find the row with the name given in the condition.
    int lhs_column = findFieldName(lhs_var);
    if (lhs_column == -1) {
      throw new RuntimeException(
         "Unable to find the LHS column specified in the condition: " +
         lhs_var.toString());
    }

    // Create a variable resolver that can resolve columns in the destination
    // table.
    TableVariableResolver resolver = table.getVariableResolver();

    // The join algorithm.  It steps through the RHS expression, selecting the
    // cells that match the relation from the LHS table (this table).

    IntegerVector this_row_set = new IntegerVector();
    IntegerVector table_row_set = new IntegerVector();

    RowEnumeration e = table.rowEnumeration();

    while (e.hasMoreRows()) {
      int row_index = e.nextRowIndex();
      resolver.setRow(row_index);

      // Resolve expression into a constant.
      TObject rhs_val = rhs.evaluate(resolver, context);

      // Select all the rows in this table that match the joining condition.
      IntegerVector selected_set = selectRows(lhs_column, op, rhs_val);

      // Include in the set.  The rhs row index is repeated once for every
      // matching lhs row so the two row lists stay in lock-step.
      int size = selected_set.size();
      for (int i = 0; i < size; ++i) {
        table_row_set.addInt(row_index);
      }
      this_row_set.append(selected_set);

    }

    // Create the new VirtualTable with the joined tables.

    Table[] tabs = new Table[] { this, table };
    IntegerVector[] row_sets = new IntegerVector[]
                                        { this_row_set, table_row_set };

    VirtualTable out_table = new VirtualTable(tabs);
    out_table.set(tabs, row_sets);

    if (DEBUG_QUERY) {
      if (Debug().isInterestedIn(Lvl.INFORMATION)) {
        Debug().write(Lvl.INFORMATION, this,
                      out_table + " = " + this + ".simpleJoin(" + table +
                      ", " + lhs_var + ", " + op + ", " + rhs + " )");
      }
    }

    return out_table;

  }

  /**
   * Exhaustively searches through this table for rows that match the
   * expression given.  This is the slowest type of query and is not able to
   * use any type of indexing.
   * <p>
   * A QueryContext object is used for resolving sub-query plans.  If there
   * are no sub-query plans in the expression, this can safely be 'null'.
   */
  public final Table exhaustiveSelect(QueryContext context, Expression exp) {

    Table result = this;

    // Exit early if there's nothing in the table to select from
    int row_count = getRowCount();
    if (row_count > 0) {

      TableVariableResolver resolver = getVariableResolver();
      RowEnumeration e = rowEnumeration();

      IntegerVector selected_set = new IntegerVector(row_count);

      while (e.hasMoreRows()) {
        int row_index = e.nextRowIndex();
        resolver.setRow(row_index);

        // Resolve expression into a constant.
        TObject rhs_val = exp.evaluate(resolver, context);

        // If resolved to true then include in the selected set.
        // (NULL and non-boolean results are treated as not matching.)
        if (!rhs_val.isNull() && rhs_val.getTType() instanceof TBooleanType &&
            rhs_val.getObject().equals(Boolean.TRUE)) {
          selected_set.addInt(row_index);
        }

      }

      // Make into a table to return.
      VirtualTable table = new VirtualTable(this);
      table.set(this, selected_set);

      result = table;
    }

    if (DEBUG_QUERY) {
      if (Debug().isInterestedIn(Lvl.INFORMATION)) {
        Debug().write(Lvl.INFORMATION, this,
                      result + " = " + this + ".exhaustiveSelect(" +
                      exp + " )");
      }
    }

    return result;
  }

  /**
   * Evaluates a non-correlated ANY type operator given the LHS expression,
   * the RHS subquery and the ANY operator to use.  For example;

   * <p><pre>
   *   Table.col > ANY ( SELECT .... )
   * </pre><p>
   * ANY creates a new table that contains only the rows in this table that
   * the expression and operator evaluate to true for any values in the
   * given table.
   * <p>
   * The IN operator can be represented by using '= ANY'.
   * <p>
   * Note that unlike the other join and select methods in this object this
   * will take a complex expression as the lhs provided all the Variable
   * objects resolve to this table.
   *
   * @param lhs the left hand side expression.  The Variable objects in this
   *   expression must all reference columns in this table.
   * @param op the operator to use.
   * @param right_table the subquery table should only contain one column.
   * @param context the context of the query.
   * @return the result of the ANY function on the table.
   */
  public Table any(QueryContext context, Expression lhs, Operator op,
                   Table right_table) {
    Table table = right_table;

    // Check the table only has 1 column
    if (table.getColumnCount() != 1) {
      throw new Error("Input table <> 1 columns.");
    }

    // Handle trivial case of no entries to select from
    if (getRowCount() == 0) {
      return this;
    }
    // If 'table' is empty then we return an empty set.  ANY { empty set } is
    // always false.
    if (table.getRowCount() == 0) {
      return emptySelect();
    }

    // Is the lhs expression a constant?
    if (lhs.isConstant()) {
      // We know lhs is a constant so no point passing arguments,
      TObject lhs_const = lhs.evaluate(null, context);
      // Select from the table.
      IntegerVector ivec = table.selectRows(0, op, lhs_const);
      if (ivec.size() > 0) {
        // There's some entries so return the whole table,
        return this;
      }
      // No entries matches so return an empty table.
      return emptySelect();
    }

    Table source_table;
    int lhs_col_index;
    // Is the lhs expression a single variable?
    Variable lhs_var = lhs.getVariable();
    // NOTE: It'll be less common for this part to be called.
    if (lhs_var == null) {
      // This is a complex expression so make a FunctionTable as our new
      // source.
      DatabaseQueryContext db_context = (DatabaseQueryContext) context;
      FunctionTable fun_table = new FunctionTable(
                  this, new Expression[] { lhs }, new String[] { "1" },
                  db_context);
      source_table = fun_table;
      lhs_col_index = 0;
    }
    else {
      // The expression is an easy to resolve reference in this table.
      source_table = this;
      lhs_col_index = source_table.findFieldName(lhs_var);
      if (lhs_col_index == -1) {
        throw new Error("Can't find column '" + lhs_var + "'.");
      }
    }

    // Check that the first column of 'table' is of a compatible type with
    // source table column (lhs_col_index).
    // ISSUE: Should we convert to the correct type via a FunctionTable?
    DataTableColumnDef source_col =
                                source_table.getColumnDefAt(lhs_col_index);
    DataTableColumnDef dest_col = table.getColumnDefAt(0);
    if (!source_col.getTType().comparableTypes(dest_col.getTType())) {
      throw new Error("The type of the sub-query expression " +
                      source_col.getSQLTypeString() + " is incompatible " +
                      "with the sub-query " + dest_col.getSQLTypeString() +
                      ".");
    }

    // We now have all the information to solve this query.
    // We work out as follows:
    //   For >, >= type ANY we find the lowest value in 'table' and
    //   select from 'source' all the rows that are >, >= than the
    //   lowest value.
    //   For <, <= type ANY we find the highest value in 'table' and
    //   select from 'source' all the rows that are <, <= than the
    //   highest value.
    //   For = type ANY we use same method from INHelper.
    //   For <> type ANY we iterate through 'source' only including those
    //   rows that a <> query on 'table' returns size() != 0.

    IntegerVector select_vec;
    if (op.is(">") || op.is(">=")) {
      // Select the first from the set (the lowest value),
      TObject lowest_cell = table.getFirstCellContent(0);
      // Select from the source table all rows that are > or >= to the
      // lowest cell,
      select_vec = source_table.selectRows(lhs_col_index, op, lowest_cell);
    }
    else if (op.is("<") || op.is("<=")) {
      // Select the last from the set (the highest value),
      TObject highest_cell = table.getLastCellContent(0);
      // Select from the source table all rows that are < or <= to the
      // highest cell,
      select_vec = source_table.selectRows(lhs_col_index, op, highest_cell);
    }
    else if (op.is("=")) {
      // Equiv. to IN
      select_vec = INHelper.in(source_table, table, lhs_col_index, 0);
    }
    else if (op.is("<>")) {
      // Select the value that is the same of the entire column
      TObject cell = table.getSingleCellContent(0);
      if (cell != null) {
        // All values from 'source_table' that are <> than the given cell.
        select_vec = source_table.selectRows(lhs_col_index, op, cell);
      }
      else {
        // No, this means there are different values in the given set so the
        // query evaluates to the entire table.
        return this;
      }
    }
    else {
      throw new Error("Don't understand operator '" + op + "' in ANY.");
    }

    // Make into a table to return.
    VirtualTable rtable = new VirtualTable(this);
    rtable.set(this, select_vec);

    // Query logging information
    if (Debug().isInterestedIn(Lvl.INFORMATION)) {
      Debug().write(Lvl.INFORMATION, this,
                    rtable + " = " + this + ".any(" + lhs + ", " + op +
                    ", " + right_table + ")");
    }

    return rtable;
  }

  /**
   * Evaluates a non-correlated ALL type operator given the LHS expression,
   * the RHS subquery and the ALL operator to use.  For example;

   * <p><pre>
   *   Table.col > ALL ( SELECT .... )
   * </pre><p>
   * ALL creates a new table that contains only the rows in this table that
   * the expression and operator evaluate to true for all values in the
   * given table.
   * <p>
   * The NOT IN operator can be represented by using '<> ALL'.
   * <p>
   * Note that unlike the other join and select methods in this object this
   * will take a complex expression as the lhs provided all the Variable
   * objects resolve to this table.
   *
   * @param lhs the left hand side expression.  The Variable objects in this
   *   expression must all reference columns in this table.
   * @param op the operator to use.
   * @param table The subquery table should only contain one column.
   * @param context The context of the query.
   * @return the result of the ALL function on the table.
   */
  public Table all(QueryContext context, Expression lhs, Operator op,
                   Table table) {

    // Check the table only has 1 column
    if (table.getColumnCount() != 1) {
      throw new Error("Input table <> 1 columns.");
    }

    // Handle trivial case of no entries to select from
    if (getRowCount() == 0) {
      return this;
    }
    // If 'table' is empty then we return the complete set.  ALL { empty set }
    // is always true.
    if (table.getRowCount() == 0) {
      return this;
    }

    // Is the lhs expression a constant?
    if (lhs.isConstant()) {
      // We know lhs is a constant so no point passing arguments,
      TObject lhs_const = lhs.evaluate(null, context);
      boolean compared_to_true;

      // The various operators
      if (op.is(">") || op.is(">=")) {
        // Find the maximum value in the table
        TObject cell = table.getLastCellContent(0);
        compared_to_true = compareCells(lhs_const, cell, op);
      }
      else if (op.is("<") || op.is("<=")) {
        // Find the minimum value in the table
        TObject cell = table.getFirstCellContent(0);
        compared_to_true = compareCells(lhs_const, cell, op);
      }
      else if (op.is("=")) {
        // Only true if rhs is a single value
        TObject cell = table.getSingleCellContent(0);
        compared_to_true = (cell != null &&
                            compareCells(lhs_const, cell, op));
      }
      else if (op.is("<>")) {
        // true only if lhs_cell is not found in column.
        compared_to_true = !table.columnContainsCell(0, lhs_const);
      }
      else {
        throw new Error("Don't understand operator '" + op + "' in ALL.");
      }

      // If matched return this table
      if (compared_to_true) {
        return this;
      }
      // No entries matches so return an empty table.
      return emptySelect();
    }

    Table source_table;
    int lhs_col_index;
    // Is the lhs expression a single variable?
    Variable lhs_var = lhs.getVariable();
    // NOTE: It'll be less common for this part to be called.
    if (lhs_var == null) {
      // This is a complex expression so make a FunctionTable as our new
      // source.
      DatabaseQueryContext db_context = (DatabaseQueryContext) context;
      FunctionTable fun_table = new FunctionTable(
                  this, new Expression[] { lhs }, new String[] { "1" },
                  db_context);
      source_table = fun_table;
      lhs_col_index = 0;
    }
    else {
      // The expression is an easy to resolve reference in this table.
      source_table = this;
      lhs_col_index = source_table.findFieldName(lhs_var);
      if (lhs_col_index == -1) {
        throw new Error("Can't find column '" + lhs_var + "'.");
      }
    }

    // Check that the first column of 'table' is of a compatible type with
    // source table column (lhs_col_index).
    // ISSUE: Should we convert to the correct type via a FunctionTable?
    DataTableColumnDef source_col =
                                source_table.getColumnDefAt(lhs_col_index);
    DataTableColumnDef dest_col = table.getColumnDefAt(0);
    if (!source_col.getTType().comparableTypes(dest_col.getTType())) {
      throw new Error("The type of the sub-query expression " +
                      source_col.getSQLTypeString() + " is incompatible " +
                      "with the sub-query " + dest_col.getSQLTypeString() +
                      ".");
    }

    // We now have all the information to solve this query.
    // We work out as follows:
    //   For >, >= type ALL we find the highest value in 'table' and
    //   select from 'source' all the rows that are >, >= than the
    //   highest value.
    //   For <, <= type ALL we find the lowest value in 'table' and
    //   select from 'source' all the rows that are <, <= than the
    //   lowest value.
    //   For = type ALL we see if 'table' contains a single value.  If it
    //   does we select all from 'source' that equals the value, otherwise an
    //   empty table.
    //   For <> type ALL we use the 'not in' algorithm.

    IntegerVector select_vec;
    if (op.is(">") || op.is(">=")) {
      // Select the last from the set (the highest value),
      TObject highest_cell = table.getLastCellContent(0);
      // Select from the source table all rows that are > or >= to the
      // highest cell,
      select_vec = source_table.selectRows(lhs_col_index, op, highest_cell);
    }
    else if (op.is("<") || op.is("<=")) {
      // Select the first from the set (the lowest value),
      TObject lowest_cell = table.getFirstCellContent(0);
      // Select from the source table all rows that are < or <= to the
      // lowest cell,
      select_vec = source_table.selectRows(lhs_col_index, op, lowest_cell);
    }
    else if (op.is("=")) {
      // Select the single value from the set (if there is one).
      TObject single_cell = table.getSingleCellContent(0);
      if (single_cell != null) {
        // Select all from source_table all values that = this cell
        select_vec = source_table.selectRows(lhs_col_index, op, single_cell);
      }
      else {
        // No single value so return empty set (no value in LHS will equal
        // a value in RHS).
        return emptySelect();
      }
    }
    else if (op.is("<>")) {
      // Equiv. to NOT IN
      select_vec = INHelper.notIn(source_table, table, lhs_col_index, 0);
    }
    else {
      throw new Error("Don't understand operator '" + op + "' in ALL.");
    }

    // Make into a table to return.
    VirtualTable rtable = new VirtualTable(this);
    rtable.set(this, select_vec);

    // Query logging information
    if (Debug().isInterestedIn(Lvl.INFORMATION)) {
      Debug().write(Lvl.INFORMATION, this,
                    rtable + " = " + this + ".all(" + lhs + ", " + op +
                    ", " + table + ")");
    }

    return rtable;
  }

  // ---------- The original table functions ----------

  /**
   * Performs a natural join of this table with the given table.  This is
   * the same as calling the above 'join' with no conditional.
*/ public final Table join(Table table) { boolean QUICK_NAT_JOIN = true; Table out_table; if (QUICK_NAT_JOIN) { // This implementation doesn't materialize the join out_table = new NaturallyJoinedTable(this, table); } else { Table[] tabs = new Table[2]; tabs[0] = this; tabs[1] = table; IntegerVector[] row_sets = new IntegerVector[2]; // Optimized trivial case, if either table has zero rows then result of // join will contain zero rows also. if (getRowCount() == 0 || table.getRowCount() == 0) { row_sets[0] = new IntegerVector(0); row_sets[1] = new IntegerVector(0); } else { // The natural join algorithm. IntegerVector this_row_set = new IntegerVector(); IntegerVector table_row_set = new IntegerVector(); // Get the set of all rows in the given table. IntegerVector table_selected_set = new IntegerVector(); RowEnumeration e = table.rowEnumeration(); while (e.hasMoreRows()) { int row_index = e.nextRowIndex(); table_selected_set.addInt(row_index); } int table_selected_set_size = table_selected_set.size(); // Join with the set of rows in this table. e = rowEnumeration(); while (e.hasMoreRows()) { int row_index = e.nextRowIndex(); for (int i = 0; i < table_selected_set_size; ++i) { this_row_set.addInt(row_index); } table_row_set.append(table_selected_set); } // The row sets we are joining from each table. row_sets[0] = this_row_set; row_sets[1] = table_row_set; } // Create the new VirtualTable with the joined tables. VirtualTable virt_table = new VirtualTable(tabs); virt_table.set(tabs, row_sets); out_table = virt_table; } if (DEBUG_QUERY) { if (Debug().isInterestedIn(Lvl.INFORMATION)) { Debug().write(Lvl.INFORMATION, this, out_table + " = " + this + ".naturalJoin(" + table + " )"); } } return out_table; } /** * Finds all rows in this table that are 'outside' the result in the * given table. This is used in OUTER JOIN's. We perform a normal join, * then determine unmatched joins with this function. 
   * We can then create
   * an OuterTable with this result to make the completed table.
   * <p>
   * 'rtable' must be a descendant of this table.
   */
  public final VirtualTable outside(Table rtable) {

    // Form the row list for right hand table,
    IntegerVector row_list = new IntegerVector(rtable.getRowCount());
    RowEnumeration e = rtable.rowEnumeration();
    while (e.hasMoreRows()) {
      row_list.addInt(e.nextRowIndex());
    }
    int col_index = rtable.findFieldName(getResolvedVariable(0));
    // Map 'row_list' into this table's row domain.
    rtable.setToRowTableDomain(col_index, row_list, this);

    // This row set
    IntegerVector this_table_set = new IntegerVector(getRowCount());
    e = rowEnumeration();
    while (e.hasMoreRows()) {
      this_table_set.addInt(e.nextRowIndex());
    }

    // 'row_list' is now the rows in this table that are in 'rtable'.
    // Sort both 'this_table_set' and 'row_list'
    this_table_set.quickSort();
    row_list.quickSort();

    // Find all rows that are in 'this_table_set' and not in 'row_list'
    // (a sorted merge walk over the two lists).
    IntegerVector result_list = new IntegerVector(96);

    int size = this_table_set.size();
    int row_list_index = 0;
    int row_list_size = row_list.size();
    for (int i = 0; i < size; ++i) {
      int this_val = this_table_set.intAt(i);
      if (row_list_index < row_list_size) {
        int in_val = row_list.intAt(row_list_index);
        if (this_val < in_val) {
          result_list.addInt(this_val);
        }
        else if (this_val == in_val) {
          // Matched - skip over any duplicate entries of this value.
          while (row_list_index < row_list_size &&
                 row_list.intAt(row_list_index) == in_val) {
            ++row_list_index;
          }
        }
        else {
          // Can't happen if 'row_list' is a subset of 'this_table_set'.
          throw new Error("'this_val' > 'in_val'");
        }
      }
      else {
        result_list.addInt(this_val);
      }
    }

    // Return the new VirtualTable
    VirtualTable table = new VirtualTable(this);
    table.set(this, result_list);

    return table;
  }

  /**
   * Returns a new Table that is the union of the this table and the given
   * table.  A union operation will remove any duplicate rows.
   */
  public final Table union(Table table) {

    // Optimizations - handle trivial case of row count in one of the tables
    //   being 0.
    // NOTE: This optimization assumes this table and the unioned table are
    //   of the same type.
    if ((getRowCount() == 0 && table.getRowCount() == 0) ||
        table.getRowCount() == 0) {

      if (DEBUG_QUERY) {
        if (Debug().isInterestedIn(Lvl.INFORMATION)) {
          Debug().write(Lvl.INFORMATION, this,
                        this + " = " + this + ".union(" + table + " )");
        }
      }
      return this;
    }
    else if (getRowCount() == 0) {
      if (DEBUG_QUERY) {
        if (Debug().isInterestedIn(Lvl.INFORMATION)) {
          Debug().write(Lvl.INFORMATION, this,
                        table + " = " + this + ".union(" + table + " )");
        }
      }
      return table;
    }

    // First we merge this table with the input table.

    RawTableInformation raw1 = resolveToRawTable(new RawTableInformation());
    RawTableInformation raw2 =
                        table.resolveToRawTable(new RawTableInformation());

    // This will throw an exception if the table types do not match up.

    raw1.union(raw2);

    // Now 'raw1' contains a list of uniquely merged rows (ie. the union).
    // Now make it into a new table and return the information.

    Table[] table_list = raw1.getTables();
    VirtualTable table_out = new VirtualTable(table_list);
    table_out.set(table_list, raw1.getRows());

    if (DEBUG_QUERY) {
      if (Debug().isInterestedIn(Lvl.INFORMATION)) {
        Debug().write(Lvl.INFORMATION, this,
                      table_out + " = " + this + ".union(" + table + " )");
      }
    }

    return table_out;

  }

  /**
   * Returns a new table with any duplicate rows in this table removed.
   *
   * @deprecated - not a proper SQL distinct.
   */
  public final VirtualTable distinct() {
    RawTableInformation raw = resolveToRawTable(new RawTableInformation());
    raw.removeDuplicates();

    Table[] table_list = raw.getTables();
    VirtualTable table_out = new VirtualTable(table_list);
    table_out.set(table_list, raw.getRows());

    if (DEBUG_QUERY) {
      if (Debug().isInterestedIn(Lvl.INFORMATION)) {
        Debug().write(Lvl.INFORMATION, this,
                      table_out + " = " + this + ".distinct()");
      }
    }

    return table_out;
  }

  /**
   * Returns a new table that has only distinct rows in it.  This is an
   * expensive operation.  We sort over all the columns, then iterate through
   * the result taking out any duplicate rows.

   * <p>
   * The int array contains the columns to make distinct over.
   * <p>
   * NOTE: This will change the order of this table in the result.
   */
  public final Table distinct(int[] col_map) {
    IntegerVector result_list = new IntegerVector();
    // Order the rows by the distinct columns so equal rows are adjacent.
    IntegerVector row_list = orderedRowList(col_map);

    int r_count = row_list.size();
    int previous_row = -1;
    for (int i = 0; i < r_count; ++i) {
      int row_index = row_list.intAt(i);

      if (previous_row != -1) {

        boolean equal = true;
        // Compare cell in column in this row with previous row.
        for (int n = 0; n < col_map.length && equal; ++n) {
          TObject c1 = getCellContents(col_map[n], row_index);
          TObject c2 = getCellContents(col_map[n], previous_row);
          equal = equal && (c1.compareTo(c2) == 0);
        }

        // Only keep the row if it differs from its predecessor.
        if (!equal) {
          result_list.addInt(row_index);
        }
      }
      else {
        // First row is always included.
        result_list.addInt(row_index);
      }

      previous_row = row_index;
    }

    // Return the new table with distinct rows only.
    VirtualTable vt = new VirtualTable(this);
    vt.set(this, result_list);

    if (DEBUG_QUERY) {
      if (Debug().isInterestedIn(Lvl.INFORMATION)) {
        Debug().write(Lvl.INFORMATION, this,
                      vt + " = " + this + ".distinct(" + col_map + ")");
      }
    }

    return vt;

  }

  /**
   * Helper function.  Returns the index in the String array of the given
   * string value, or -1 if the value is not present.
   */
  private final int indexStringArray(String val, String[] array) {
    for (int n = 0; n < array.length; ++n) {
      if (array[n].equals(val)) {
        return n;
      }
    }
    return -1;
  }

  /**
   * Returns true if the given column number contains the value given.
   */
  public final boolean columnContainsValue(int column, TObject ob) {
    return columnMatchesValue(column, Operator.get("="), ob);
  }

  /**
   * Returns true if the given column contains a value that the given
   * operator returns true for with the given value.
   */
  public final boolean columnMatchesValue(int column,
                                          Operator op, TObject ob) {
    IntegerVector ivec = selectRows(column, op, ob);
    return (ivec.size() > 0);
  }

  /**
   * Returns true if the given column contains all values that the given
   * operator returns true for with the given value.
   */
  public final boolean allColumnMatchesValue(int column,
                                             Operator op, TObject ob) {
    IntegerVector ivec = selectRows(column, op, ob);
    return (ivec.size() == getRowCount());
  }

  /**
   * Returns a table that is ordered by the given column numbers.  This
   * can be used by various functions from grouping to distinction to
   * ordering.  Always sorted by ascending.
   */
  public final Table orderByColumns(int[] col_map) {
    // Sort by the column list (last column first so the first column is
    // the most significant sort key).
    Table work = this;
    for (int i = col_map.length - 1; i >= 0; --i) {
      work = work.orderByColumn(col_map[i], true);
    }
    // A nice post condition to check on.
    if (getRowCount() != work.getRowCount()) {
      throw new Error("Internal Error, row count != sorted row count");
    }
    return work;
  }

  /**
   * Returns an IntegerVector that represents the list of rows in this
   * table in sorted order by the given column map.
   */
  public final IntegerVector orderedRowList(int[] col_map) {
    Table work = orderByColumns(col_map);
    // 'work' is now sorted by the columns,
    // Get the rows in this tables domain,
    int r_count = getRowCount();
    IntegerVector row_list = new IntegerVector(r_count);
    RowEnumeration e = work.rowEnumeration();
    while (e.hasMoreRows()) {
      row_list.addInt(e.nextRowIndex());
    }

    // Translate the row indexes back into this table's domain.
    work.setToRowTableDomain(0, row_list, this);
    return row_list;
  }


  /**
   * Returns a Table which is identical to this table, except it is sorted by
   * the given column name.  This means that if you access the rows
   * sequentially you will be reading the sorted order of the column.
   */
  public final VirtualTable orderByColumn(int col_index, boolean ascending) {
    // Check the field can be sorted
    // NOTE(review): 'col_def' is otherwise unused; the call appears to serve
    //   as a validity check on 'col_index' - confirm before removing.
    DataTableColumnDef col_def = getColumnDefAt(col_index);

    IntegerVector rows = selectAll(col_index);

    // Reverse the list if we are not ascending
    if (ascending == false) {
      rows.reverse();
    }

    // We now have an int[] array of rows from this table to make into a
    // new table.

    VirtualTable table = new VirtualTable(this);
    table.set(this, rows);

    if (DEBUG_QUERY) {
      if (Debug().isInterestedIn(Lvl.INFORMATION)) {
        Debug().write(Lvl.INFORMATION, this,
                      table + " = " + this + ".orderByColumn(" +
                      col_index + ", " + ascending + ")");
      }
    }

    return table;
  }

  /**
   * Returns a Table sorted by the given column, ascending or descending.
   * Throws an error if the column can not be found in this table.
   */
  public final VirtualTable orderByColumn(Variable column,
                                          boolean ascending) {
    int col_index = findFieldName(column);
    if (col_index == -1) {
      throw new Error("Unknown column in 'orderByColumn' ( " +
                      column + " )");
    }
    return orderByColumn(col_index, ascending);
  }

  /**
   * Returns a Table sorted by the given column in ascending order.
   */
  public final VirtualTable orderByColumn(Variable column) {
    return orderByColumn(column, true);
  }


  /**
   * This returns an object that can only access the cells that are in this
   * table, and has no other access to the 'Table' class's functionality.  The
   * purpose of this object is to provide a clean way to access the state of a
   * table without being able to access any of the row sorting
   * (SelectableScheme) methods that would return incorrect information in the
   * situation where the table locks (via LockingMechanism) were removed.
   * NOTE: The methods in this class will only work if this table has its
   *   rows locked via the 'lockRoot(int)' method.
   */
  public final TableAccessState getTableAccessState() {
    return new TableAccessState(this);
  }

  /**
   * Returns a set that represents the list of multi-column row numbers
   * selected from the table given the condition.

* NOTE: This can be used to exploit multi-column indexes if they exist. */ final IntegerVector selectRows(int[] cols, Operator op, TObject[] cells) { // PENDING: Look for an multi-column index to make this a lot faster, if (cols.length > 1) { throw new Error("Multi-column select not supported."); } return selectRows(cols[0], op, cells[0]); } /** * Returns a set that represents the list of row numbers selected from the * table given the condition. */ final IntegerVector selectRows(int column, Operator op, TObject cell) { // If the cell is of an incompatible type, return no results, TType col_type = getTTypeForColumn(column); if (!cell.getTType().comparableTypes(col_type)) { // Types not comparable, so return 0 return new IntegerVector(0); } // Get the selectable scheme for this column SelectableScheme ss = getSelectableSchemeFor(column, column, this); // If the operator is a standard operator, use the interned SelectableScheme // methods. if (op.is("=")) { return ss.selectEqual(cell); } else if (op.is("<>")) { return ss.selectNotEqual(cell); } else if (op.is(">")) { return ss.selectGreater(cell); } else if (op.is("<")) { return ss.selectLess(cell); } else if (op.is(">=")) { return ss.selectGreaterOrEqual(cell); } else if (op.is("<=")) { return ss.selectLessOrEqual(cell); } // If it's not a standard operator (such as IS, NOT IS, etc) we generate the // range set especially. SelectableRangeSet range_set = new SelectableRangeSet(); range_set.intersect(op, cell); return ss.selectRange(range_set.toSelectableRangeArray()); } /** * Selects the rows in a table column between two minimum and maximum bounds. * This is all rows which are >= min_cell and < max_cell. *

* NOTE: The returns IntegerVector _must_ be sorted be the 'column' cells. */ IntegerVector selectRows(int column, TObject min_cell, TObject max_cell) { // Check all the tables are comparable TType col_type = getTTypeForColumn(column); if (!min_cell.getTType().comparableTypes(col_type) || !max_cell.getTType().comparableTypes(col_type)) { // Types not comparable, so return 0 return new IntegerVector(0); } SelectableScheme ss = getSelectableSchemeFor(column, column, this); return ss.selectBetween(min_cell, max_cell); } /** * Selects all the rows where the given column matches the regular * expression. This uses the static class 'PatternSearch' to perform the * operation. *

* This method must guarentee the result is ordered by the given column. */ final IntegerVector selectFromRegex(int column, Operator op, TObject ob) { if (ob.isNull()) { return new IntegerVector(0); } return PatternSearch.regexSearch(this, column, ob.getObject().toString()); } /** * Selects all the rows where the given column matches the given pattern. * This uses the static class 'PatternSearch' to perform these operations. * 'operation' will be either Condition.LIKE or Condition.NOT_LIKE. * NOTE: The returns IntegerVector _must_ be sorted be the 'column' cells. */ final IntegerVector selectFromPattern(int column, Operator op, TObject ob) { if (ob.isNull()) { return new IntegerVector(); } if (op.is("not like")) { // How this works: // Find the set or rows that are like the pattern. // Find the complete set of rows in the column. // Sort the 'like' rows // For each row that is in the original set and not in the like set, // add to the result list. // Result is the set of not like rows ordered by the column. IntegerVector like_set = PatternSearch.search(this, column, ob.toString()); // Don't include NULL values TObject null_cell = new TObject(ob.getTType(), null); IntegerVector original_set = selectRows(column, Operator.get("is not"), null_cell); int vec_size = Math.max(4, (original_set.size() - like_set.size()) + 4); IntegerVector result_set = new IntegerVector(vec_size); like_set.quickSort(); int size = original_set.size(); for (int i = 0; i < size; ++i) { int val = original_set.intAt(i); // If val not in like set, add to result if (like_set.sortedIntCount(val) == 0) { result_set.addInt(val); } } return result_set; } else { // if (op.is("like")) { return PatternSearch.search(this, column, ob.toString()); } } /** * Given a table and column (from this table), this returns all the rows * from this table that are also in the first column of the given table. * This is the basis of a fast 'in' process. 
*/ final IntegerVector allRowsIn(int column, Table table) { IntegerVector iv = INHelper.in(this, table, column, 0); return iv; } /** * Given a table and column (from this table), this returns all the rows * from this table that are not in the first column of the given table. * This is the basis of a fast 'not in' process. */ final IntegerVector allRowsNotIn(int column, Table table) { return INHelper.notIn(this, table, column, 0); } /** * Returns an array that represents the sorted order of this table by * the given column number. */ public final IntegerVector selectAll(int column) { SelectableScheme ss = getSelectableSchemeFor(column, column, this); return ss.selectAll(); } /** * Returns a list of rows that represents the enumerator order of this * table. */ public final IntegerVector selectAll() { IntegerVector list = new IntegerVector(getRowCount()); RowEnumeration en = rowEnumeration(); while (en.hasMoreRows()) { list.addInt(en.nextRowIndex()); } return list; } /** * Returns an array that represents the sorted order of this table of all * values in the given SelectableRange objects of the given column index. * If there is an index on the column, the result can be found very quickly. * The range array must be normalized (no overlapping ranges). */ public final IntegerVector selectRange(int column, SelectableRange[] ranges) { SelectableScheme ss = getSelectableSchemeFor(column, column, this); return ss.selectRange(ranges); } /** * Returns an array that represents the last sorted element(s) of the given * column number. */ public final IntegerVector selectLast(int column) { SelectableScheme ss = getSelectableSchemeFor(column, column, this); return ss.selectLast(); } /** * Returns an array that represents the first sorted element(s) of the given * column number. 
*/ public final IntegerVector selectFirst(int column) { SelectableScheme ss = getSelectableSchemeFor(column, column, this); return ss.selectFirst(); } /** * Returns an array that represents the rest of the sorted element(s) of the * given column number. (not the 'first' set). */ public final IntegerVector selectRest(int column) { SelectableScheme ss = getSelectableSchemeFor(column, column, this); return ss.selectNotFirst(); } /** * Convenience, returns a TObject[] array given a single TObject, or * null if the TObject is null (not if TObject represents a null value). */ private TObject[] singleArrayCellMap(TObject cell) { return cell == null ? null : new TObject[] { cell }; } /** * Returns the TObject value that represents the first item in the set or * null if there are no items in the column set. */ public final TObject getFirstCellContent(int column) { IntegerVector ivec = selectFirst(column); if (ivec.size() > 0) { return getCellContents(column, ivec.intAt(0)); } return null; } /** * Returns the TObject value that represents the first item in the set or * null if there are no items in the column set. */ public final TObject[] getFirstCellContent(int[] col_map) { if (col_map.length > 1) { throw new Error("Multi-column getLastCellContent not supported."); } return singleArrayCellMap(getFirstCellContent(col_map[0])); } /** * Returns the TObject value that represents the last item in the set or * null if there are no items in the column set. */ public final TObject getLastCellContent(int column) { IntegerVector ivec = selectLast(column); if (ivec.size() > 0) { return getCellContents(column, ivec.intAt(0)); } return null; } /** * Returns the TObject value that represents the last item in the set or * null if there are no items in the column set. 
*/ public final TObject[] getLastCellContent(int[] col_map) { if (col_map.length > 1) { throw new Error("Multi-column getLastCellContent not supported."); } return singleArrayCellMap(getLastCellContent(col_map[0])); } /** * If the given column contains all items of the same value, this method * returns the value. If it doesn't, or the column set is empty it returns * null. */ public final TObject getSingleCellContent(int column) { IntegerVector ivec = selectFirst(column); int sz = ivec.size(); if (sz == getRowCount() && sz > 0) { return getCellContents(column, ivec.intAt(0)); } return null; } /** * If the given column contains all items of the same value, this method * returns the value. If it doesn't, or the column set is empty it returns * null. */ public final TObject[] getSingleCellContent(int[] col_map) { if (col_map.length > 1) { throw new Error("Multi-column getSingleCellContent not supported."); } return singleArrayCellMap(getSingleCellContent(col_map[0])); } /** * Returns true if the given cell is found in the table. */ public final boolean columnContainsCell(int column, TObject cell) { IntegerVector ivec = selectRows(column, Operator.get("="), cell); return ivec.size() > 0; } /** * Compares cell1 with cell2 and if the given operator evalutes to true then * returns true, otherwise false. */ public static boolean compareCells( TObject ob1, TObject ob2, Operator op) { TObject result = op.eval(ob1, ob2, null, null, null); // NOTE: This will be a NullPointerException if the result is not a // boolean type. return result.toBoolean().booleanValue(); } /** * Assuming this table is a 2 column key/value table, and the first column * is a string, this will convert it into a map. The returned map can * then be used to access values in the second column. 
*/ public Map toMap() { if (getColumnCount() == 2) { HashMap map = new HashMap(); RowEnumeration en = rowEnumeration(); while (en.hasMoreRows()) { int row_index = en.nextRowIndex(); TObject key = getCellContents(0, row_index); TObject value = getCellContents(1, row_index); map.put(key.getObject().toString(), value.getObject()); } return map; } else { throw new Error("Table must have two columns."); } } // Stores col name -> col index lookups private HashMap col_name_lookup; private Object COL_LOOKUP_LOCK = new Object(); /** * A faster way to find a column index given a string column name. This * caches column name -> column index in a HashMap. */ public final int fastFindFieldName(Variable col) { synchronized (COL_LOOKUP_LOCK) { if (col_name_lookup == null) { col_name_lookup = new HashMap(30); } Object ob = col_name_lookup.get(col); if (ob == null) { int ci = findFieldName(col); col_name_lookup.put(col, new Integer(ci)); return ci; } else { return ((Integer) ob).intValue(); } } } /** * Returns a TableVariableResolver object for this table. */ final TableVariableResolver getVariableResolver() { return new TableVariableResolver(); } // ---------- Inner classes ---------- /** * An implementation of VariableResolver that we can use to resolve column * names in this table to cells for a specific row. */ final class TableVariableResolver implements VariableResolver { private int row_index = -1; public void setRow(int row_index) { this.row_index = row_index; } private int findColumnName(Variable variable) { int col_index = fastFindFieldName(variable); if (col_index == -1) { throw new Error("Can't find column: " + variable); } return col_index; } // --- Implemented --- public int setID() { return row_index; } public TObject resolve(Variable variable) { return getCellContents(findColumnName(variable), row_index); } public TType returnTType(Variable variable) { return getTTypeForColumn(variable); } } /** * Returns a string that represents this table. 
*/ public String toString() { String name = "VT" + hashCode(); if (this instanceof AbstractDataTable) { name = ((AbstractDataTable) this).getTableName().toString(); } return name + "[" + getRowCount() + "]"; } /** * Prints a graph of the table hierarchy to the stream. */ public void printGraph(PrintStream out, int indent) { for (int i = 0; i < indent; ++i) { out.print(' '); } out.println("T[" + getClass() + "]"); } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/TableAccessState.java000066400000000000000000000067711330501023400257450ustar00rootroot00000000000000/** * com.mckoi.database.TableAccessState 13 Sep 1998 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; /** * This class provides very limited access to a Table object. The purpose of * this object is to define the functionality of a table when the root table(s) * are locked via the 'Table.lockRoot(int)' method, and when the Table is no * longer READ or WRITE locked via the 'LockingMechanism' system. During these * conditions, the table is in a semi-volatile state, so this class provides * a safe way to access the table without having to worry about using some * functionality of Table which isn't supported at this time. * * @author Tobias Downer */ public final class TableAccessState { /** * The underlying Table object. 
*/ private Table table; /** * Set to true when the table is first locked. */ private boolean been_locked; /** * The Constructor. */ TableAccessState(Table table) { this.table = table; been_locked = false; } /** * Returns the cell at the given row/column coordinates in the table. * This method is valid because it doesn't use any of the SelectableScheme * information in any of its parent tables which could change at any time * when there is no READ or WRITE lock on the table. */ public TObject getCellContents(int column, int row) { return table.getCellContents(column, row); } /** * Returns the DataTableDef object that contains information on the columns * of the table. */ public DataTableDef getDataTableDef() { return table.getDataTableDef(); } /** * Returns the TableName of the given column of this table. This, together * with 'getDataTableDef' is used to find the fully qualified name of a * column of the table. */ public Variable getResolvedVariable(int column) { return table.getResolvedVariable(column); } // /** // * Returns the TableField object of the given column. // * This information is constant per table. // */ // public TableField getFieldAt(int column) { // return table.getFieldAt(column); // } // /** // * Returns a fully resolved name of the given column. // */ // public String getResolvedColumnName(int column) { // return table.getResolvedColumnName(column); // } /** * Locks the root rows of the table. * This method is a bit of a HACK - why should the contract include being * able to lock the root rows? * This method only permits the roots to be locked once. */ public void lockRoot(int key) { if (!been_locked) { table.lockRoot(key); been_locked = true; } } /** * Unlocks the root rows of the table. 
*/ public void unlockRoot(int key) { if (been_locked) { // && table.hasRootsLocked()) { table.unlockRoot(key); been_locked = false; } else { throw new RuntimeException("The root rows aren't locked."); } } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/TableBackedCache.java000066400000000000000000000143151330501023400256310ustar00rootroot00000000000000/** * com.mckoi.database.TableBackedCache 12 Mar 2003 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import com.mckoi.util.Cache; import com.mckoi.util.IntegerVector; /** * A TableBackedCache is a special type of a cache in a DataTableConglomerate * that is backed by a table in the database. The purpose of this object is to * provide efficient access to some specific information in a table via a * cache. *

* For example, we use this object to provide cached access to the system * privilege tables. The engine often performs identical types of priv * queries on the database and it's desirable to cache the access to this * table. *

* This class provides the following services; * 1) Allows for an instance of this object to be attached to a single * DatabaseConnection * 2) Listens for any changes that are committed to the table(s) and flushes the * cache as neccessary. *

* Note that this object is designed to fit into the pure serializable * transaction isolation system that Mckoi employs. This object will provide * a view of the table as it was when the transaction started. When the * transaction commits (or rollsback) the view is updated to the most current * version. If a change is committed to the tables this cache is backed by, * the cache is only flushed when there are no open transactions on the * connection. * * @author Tobias Downer */ abstract class TableBackedCache { /** * The table that this cache is backed by. */ private TableName backed_by_table; /** * The list of added rows to the table above when a change is * committed. */ private IntegerVector added_list; /** * The list of removed rows from the table above when a change is * committed. */ private IntegerVector removed_list; /** * Set to true when the backing DatabaseConnection has a transaction open. */ private boolean transaction_active; /** * The listener object. */ private TransactionModificationListener listener; /** * Constructs this object. */ protected TableBackedCache(TableName table) { this.backed_by_table = table; added_list = new IntegerVector(); removed_list = new IntegerVector(); } /** * Adds new row ids to the given list. */ private void addRowsToList(int[] from, IntegerVector list) { if (from != null) { for (int i = 0; i < from.length; ++i) { list.addInt(from[i]); } } } /** * Attaches this object to a conglomerate. This applies the appropriate * listeners to the tables. */ final void attachTo(TableDataConglomerate conglomerate) { // TableDataConglomerate conglomerate = connection.getConglomerate(); TableName table_name = backed_by_table; listener = new TransactionModificationListener() { public void tableChange(TableModificationEvent evt) { // Ignore. 
} public void tableCommitChange(TableCommitModificationEvent evt) { TableName table_name = evt.getTableName(); if (table_name.equals(backed_by_table)) { synchronized (removed_list) { addRowsToList(evt.getAddedRows(), added_list); addRowsToList(evt.getRemovedRows(), removed_list); } } } }; conglomerate.addTransactionModificationListener(table_name, listener); } /** * Call to detach this object from a TableDataConglomerate. */ final void detatchFrom(TableDataConglomerate conglomerate) { // TableDataConglomerate conglomerate = connection.getConglomerate(); TableName table_name = backed_by_table; conglomerate.removeTransactionModificationListener(table_name, listener); } /** * Called from DatabaseConnection to notify this object that a new transaction * has been started. When a transaction has started, any committed changes * to the table must NOT be immediately reflected in this cache. Only * when the transaction commits is there a possibility of the cache * information being incorrect. */ final void transactionStarted() { transaction_active = true; internalPurgeCache(); } /** * Called from DatabaseConnection to notify that object that a transaction * has closed. When a transaction is closed, information in the cache may * be invalidated. For example, if rows 10 - 50 were delete then any * information in the cache that touches this data must be flushed from the * cache. */ final void transactionFinished() { transaction_active = false; internalPurgeCache(); } /** * Internal method which copies the 'added' and 'removed' row lists and * calls the 'purgeCacheOfInvalidatedEntries' method. 
*/ private void internalPurgeCache() { // Make copies of the added_list and removed_list IntegerVector add, remove; synchronized (removed_list) { add = new IntegerVector(added_list); remove = new IntegerVector(removed_list); // Clear the added and removed list added_list.clear(); removed_list.clear(); } // Make changes to the cache purgeCacheOfInvalidatedEntries(add, remove); } /** * This method is called when the transaction starts and finishes and must * purge the cache of all invalidated entries. *

* Note that this method must NOT make any queries on the database. It must * only, at the most, purge the cache of invalid entries. A trivial * implementation of this might completely clear the cache of all data if * removed_row.size() > 0. */ abstract void purgeCacheOfInvalidatedEntries( IntegerVector added_rows, IntegerVector removed_rows); } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/TableCommitModificationEvent.java000066400000000000000000000060611330501023400303130ustar00rootroot00000000000000/** * com.mckoi.database.TableCommitModificationEvent 25 Feb 2003 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import com.mckoi.util.IntegerVector; /** * An object that encapsulates all row modification information about a table * when a change to the table is about to be committed. The object provides * information about what rows in the table were changed * (inserted/updated/deleted). * * @author Tobias Downer */ public class TableCommitModificationEvent { /** * A SimpleTransaction that can be used to query tables in the database - * the view of which will be the view when the transaction is committed. */ private SimpleTransaction transaction; /** * The name of the table that is being changed. */ private TableName table_name; /** * A normalized list of all rows that were added by the transaction being * committed. 
*/ private int[] added_rows; /** * A normalized list of all rows that were removed by the transaction being * committed. */ private int[] removed_rows; /** * Constructs the event. */ public TableCommitModificationEvent(SimpleTransaction transaction, TableName table_name, int[] added, int[] removed) { this.transaction = transaction; this.table_name = table_name; this.added_rows = added; this.removed_rows = removed; } /** * Returns the Transaction that represents the view of the database when * the changes to the table have been committed. */ public SimpleTransaction getTransaction() { return transaction; } /** * Returns the name of the table. */ public TableName getTableName() { return table_name; } /** * Returns the normalized list of all rows that were inserted or updated * in this table of the transaction being committed. This is a normalized * list which means if a row is inserted and then deleted in the transaction * then it is not considered important and does not appear in this list. */ public int[] getAddedRows() { return added_rows; } /** * Returns the normalized list of all rows that were deleted or updated * in this table of the transaction being committed. This is a normalized * list which means if a row is inserted and then deleted in the transaction * then it is not considered important and does not appear in this list. */ public int[] getRemovedRows() { return removed_rows; } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/TableDataConglomerate.java000066400000000000000000003637241330501023400267600ustar00rootroot00000000000000/** * com.mckoi.database.TableDataConglomerate 18 Nov 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import java.io.*; import java.util.Iterator; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import com.mckoi.util.IntegerListInterface; import com.mckoi.util.IntegerIterator; import com.mckoi.util.IntegerVector; import com.mckoi.util.ByteArrayUtil; import com.mckoi.util.UserTerminal; import com.mckoi.util.BigNumber; import com.mckoi.debug.*; import com.mckoi.store.Store; import com.mckoi.store.MutableArea; import com.mckoi.store.Area; import com.mckoi.database.StateStore.StateResource; import com.mckoi.database.global.ByteLongObject; import com.mckoi.database.global.ObjectTranslator; import com.mckoi.database.global.Ref; /** * A conglomerate of data that represents the contents of all tables in a * complete database. This object handles all data persistance management * (storage, retrieval, removal) issues. It is a transactional manager for * both data and indices in the database. * * @author Tobias Downer */ public class TableDataConglomerate { /** * The postfix on the name of the state file for the database store name. */ public static final String STATE_POST = "_sf"; // ---------- The standard constraint/schema tables ---------- /** * The name of the system schema where persistant conglomerate state is * stored. */ public static final String SYSTEM_SCHEMA = "SYS_INFO"; /** * The schema info table. 
*/ public static final TableName SCHEMA_INFO_TABLE = new TableName(SYSTEM_SCHEMA, "sUSRSchemaInfo"); public static final TableName PERSISTENT_VAR_TABLE = new TableName(SYSTEM_SCHEMA, "sUSRDatabaseVars"); public static final TableName FOREIGN_COLS_TABLE = new TableName(SYSTEM_SCHEMA, "sUSRForeignColumns"); public static final TableName UNIQUE_COLS_TABLE = new TableName(SYSTEM_SCHEMA, "sUSRUniqueColumns"); public static final TableName PRIMARY_COLS_TABLE = new TableName(SYSTEM_SCHEMA, "sUSRPrimaryColumns"); public static final TableName CHECK_INFO_TABLE = new TableName(SYSTEM_SCHEMA, "sUSRCheckInfo"); public static final TableName UNIQUE_INFO_TABLE = new TableName(SYSTEM_SCHEMA, "sUSRUniqueInfo"); public static final TableName FOREIGN_INFO_TABLE = new TableName(SYSTEM_SCHEMA, "sUSRFKeyInfo"); public static final TableName PRIMARY_INFO_TABLE = new TableName(SYSTEM_SCHEMA, "sUSRPKeyInfo"); public static final TableName SYS_SEQUENCE_INFO = new TableName(SYSTEM_SCHEMA, "sUSRSequenceInfo"); public static final TableName SYS_SEQUENCE = new TableName(SYSTEM_SCHEMA, "sUSRSequence"); /** * The TransactionSystem that this Conglomerate is a child of. */ private final TransactionSystem system; /** * The StoreSystem object used by this conglomerate to store the underlying * representation. */ private final StoreSystem store_system; /** * The name given to this conglomerate. */ private String name; /** * The actual store that backs the state store. */ private Store act_state_store; /** * A store for the conglomerate state container. This * file stores information persistantly about the state of this object. */ private StateStore state_store; /** * The current commit id for committed transactions. Whenever transactional * changes are committed to the conglomerate, this id is incremented. */ private long commit_id; /** * The list of all tables that are currently open in this conglomerate. * This includes tables that are not committed. 
*/ private ArrayList table_list; /** * The actual Store implementation that maintains the BlobStore information * for this conglomerate (if there is one). */ private Store act_blob_store; /** * The BlobStore object for this conglomerate. */ private BlobStore blob_store; /** * The SequenceManager object for this conglomerate. */ private SequenceManager sequence_manager; /** * The list of transactions that are currently open over this conglomerate. * This list is ordered from lowest commit_id to highest. This object is * shared with all the children MasterTableDataSource objects. */ private OpenTransactionList open_transactions; /** * The list of all name space journals for the history of committed * transactions. */ private ArrayList namespace_journal_list; // ---------- Table event listener ---------- /** * All listeners for modification events on tables in this conglomerate. * This is a mapping from TableName -> ArrayList of listeners. */ private final HashMap modification_listeners; // ---------- Locks ---------- /** * This lock is obtained when we go to commit a change to the table. * Grabbing this lock ensures that no other commits can occur at the same * time on this conglomerate. */ final Object commit_lock = new Object(); // // ---------- Shutdown hook thread ---------- // // /** // * The ConglomerateShutdownHookThread object which we create when the // * conglomerate in openned, and removed when we close the conglomerate. // */ // private ConglomerateShutdownHookThread shutdown_hook = null; /** * Constructs the conglomerate. */ public TableDataConglomerate(TransactionSystem system, StoreSystem store_system) { this.system = system; this.store_system = store_system; this.open_transactions = new OpenTransactionList(system); this.modification_listeners = new HashMap(); this.namespace_journal_list = new ArrayList(); this.sequence_manager = new SequenceManager(this); } /** * Returns the TransactionSystem that this conglomerate is part of. 
*/ public final TransactionSystem getSystem() { return system; } /** * Returns the StoreSystem used by this conglomerate to manage the * persistent state of the database. */ public final StoreSystem storeSystem() { return store_system; } /** * Returns the SequenceManager object for this conglomerate. */ final SequenceManager getSequenceManager() { return sequence_manager; } /** * Returns the BlobStore for this conglomerate. */ final BlobStore getBlobStore() { return blob_store; } /** * Returns the DebugLogger object that we use to log debug messages to. */ public final DebugLogger Debug() { return getSystem().Debug(); } /** * Returns the name given to this conglomerate. */ String getName() { return name; } // ---------- Conglomerate state methods ---------- /** * Marks the given table id as committed dropped. */ private void markAsCommittedDropped(int table_id) { MasterTableDataSource master_table = getMasterTable(table_id); state_store.addDeleteResource( new StateResource(table_id, createEncodedTableFile(master_table))); } /** * Loads the master table given the table_id and the name of the table * resource in the database path. The table_string is a specially formatted * string that we parse to determine the file structure of the table. 
*/ private MasterTableDataSource loadMasterTable(int table_id, String table_str, int table_type) throws IOException { // Open the table if (table_type == 1) { V1MasterTableDataSource master = new V1MasterTableDataSource(getSystem(), storeSystem(), open_transactions); if (master.exists(table_str)) { return master; } } else if (table_type == 2) { V2MasterTableDataSource master = new V2MasterTableDataSource(getSystem(), storeSystem(), open_transactions, blob_store); if (master.exists(table_str)) { return master; } } // If not exists, then generate an error message Debug().write(Lvl.ERROR, this, "Couldn't find table source - resource name: " + table_str + " table_id: " + table_id); return null; } /** * Returns a string that is an encoded table file name. An encoded table * file name includes information about the table type with the name of the * table. For example, ":1ThisTable" represents a V1MasterTableDataSource * table with file name "ThisTable". */ private static String createEncodedTableFile(MasterTableDataSource table) { char type; if (table instanceof V1MasterTableDataSource) { type = '1'; } else if (table instanceof V2MasterTableDataSource) { type = '2'; } else { throw new RuntimeException("Unrecognised MasterTableDataSource class."); } StringBuffer buf = new StringBuffer(); buf.append(':'); buf.append(type); buf.append(table.getSourceIdent()); return new String(buf); } /** * Reads in the list of committed tables in this conglomerate. This should * only be called during an 'open' like method. This method fills the * 'committed_tables' and 'table_list' lists with the tables in this * conglomerate. 
*/ private void readVisibleTables() throws IOException { // The list of all visible tables from the state file StateResource[] tables = state_store.getVisibleList(); // For each visible table for (int i = 0; i < tables.length; ++i) { StateResource resource = tables[i]; int master_table_id = (int) resource.table_id; String file_name = resource.name; // Parse the file name string and determine the table type. int table_type = 1; if (file_name.startsWith(":")) { if (file_name.charAt(1) == '1') { table_type = 1; } else if (file_name.charAt(1) == '2') { table_type = 2; } else { throw new RuntimeException("Table type is not known."); } file_name = file_name.substring(2); } // Load the master table from the resource information MasterTableDataSource master = loadMasterTable(master_table_id, file_name, table_type); if (master == null) { throw new Error("Table file for " + file_name + " was not found."); } if (master instanceof V1MasterTableDataSource) { V1MasterTableDataSource v1_master = (V1MasterTableDataSource) master; v1_master.open(file_name); } else if (master instanceof V2MasterTableDataSource) { V2MasterTableDataSource v2_master = (V2MasterTableDataSource) master; v2_master.open(file_name); } else { throw new Error("Unknown master table type: " + master.getClass()); } // Add the table to the table list table_list.add(master); } } /** * Checks the list of committed tables in this conglomerate. This should * only be called during an 'check' like method. This method fills the * 'committed_tables' and 'table_list' lists with the tables in this * conglomerate. 
*/ public void checkVisibleTables(UserTerminal terminal) throws IOException { // The list of all visible tables from the state file StateResource[] tables = state_store.getVisibleList(); // For each visible table for (int i = 0; i < tables.length; ++i) { StateResource resource = tables[i]; int master_table_id = (int) resource.table_id; String file_name = resource.name; // Parse the file name string and determine the table type. int table_type = 1; if (file_name.startsWith(":")) { if (file_name.charAt(1) == '1') { table_type = 1; } else if (file_name.charAt(1) == '2') { table_type = 2; } else { throw new RuntimeException("Table type is not known."); } file_name = file_name.substring(2); } // Load the master table from the resource information MasterTableDataSource master = loadMasterTable(master_table_id, file_name, table_type); if (master instanceof V1MasterTableDataSource) { V1MasterTableDataSource v1_master = (V1MasterTableDataSource) master; v1_master.checkAndRepair(file_name, terminal); } else if (master instanceof V2MasterTableDataSource) { V2MasterTableDataSource v2_master = (V2MasterTableDataSource) master; v2_master.checkAndRepair(file_name, terminal); } else { throw new Error("Unknown master table type: " + master.getClass()); } // Add the table to the table list table_list.add(master); // Set a check point store_system.setCheckPoint(); } } /** * Reads in the list of committed dropped tables on this conglomerate. This * should only be called during an 'open' like method. This method fills * the 'committed_dropped' and 'table_list' lists with the tables in this * conglomerate. *

   * Any table file that can not be found is simply removed from the delete
   * list rather than treated as an error.
   */
  private void readDroppedTables() throws IOException {

    // The list of all dropped tables from the state file
    StateResource[] tables = state_store.getDeleteList();
    // For each visible table
    for (int i = 0; i < tables.length; ++i) {
      StateResource resource = tables[i];

      int master_table_id = (int) resource.table_id;
      String file_name = resource.name;

      // Parse the file name string and determine the table type.
      // Encoded names look like ":1Name" or ":2Name" (see
      // createEncodedTableFile); names without the prefix default to type 1.
      int table_type = 1;
      if (file_name.startsWith(":")) {
        if (file_name.charAt(1) == '1') {
          table_type = 1;
        }
        else if (file_name.charAt(1) == '2') {
          table_type = 2;
        }
        else {
          throw new RuntimeException("Table type is not known.");
        }
        file_name = file_name.substring(2);
      }

      // Load the master table from the resource information
      MasterTableDataSource master =
                   loadMasterTable(master_table_id, file_name, table_type);

      // File wasn't found so remove from the delete resources
      if (master == null) {
        state_store.removeDeleteResource(resource.name);
      }
      else {
        // Open the dropped table - presumably so its storage can later be
        // reclaimed by the clean-up pass (TODO(review): confirm against
        // cleanUpConglomerate).
        if (master instanceof V1MasterTableDataSource) {
          V1MasterTableDataSource v1_master = (V1MasterTableDataSource) master;
          v1_master.open(file_name);
        }
        else if (master instanceof V2MasterTableDataSource) {
          V2MasterTableDataSource v2_master = (V2MasterTableDataSource) master;
          v2_master.open(file_name);
        }
        else {
          throw new Error("Unknown master table type: " + master.getClass());
        }

        // Add the table to the table list
        table_list.add(master);
      }

    }

    // Commit any changes to the state store
    state_store.commit();

  }

  /**
   * Create the system tables that must be present in a conglomerate.  These
   * tables consist of constraint and table management data.
   *
   * sUSRPKeyInfo - Primary key constraint information.
   * sUSRFKeyInfo - Foreign key constraint information.
   * sUSRUniqueInfo - Unique set constraint information.
   * sUSRCheckInfo  - Check constraint information.
   * sUSRPrimaryColumns - Primary columns information (refers to PKeyInfo)
   * sUSRUniqueColumns  - Unique columns information (refers to UniqueInfo)
   * sUSRForeignColumns - Foreign column information, pairing each foreign
   *                      key column with the primary key column it
   *                      references (refers to FKeyInfo).
   * 
* These tables handle data for referential integrity. There are also some * additional tables containing general table information. *
   * sUSRTableInfo / sUSRColumnColumns - All table and column information.
   * 
* The design is fairly elegant in that we are using the database to store * information to maintain referential integrity. *

   * The schema layout for these tables;
   *
   *  CREATE TABLE sUSRPKeyInfo (
   *    id          NUMERIC NOT NULL,
   *    name        TEXT NOT NULL,  // The name of the primary key constraint
   *    schema      TEXT NOT NULL,  // The name of the schema
   *    table       TEXT NOT NULL,  // The name of the table
   *    deferred    BIT  NOT NULL,  // Whether deferred or immediate
   *    PRIMARY KEY (id),
   *    UNIQUE (schema, table)
   *  );
   *  CREATE TABLE sUSRFKeyInfo (
   *    id          NUMERIC NOT NULL,
   *    name        TEXT NOT NULL,  // The name of the foreign key constraint
   *    schema      TEXT NOT NULL,  // The name of the schema
   *    table       TEXT NOT NULL,  // The name of the table
   *    ref_schema  TEXT NOT NULL,  // The name of the schema referenced
   *    ref_table   TEXT NOT NULL,  // The name of the table referenced
   *    update_rule TEXT NOT NULL,  // The rule for updating to table
   *    delete_rule TEXT NOT NULL,  // The rule for deleting from table
   *    deferred    BIT  NOT NULL,  // Whether deferred or immediate
   *    PRIMARY KEY (id)
   *  );
   *  CREATE TABLE sUSRUniqueInfo (
   *    id          NUMERIC NOT NULL,
   *    name        TEXT NOT NULL,  // The name of the unique constraint
   *    schema      TEXT NOT NULL,  // The name of the schema
   *    table       TEXT NOT NULL,  // The name of the table
   *    deferred    BIT  NOT NULL,  // Whether deferred or immediate
   *    PRIMARY KEY (id)
   *  );
   *  CREATE TABLE sUSRCheckInfo (
   *    id          NUMERIC NOT NULL,
   *    name        TEXT NOT NULL,  // The name of the check constraint
   *    schema      TEXT NOT NULL,  // The name of the schema
   *    table       TEXT NOT NULL,  // The name of the table
   *    expression  TEXT NOT NULL,  // The check expression
   *    deferred    BIT  NOT NULL,  // Whether deferred or immediate
   *    PRIMARY KEY (id)
   *  );
   *  CREATE TABLE sUSRPrimaryColumns (
   *    pk_id   NUMERIC NOT NULL, // The primary key constraint id
   *    column  TEXT NOT NULL,    // The name of the primary
   *    seq_no  INTEGER NOT NULL, // The sequence number of this constraint
   *    FOREIGN KEY pk_id REFERENCES sUSRPKeyInfo
   *  );
   *  CREATE TABLE sUSRUniqueColumns (
   *    un_id   NUMERIC NOT NULL, // The unique constraint id
   *    column  TEXT NOT NULL,    // The column that is unique
   *    seq_no  INTEGER NOT NULL, // The sequence number of this constraint
   *    FOREIGN KEY un_id REFERENCES sUSRUniqueInfo
   *  );
   *  CREATE TABLE sUSRForeignColumns (
   *    fk_id   NUMERIC NOT NULL, // The foreign key constraint id
   *    fcolumn TEXT NOT NULL,    // The column in the foreign key
   *    pcolumn TEXT NOT NULL,    // The column in the primary key
   *                              // (referenced)
   *    seq_no  INTEGER NOT NULL, // The sequence number of this constraint
   *    FOREIGN KEY fk_id REFERENCES sUSRFKeyInfo
   *  );
   *  CREATE TABLE sUSRSchemaInfo (
   *    id     NUMERIC NOT NULL,
   *    name   TEXT NOT NULL,
   *    type   TEXT,              // Schema type (system, etc)
   *    other  TEXT,
   *
   *    UNIQUE ( name )
   *  );
   *  CREATE TABLE sUSRTableInfo (
   *    id     NUMERIC NOT NULL,
   *    name   TEXT NOT NULL,     // The name of the table
   *    schema TEXT NOT NULL,     // The name of the schema of this table
   *    type   TEXT,              // Table type (temporary, system, etc)
   *    other  TEXT,              // Notes, etc
   *
   *    UNIQUE ( name )
   *  );
   *  CREATE TABLE sUSRColumnColumns (
   *    t_id    NUMERIC NOT NULL,  // Foreign key to sUSRTableInfo
   *    column  TEXT NOT NULL,     // The column name
   *    seq_no  INTEGER NOT NULL,  // The sequence in the table
   *    type    TEXT NOT NULL,     // The SQL type of this column
   *    size    NUMERIC,           // The size of the column if applicable
   *    scale   NUMERIC,           // The scale of the column if applicable
   *    default TEXT NOT NULL,     // The default expression
   *    constraints TEXT NOT NULL, // The constraints of this column
   *    other   TEXT,              // Notes, etc
   *
   *    FOREIGN KEY t_id REFERENCES sUSRTableInfo,
   *    UNIQUE ( t_id, column )
   *  );
   * 
   * 
*/
  void updateSystemTableSchema() {
    // Creates or alters the system tables inside a single transaction.
    // The two int arguments to 'alterCreateTable' appear to be storage
    // sizing hints (187/128 for the larger info tables, 91/128 for the
    // column-set tables) - TODO(review): confirm their meaning against
    // Transaction.alterCreateTable.

    // Create the transaction
    Transaction transaction = createTransaction();

    DataTableDef table;

    // Sequence generator definitions.
    table = new DataTableDef();
    table.setTableName(SYS_SEQUENCE_INFO);
    table.addColumn(DataTableColumnDef.createNumericColumn("id"));
    table.addColumn(DataTableColumnDef.createStringColumn("schema"));
    table.addColumn(DataTableColumnDef.createStringColumn("name"));
    table.addColumn(DataTableColumnDef.createNumericColumn("type"));
    transaction.alterCreateTable(table, 187, 128);

    // Per-sequence runtime state.
    table = new DataTableDef();
    table.setTableName(SYS_SEQUENCE);
    table.addColumn(DataTableColumnDef.createNumericColumn("seq_id"));
    table.addColumn(DataTableColumnDef.createNumericColumn("last_value"));
    table.addColumn(DataTableColumnDef.createNumericColumn("increment"));
    table.addColumn(DataTableColumnDef.createNumericColumn("minvalue"));
    table.addColumn(DataTableColumnDef.createNumericColumn("maxvalue"));
    table.addColumn(DataTableColumnDef.createNumericColumn("start"));
    table.addColumn(DataTableColumnDef.createNumericColumn("cache"));
    table.addColumn(DataTableColumnDef.createBooleanColumn("cycle"));
    transaction.alterCreateTable(table, 187, 128);

    // Primary key constraint definitions.
    table = new DataTableDef();
    table.setTableName(PRIMARY_INFO_TABLE);
    table.addColumn(DataTableColumnDef.createNumericColumn("id"));
    table.addColumn(DataTableColumnDef.createStringColumn("name"));
    table.addColumn(DataTableColumnDef.createStringColumn("schema"));
    table.addColumn(DataTableColumnDef.createStringColumn("table"));
    table.addColumn(DataTableColumnDef.createNumericColumn("deferred"));
    transaction.alterCreateTable(table, 187, 128);

    // Foreign key constraint definitions.
    table = new DataTableDef();
    table.setTableName(FOREIGN_INFO_TABLE);
    table.addColumn(DataTableColumnDef.createNumericColumn("id"));
    table.addColumn(DataTableColumnDef.createStringColumn("name"));
    table.addColumn(DataTableColumnDef.createStringColumn("schema"));
    table.addColumn(DataTableColumnDef.createStringColumn("table"));
    table.addColumn(DataTableColumnDef.createStringColumn("ref_schema"));
    table.addColumn(DataTableColumnDef.createStringColumn("ref_table"));
    table.addColumn(DataTableColumnDef.createStringColumn("update_rule"));
    table.addColumn(DataTableColumnDef.createStringColumn("delete_rule"));
    table.addColumn(DataTableColumnDef.createNumericColumn("deferred"));
    transaction.alterCreateTable(table, 187, 128);

    // Unique constraint definitions.
    table = new DataTableDef();
    table.setTableName(UNIQUE_INFO_TABLE);
    table.addColumn(DataTableColumnDef.createNumericColumn("id"));
    table.addColumn(DataTableColumnDef.createStringColumn("name"));
    table.addColumn(DataTableColumnDef.createStringColumn("schema"));
    table.addColumn(DataTableColumnDef.createStringColumn("table"));
    table.addColumn(DataTableColumnDef.createNumericColumn("deferred"));
    transaction.alterCreateTable(table, 187, 128);

    // Check constraint definitions (the expression is stored both as text
    // and in serialized form).
    table = new DataTableDef();
    table.setTableName(CHECK_INFO_TABLE);
    table.addColumn(DataTableColumnDef.createNumericColumn("id"));
    table.addColumn(DataTableColumnDef.createStringColumn("name"));
    table.addColumn(DataTableColumnDef.createStringColumn("schema"));
    table.addColumn(DataTableColumnDef.createStringColumn("table"));
    table.addColumn(DataTableColumnDef.createStringColumn("expression"));
    table.addColumn(DataTableColumnDef.createNumericColumn("deferred"));
    table.addColumn(
              DataTableColumnDef.createBinaryColumn("serialized_expression"));
    transaction.alterCreateTable(table, 187, 128);

    // Column sets for primary key constraints.
    table = new DataTableDef();
    table.setTableName(PRIMARY_COLS_TABLE);
    table.addColumn(DataTableColumnDef.createNumericColumn("pk_id"));
    table.addColumn(DataTableColumnDef.createStringColumn("column"));
    table.addColumn(DataTableColumnDef.createNumericColumn("seq_no"));
    transaction.alterCreateTable(table, 91, 128);

    // Column sets for unique constraints.
    table = new DataTableDef();
    table.setTableName(UNIQUE_COLS_TABLE);
    table.addColumn(DataTableColumnDef.createNumericColumn("un_id"));
    table.addColumn(DataTableColumnDef.createStringColumn("column"));
    table.addColumn(DataTableColumnDef.createNumericColumn("seq_no"));
    transaction.alterCreateTable(table, 91, 128);

    // Column pairs for foreign key constraints (foreign column -> referenced
    // primary column).
    table = new DataTableDef();
    table.setTableName(FOREIGN_COLS_TABLE);
    table.addColumn(DataTableColumnDef.createNumericColumn("fk_id"));
    table.addColumn(DataTableColumnDef.createStringColumn("fcolumn"));
    table.addColumn(DataTableColumnDef.createStringColumn("pcolumn"));
    table.addColumn(DataTableColumnDef.createNumericColumn("seq_no"));
    transaction.alterCreateTable(table, 91, 128);

    // Schema registry.
    table = new DataTableDef();
    table.setTableName(SCHEMA_INFO_TABLE);
    table.addColumn(DataTableColumnDef.createNumericColumn("id"));
    table.addColumn(DataTableColumnDef.createStringColumn("name"));
    table.addColumn(DataTableColumnDef.createStringColumn("type"));
    table.addColumn(DataTableColumnDef.createStringColumn("other"));
    transaction.alterCreateTable(table, 91, 128);

    // Stores misc variables of the database,
    table = new DataTableDef();
    table.setTableName(PERSISTENT_VAR_TABLE);
    table.addColumn(DataTableColumnDef.createStringColumn("variable"));
    table.addColumn(DataTableColumnDef.createStringColumn("value"));
    transaction.alterCreateTable(table, 91, 128);

    // Commit and close the transaction.
    try {
      transaction.closeAndCommit();
    }
    catch (TransactionException e) {
      Debug().writeException(e);
      throw new Error("Transaction Exception creating conglomerate.");
    }

  }

  /**
   * Given a table with an 'id' field, this will check that the sequence
   * value for the table is at least greater than the maximum id in the
   * column.
   */
  void resetTableID(TableName tname) {
    // Create the transaction
    Transaction transaction = createTransaction();
    // Get the table
    MutableTableDataSource table = transaction.getTable(tname);
    // Find the index of the column name called 'id'
    DataTableDef table_def = table.getDataTableDef();
    int col_index = table_def.findColumnName("id");
    if (col_index == -1) {
      throw new Error("Column name 'id' not found.");
    }
    // Find the maximum 'id' value.
    SelectableScheme scheme = table.getColumnScheme(col_index);
    IntegerVector ivec = scheme.selectLast();
    if (ivec.size() > 0) {
      TObject ob = table.getCellContents(col_index, ivec.intAt(0));
      BigNumber b_num = ob.toBigNumber();
      if (b_num != null) {
        // Set the unique id to +1 the maximum id value in the column
        transaction.setUniqueID(tname, b_num.longValue() + 1L);
      }
    }

    // Commit and close the transaction.
    try {
      transaction.closeAndCommit();
    }
    catch (TransactionException e) {
      Debug().writeException(e);
      throw new Error("Transaction Exception creating conglomerate.");
    }

  }

  /**
   * Resets the table sequence id for all the system tables managed by the
   * conglomerate.
   */
  void resetAllSystemTableID() {
    resetTableID(PRIMARY_INFO_TABLE);
    resetTableID(FOREIGN_INFO_TABLE);
    resetTableID(UNIQUE_INFO_TABLE);
    resetTableID(CHECK_INFO_TABLE);
    resetTableID(SCHEMA_INFO_TABLE);
  }

  /**
   * Populates the system table schema with initial data for an empty
   * conglomerate.  This sets up the standard variables and table
   * constraint data.
*/
  private void initializeSystemTableSchema() {
    // Create the transaction
    Transaction transaction = createTransaction();

    // Insert the two default schema names,
    transaction.createSchema(SYSTEM_SCHEMA, "SYSTEM");

    // -- Primary Keys --
    // The 'id' columns are primary keys on all the system tables,
    final String[] id_col = new String[] { "id" };
    transaction.addPrimaryKeyConstraint(PRIMARY_INFO_TABLE,
              id_col, Transaction.INITIALLY_IMMEDIATE, "SYSTEM_PK_PK");
    transaction.addPrimaryKeyConstraint(FOREIGN_INFO_TABLE,
              id_col, Transaction.INITIALLY_IMMEDIATE, "SYSTEM_FK_PK");
    transaction.addPrimaryKeyConstraint(UNIQUE_INFO_TABLE,
              id_col, Transaction.INITIALLY_IMMEDIATE, "SYSTEM_UNIQUE_PK");
    transaction.addPrimaryKeyConstraint(CHECK_INFO_TABLE,
              id_col, Transaction.INITIALLY_IMMEDIATE, "SYSTEM_CHECK_PK");
    transaction.addPrimaryKeyConstraint(SCHEMA_INFO_TABLE,
              id_col, Transaction.INITIALLY_IMMEDIATE, "SYSTEM_SCHEMA_PK");

    // -- Foreign Keys --
    // Create the foreign key references,
    // Each column-set table ('sUSR*Columns') references its parent
    // constraint-info table by id.
    final String[] fk_col = new String[1];
    final String[] fk_ref_col = new String[] { "id" };
    fk_col[0] = "pk_id";
    transaction.addForeignKeyConstraint(
              PRIMARY_COLS_TABLE, fk_col, PRIMARY_INFO_TABLE, fk_ref_col,
              Transaction.NO_ACTION, Transaction.NO_ACTION,
              Transaction.INITIALLY_IMMEDIATE, "SYSTEM_PK_FK");
    fk_col[0] = "fk_id";
    transaction.addForeignKeyConstraint(
              FOREIGN_COLS_TABLE, fk_col, FOREIGN_INFO_TABLE, fk_ref_col,
              Transaction.NO_ACTION, Transaction.NO_ACTION,
              Transaction.INITIALLY_IMMEDIATE, "SYSTEM_FK_FK");
    fk_col[0] = "un_id";
    transaction.addForeignKeyConstraint(
              UNIQUE_COLS_TABLE, fk_col, UNIQUE_INFO_TABLE, fk_ref_col,
              Transaction.NO_ACTION, Transaction.NO_ACTION,
              Transaction.INITIALLY_IMMEDIATE, "SYSTEM_UNIQUE_FK");

    // sUSRPKeyInfo 'schema', 'table' column is a unique set,
    // (You are only allowed one primary key per table).
    String[] columns = new String[] { "schema", "table" };
    transaction.addUniqueConstraint(PRIMARY_INFO_TABLE,
         columns, Transaction.INITIALLY_IMMEDIATE, "SYSTEM_PKEY_ST_UNIQUE");

    // sUSRSchemaInfo 'name' column is a unique column,
    columns = new String[] { "name" };
    transaction.addUniqueConstraint(SCHEMA_INFO_TABLE,
         columns, Transaction.INITIALLY_IMMEDIATE, "SYSTEM_SCHEMA_UNIQUE");

//    columns = new String[] { "name" };
    // Constraint names are unique per (name, schema) pair.
    columns = new String[] { "name", "schema" };
    // sUSRPKeyInfo 'name' column is a unique column,
    transaction.addUniqueConstraint(PRIMARY_INFO_TABLE,
         columns, Transaction.INITIALLY_IMMEDIATE, "SYSTEM_PKEY_UNIQUE");

    // sUSRFKeyInfo 'name' column is a unique column,
    transaction.addUniqueConstraint(FOREIGN_INFO_TABLE,
         columns, Transaction.INITIALLY_IMMEDIATE, "SYSTEM_FKEY_UNIQUE");

    // sUSRUniqueInfo 'name' column is a unique column,
    transaction.addUniqueConstraint(UNIQUE_INFO_TABLE,
         columns, Transaction.INITIALLY_IMMEDIATE, "SYSTEM_UNIQUE_UNIQUE");

    // sUSRCheckInfo 'name' column is a unique column,
    transaction.addUniqueConstraint(CHECK_INFO_TABLE,
         columns, Transaction.INITIALLY_IMMEDIATE, "SYSTEM_CHECK_UNIQUE");

    // sUSRDatabaseVars 'variable' is unique
    columns = new String[] { "variable" };
    transaction.addUniqueConstraint(PERSISTENT_VAR_TABLE,
       columns, Transaction.INITIALLY_IMMEDIATE,
       "SYSTEM_DATABASEVARS_UNIQUE");

    // Insert the version number of the database
    transaction.setPersistentVar("database.version", "1.4");

    // Commit and close the transaction.
    try {
      transaction.closeAndCommit();
    }
    catch (TransactionException e) {
      Debug().writeException(e);
      throw new Error("Transaction Exception initializing conglomerate.");
    }

  }

  /**
   * Initializes the BlobStore.  If the BlobStore doesn't exist it will be
   * created, and if it does exist it will be initialized.
   */
  private void initializeBlobStore() throws IOException {

    // Does the file already exist?
    boolean blob_store_exists = storeSystem().storeExists("BlobStore");
    // If the blob store doesn't exist and we are read_only, we can't do
    // anything further so simply return.
    if (!blob_store_exists && isReadOnly()) {
      return;
    }

    // The blob store,
    if (blob_store_exists) {
      act_blob_store = storeSystem().openStore("BlobStore");
    }
    else {
      act_blob_store = storeSystem().createStore("BlobStore");
    }

    try {
      act_blob_store.lockForWrite();

      // Create the BlobStore object
      blob_store = new BlobStore(act_blob_store);

      // Get the 64 byte fixed area
      MutableArea fixed_area = act_blob_store.getMutableArea(-1);
      // If the blob store didn't exist then we need to create it here,
      // and record the header pointer in the fixed area.
      if (!blob_store_exists) {
        long header_p = blob_store.create();
        fixed_area.putLong(header_p);
        fixed_area.checkOut();
      }
      else {
        // Otherwise we need to initialize the blob store from the header
        // pointer previously stored in the fixed area.
        long header_p = fixed_area.getLong();
        blob_store.init(header_p);
      }
    }
    finally {
      act_blob_store.unlockForWrite();
    }

  }

  // ---------- Private methods ----------

  /**
   * Returns true if the system is in read only mode.
   */
  private boolean isReadOnly() {
    return system.readOnlyAccess();
  }

  /**
   * Returns the path of the database.
   */
  private File getPath() {
    return system.getDatabasePath();
  }

  /**
   * Returns the next unique table_id value for a new table and updates the
   * conglomerate state information as appropriate.
   */
  private int nextUniqueTableID() throws IOException {
    return state_store.nextTableID();
  }

  /**
   * Sets up the internal state of this object.
*/ private void setupInternal() { commit_id = 0; table_list = new ArrayList(); // // If the VM supports shutdown hook, // try { // shutdown_hook = new ConglomerateShutdownHookThread(); // Runtime.getRuntime().addShutdownHook(shutdown_hook); // } // catch (Throwable e) { // // Catch instantiation/access errors // system.Debug().write(Lvl.MESSAGE, this, // "Unable to register shutdown hook."); // } } // ---------- Public methods ---------- /** * Minimally creates a new conglomerate but does NOT initialize any of the * system tables. This is a useful feature for a copy function that requires * a TableDataConglomerate object to copy data into but does not require any * initial system tables (because this information is copied from the source * conglomerate. */ void minimalCreate(String name) throws IOException { this.name = name; if (exists(name)) { throw new IOException("Conglomerate already exists: " + name); } // Lock the store system (generates an IOException if exclusive lock // can not be made). if (!isReadOnly()) { storeSystem().lock(name); } // Create/Open the state store act_state_store = storeSystem().createStore(name + STATE_POST); try { act_state_store.lockForWrite(); state_store = new StateStore(act_state_store); long head_p = state_store.create(); // Get the fixed area MutableArea fixed_area = act_state_store.getMutableArea(-1); fixed_area.putLong(head_p); fixed_area.checkOut(); } finally { act_state_store.unlockForWrite(); } setupInternal(); // Init the conglomerate blob store initializeBlobStore(); // Create the system table (but don't initialize) updateSystemTableSchema(); } /** * Creates a new conglomerate at the given path in the file system. This * must be an empty directory where files can be stored. This will create * the conglomerate and exit in an open (read/write) state. */ public void create(String name) throws IOException { minimalCreate(name); // Initialize the conglomerate system tables. 
initializeSystemTableSchema(); // Commit the state state_store.commit(); } /** * Opens a conglomerate. If the conglomerate does not exist then an * IOException is generated. Once a conglomerate is open, we may start * opening transactions and altering the data within it. */ public void open(String name) throws IOException { this.name = name; if (!exists(name)) { throw new IOException("Conglomerate doesn't exists: " + name); } // Check the file lock if (!isReadOnly()) { // Obtain the lock (generate error if this is not possible) storeSystem().lock(name); } // Open the state store act_state_store = storeSystem().openStore(name + STATE_POST); state_store = new StateStore(act_state_store); // Get the fixed 64 byte area. Area fixed_area = act_state_store.getArea(-1); long head_p = fixed_area.getLong(); state_store.init(head_p); setupInternal(); // Init the conglomerate blob store initializeBlobStore(); readVisibleTables(); readDroppedTables(); // We possibly have things to clean up if there are deleted columns. cleanUpConglomerate(); } /** * Closes this conglomerate. The conglomerate must be open for it to be * closed. When closed, any use of this object is undefined. */ public void close() throws IOException { synchronized (commit_lock) { // We possibly have things to clean up. cleanUpConglomerate(); // Set a check point store_system.setCheckPoint(); // Go through and close all the committed tables. int size = table_list.size(); for (int i = 0; i < size; ++i) { MasterTableDataSource master = (MasterTableDataSource) table_list.get(i); master.dispose(false); } state_store.commit(); storeSystem().closeStore(act_state_store); table_list = null; } // Unlock the storage system storeSystem().unlock(name); if (blob_store != null) { storeSystem().closeStore(act_blob_store); } // removeShutdownHook(); } // /** // * Removes the shutdown hook. 
//   */
//  private void removeShutdownHook() {
//    // If the VM supports shutdown hook, remove it,
//    try {
//      if (shutdown_hook != null) {
////        System.out.println("REMOVING: " + this);
//        Runtime.getRuntime().removeShutdownHook(shutdown_hook);
//        // We have no start it otherwise the ThreadGroup won't remove its
//        // reference to it and it causes GC problems.
//        shutdown_hook.start();
//        shutdown_hook.waitUntilComplete();
//        shutdown_hook = null;
//      }
//    }
//    catch (Throwable e) {
//      // Catch (and ignore) instantiation/access errors
//    }
//  }

  /**
   * Deletes and closes the conglomerate.  This will delete all the files in
   * the file system associated with this conglomerate, so this method
   * should be used with care.
   * <p>
   * WARNING: Will result in total loss of all data stored in the
   *   conglomerate.
   *
   * @throws IOException if deleting the backing stores fails.
   */
  public void delete() throws IOException {
    synchronized (commit_lock) {

      // We possibly have things to clean up.
      cleanUpConglomerate();

      // Go through and delete and close all the committed tables.
      int size = table_list.size();
      for (int i = 0; i < size; ++i) {
        MasterTableDataSource master =
                                 (MasterTableDataSource) table_list.get(i);
        master.drop();
      }

      // Delete the state file
      state_store.commit();
      storeSystem().closeStore(act_state_store);
      storeSystem().deleteStore(act_state_store);

      // Delete the blob store
      if (blob_store != null) {
        storeSystem().closeStore(act_blob_store);
        storeSystem().deleteStore(act_blob_store);
      }

      // Invalidate this object (see 'isClosed')
      table_list = null;

    }

    // Unlock the storage system.
    storeSystem().unlock(name);
  }

  /**
   * Returns true if the conglomerate is closed.
   */
  public boolean isClosed() {
    synchronized (commit_lock) {
      // 'table_list' is set to null by 'close' and 'delete'.
      return table_list == null;
    }
  }

  /**
   * Returns true if the conglomerate exists in the file system and can
   * be opened.
   *
   * @param name the name of the conglomerate to test for.
   */
  public boolean exists(String name) throws IOException {
    // The conglomerate exists if its state store exists.
    return storeSystem().storeExists(name + STATE_POST);
  }

  /**
   * Makes a complete copy of this database to the position represented by
   * the given TableDataConglomerate object.  The given TableDataConglomerate
   * object must NOT be being used by another database running in the JVM.
   * This may take a while to complete.  The backup operation occurs within
   * its own transaction and the copy transaction is read-only meaning there
   * is no way for the copy process to interfere with other transactions
   * running concurrently.
   * <p>
   * The conglomerate must be open before this method is called.
   *
   * @param dest_conglomerate the destination conglomerate to copy into.
   * @throws IOException if the copy fails.
   */
  public void liveCopyTo(TableDataConglomerate dest_conglomerate)
                                                        throws IOException {

    // The destination store system
    StoreSystem dest_store_system = dest_conglomerate.storeSystem();

    // Copy all the blob data from the given blob store to the current blob
    // store.
    dest_conglomerate.blob_store.copyFrom(dest_store_system, blob_store);

    // Open new transaction - this is the current view we are going to copy.
    Transaction transaction = createTransaction();

    try {

      // Copy the data in this transaction to the given destination store
      // system.
      transaction.liveCopyAllDataTo(dest_conglomerate);

    }
    finally {
      // Make sure we close the transaction
      try {
        transaction.closeAndCommit();
      }
      catch (TransactionException e) {
        // NOTE(review): the original cause is discarded here - only the
        // message survives into the RuntimeException.
        throw new RuntimeException("Transaction Error: " + e.getMessage());
      }
    }

    // Finished - increment the live copies counter.
    getSystem().stats().increment("TableDataConglomerate.liveCopies");

  }

  // ---------- Diagnostic and repair ----------

  /**
   * Returns a RawDiagnosticTable object that is used for diagnostics of the
   * table with the given file name.
   *
   * @param table_file_name the source ident of the table to diagnose.
   * @return the diagnostic table, or null if no table with this file name
   *   is in the table list.
   */
  public RawDiagnosticTable getDiagnosticTable(String table_file_name) {
    synchronized (commit_lock) {
      for (int i = 0; i < table_list.size(); ++i) {
        MasterTableDataSource master =
                                 (MasterTableDataSource) table_list.get(i);
        if (master.getSourceIdent().equals(table_file_name)) {
          return master.getRawDiagnosticTable();
        }
      }
    }
    return null;
  }

  /**
   * Returns the list of file names for all tables in this conglomerate.
   */
  public String[] getAllTableFileNames() {
    synchronized (commit_lock) {
      String[] list = new String[table_list.size()];
      for (int i = 0; i < table_list.size(); ++i) {
        MasterTableDataSource master =
                                 (MasterTableDataSource) table_list.get(i);
        list[i] = master.getSourceIdent();
      }
      return list;
    }
  }

  // ---------- Conglomerate event notification ----------

  /**
   * Adds a listener for transactional modification events that occur on the
   * given table in this conglomerate.
 * A transactional modification event is an event fired immediately upon the
 * modification of a table by a transaction, either immediately before the
 * modification or immediately after.  Also an event is fired when a
 * modification to a table is successfully committed.
 * <p>
 * The BEFORE_* type triggers are given the opportunity to modify the
 * contents of the RowData before the update or insert occurs.  All triggers
 * may generate an exception which will cause the transaction to rollback.
 * <p>
 * The event carries with it the event type, the transaction that the event
 * occurred in, and any information regarding the modification itself.
 * <p>
 * This event/listener mechanism is intended to be used to implement higher
 * layer database triggering systems.  Note that care must be taken with
 * the commit level events because they occur inside a commit lock on this
 * conglomerate and so synchronization and deadlock issues need to be
 * carefully considered.
 * <p>
 * NOTE: A listener on the given table will be notified of ALL table
 *   modification events by all transactions at the time they happen.
 *
 * @param table_name the name of the table in the conglomerate to listen for
 *   events from.
 * @param listener the listener to be notified of events.
 */
  public void addTransactionModificationListener(TableName table_name,
                                  TransactionModificationListener listener) {
    synchronized (modification_listeners) {
      ArrayList list = (ArrayList) modification_listeners.get(table_name);
      if (list == null) {
        // If the mapping doesn't exist then create the list for the table
        // here.
        list = new ArrayList();
        modification_listeners.put(table_name, list);
      }
      list.add(listener);
    }
  }

  /**
   * Removes a listener for transaction modification events on the given
   * table in this conglomerate as previously set by the
   * 'addTransactionModificationListener' method.
   *
   * @param table_name the name of the table in the conglomerate to remove
   *   from the listener list.
   * @param listener the listener to be removed.
   */
  public void removeTransactionModificationListener(TableName table_name,
                                  TransactionModificationListener listener) {
    synchronized (modification_listeners) {
      ArrayList list = (ArrayList) modification_listeners.get(table_name);
      if (list != null) {
        // Remove every occurrence of this listener (identity comparison),
        // iterating backwards so removal doesn't disturb the index.
        int sz = list.size();
        for (int i = sz - 1; i >= 0; --i) {
          if (list.get(i) == listener) {
            list.remove(i);
          }
        }
      }
    }
  }

  // ---------- Transactional management ----------

  /**
   * Starts a new transaction.  The Transaction object returned by this
   * method is used to read the contents of the database at the time
   * the transaction was started.  It is also used if any modifications are
   * required to be made.
   *
   * @return a new Transaction over the currently committed view of the
   *   conglomerate.
   */
  public Transaction createTransaction() {
    long this_commit_id;
    ArrayList this_committed_tables = new ArrayList();

    // Don't let a commit happen while we are looking at this.
    synchronized (commit_lock) {

      this_commit_id = commit_id;
      StateResource[] committed_table_list = state_store.getVisibleList();
      for (int i = 0; i < committed_table_list.length; ++i) {
        this_committed_tables.add(
                   getMasterTable((int) committed_table_list[i].table_id));
      }

      // Create a set of IndexSet for all the tables in this transaction.
      int sz = this_committed_tables.size();
      ArrayList index_info = new ArrayList(sz);
      for (int i = 0; i < sz; ++i) {
        MasterTableDataSource mtable =
                      (MasterTableDataSource) this_committed_tables.get(i);
        index_info.add(mtable.createIndexSet());
      }

      // Create the transaction and record it in the open transactions list.
      Transaction t = new Transaction(this,
                        this_commit_id, this_committed_tables, index_info);
      open_transactions.addTransaction(t);
      return t;

    }

  }

  /**
   * This is called to notify the conglomerate that the transaction has
   * closed.  This is always called from either the rollback or commit
   * method of the transaction object.
 *
 * NOTE: This increments 'commit_id' and requires that the conglomerate is
 *   commit locked.
 *
 * @param transaction the transaction that has closed.
 */
  private void closeTransaction(Transaction transaction) {
    boolean last_transaction = false;
    // Closing must happen under a commit lock.
    synchronized (commit_lock) {
      open_transactions.removeTransaction(transaction);
      // Increment the commit id.
      ++commit_id;
      // Was that the last transaction?
      last_transaction = open_transactions.count() == 0;
    }

    // If last transaction then schedule a clean up event.
    if (last_transaction) {
      try {
        cleanUpConglomerate();
      }
      catch (IOException e) {
        // Clean up is best-effort on this path - log the failure rather
        // than propagating it out of the transaction close.
        Debug().write(Lvl.ERROR, this, "Error cleaning up conglomerate");
        Debug().writeException(Lvl.ERROR, e);
      }
    }

  }

  /**
   * Closes and drops the MasterTableDataSource.  This should only be called
   * from the clean up method (cleanUpConglomerate()).
   * <p>
   * Returns true if the drop succeeded.  A drop may fail if, for example,
   * the roots of the table are locked.
   * <p>
* Note that the table_file_name will be encoded with the table type. For * example, ":2mighty.koi" */ private boolean closeAndDropTable(String table_file_name) throws IOException { // Find the table with this file name. for (int i = 0; i < table_list.size(); ++i) { MasterTableDataSource t = (MasterTableDataSource) table_list.get(i); String enc_fn = table_file_name.substring(2); if (t.getSourceIdent().equals(enc_fn)) { // Close and remove from the list. if (t.isRootLocked()) { // We can't drop a table that has roots locked.. return false; } // This drops if the table has been marked as being dropped. boolean b = t.drop(); if (b) { table_list.remove(i); } return b; } } return false; } /** * Closes the MasterTableDataSource with the given source ident. This should * only be called from the clean up method (cleanUpConglomerate()). *

* Note that the table_file_name will be encoded with the table type. For * example, ":2mighty.koi" */ private void closeTable(String table_file_name, boolean pending_drop) throws IOException { // Find the table with this file name. for (int i = 0; i < table_list.size(); ++i) { MasterTableDataSource t = (MasterTableDataSource) table_list.get(i); String enc_fn = table_file_name.substring(2); if (t.getSourceIdent().equals(enc_fn)) { // Close and remove from the list. if (t.isRootLocked()) { // We can't drop a table that has roots locked.. return; } // This closes the table t.dispose(pending_drop); return; } } return; } /** * Cleans up the conglomerate by deleting all tables marked as deleted. * This should be called when the conglomerate is opened, shutdown and * when there are no transactions open. */ private void cleanUpConglomerate() throws IOException { synchronized (commit_lock) { if (isClosed()) { return; } // If no open transactions on the database, then clean up. if (open_transactions.count() == 0) { StateResource[] delete_list = state_store.getDeleteList(); if (delete_list.length > 0) { int drop_count = 0; for (int i = delete_list.length - 1; i >= 0; --i) { String fn = (String) delete_list[i].name; closeTable(fn, true); } // // NASTY HACK: The native win32 file mapping will not // // let you delete a file that is mapped. The NIO API does not allow // // you to manually unmap a file, and the only way to unmap // // memory under win32 is to wait for the garbage collector to // // free it. So this is a hack to try and make the engine // // unmap the memory mapped buffer. // // // // This is not a problem under Unix/Linux because the OS has no // // difficulty deleting a file that is mapped. 
// // System.gc(); // try { // Thread.sleep(5); // } // catch (InterruptedException e) { /* ignore */ } for (int i = delete_list.length - 1; i >= 0; --i) { String fn = (String) delete_list[i].name; boolean dropped = closeAndDropTable(fn); // If we managed to drop the table, remove from the list. if (dropped) { state_store.removeDeleteResource(fn); ++drop_count; } } // If we dropped a table, commit an update to the conglomerate state. if (drop_count > 0) { state_store.commit(); } } } } } // ---------- Detection of constraint violations ---------- /** * A variable resolver for a single row of a table source. Used when * evaluating a check constraint for newly added row. */ private static class TableRowVariableResolver implements VariableResolver { private TableDataSource table; private int row_index = -1; public TableRowVariableResolver(TableDataSource table, int row) { this.table = table; this.row_index = row; } private int findColumnName(Variable variable) { int col_index = table.getDataTableDef().findColumnName( variable.getName()); if (col_index == -1) { throw new Error("Can't find column: " + variable); } return col_index; } // --- Implemented --- public int setID() { return row_index; } public TObject resolve(Variable variable) { int col_index = findColumnName(variable); return table.getCellContents(col_index, row_index); } public TType returnTType(Variable variable) { int col_index = findColumnName(variable); return table.getDataTableDef().columnAt(col_index).getTType(); } } /** * Convenience, converts a String[] array to a comma deliminated string * list. */ static String stringColumnList(String[] list) { StringBuffer buf = new StringBuffer(); for (int i = 0; i < list.length - 1; ++i) { buf.append(list[i]); } buf.append(list[list.length - 1]); return new String(buf); } /** * Convenience, returns either 'Immediate' or 'Deferred' dependant on the * deferred short. 
*/
  static String deferredString(short deferred) {
    switch(deferred) {
      case(Transaction.INITIALLY_IMMEDIATE):
        return "Immediate";
      case(Transaction.INITIALLY_DEFERRED):
        return "Deferred";
      default:
        throw new Error("Unknown deferred string.");
    }
  }

  /**
   * Returns a list of column indices into the given DataTableDef for the
   * given column names.
   *
   * @param table_def the table definition to resolve against.
   * @param cols the column names to resolve.
   */
  static int[] findColumnIndices(DataTableDef table_def, String[] cols) {
    // Resolve the list of column names to column indexes
    int[] col_indexes = new int[cols.length];
    for (int i = 0; i < cols.length; ++i) {
      col_indexes[i] = table_def.findColumnName(cols[i]);
    }
    return col_indexes;
  }

  /**
   * Checks the uniqueness of the columns in the row of the table.  If
   * the given column information in the row data is not unique then it
   * returns false.  We also check for a NULL values - a PRIMARY KEY
   * constraint does not allow NULL values, whereas a UNIQUE constraint
   * does.
   *
   * @param table the table to test.
   * @param rindex the row being tested (the row is assumed to already be
   *   present in the table's column schemes).
   * @param cols the column names making up the key.
   * @param nulls_are_allowed true for UNIQUE semantics, false for
   *   PRIMARY KEY semantics.
   */
  private static boolean isUniqueColumns(
                     TableDataSource table, int rindex, String[] cols,
                     boolean nulls_are_allowed) {

    DataTableDef table_def = table.getDataTableDef();
    // 'identical_rows' keeps a tally of the rows that match our added cell.
    IntegerVector identical_rows = null;

    // Resolve the list of column names to column indexes
    int[] col_indexes = findColumnIndices(table_def, cols);

    // If the value being tested for uniqueness contains NULL, we return
    // true if nulls are allowed.
    for (int i = 0; i < col_indexes.length; ++i) {
      TObject cell = table.getCellContents(col_indexes[i], rindex);
      if (cell.isNull()) {
        return nulls_are_allowed;
      }
    }

    for (int i = 0; i < col_indexes.length; ++i) {

      int col_index = col_indexes[i];

      // Get the column definition and the cell being inserted,
//      DataTableColumnDef column_def = table_def.columnAt(col_index);
      TObject cell = table.getCellContents(col_index, rindex);

      // We are assured of uniqueness if 'identical_rows != null &&
      // identical_rows.size() == 0'  This is because 'identical_rows' keeps
      // a running tally of the rows in the table that contain unique
      // columns whose cells match the record being added.

      if (identical_rows == null || identical_rows.size() > 0) {

        // Ask SelectableScheme to return pointers to row(s) if there is
        // already a cell identical to this in the table.

        SelectableScheme ss = table.getColumnScheme(col_index);
        IntegerVector ivec = ss.selectEqual(cell);

        // If 'identical_rows' hasn't been set up yet then set it to 'ivec'
        // (the list of rows where there is a cell which is equal to the one
        //  being added)
        // If 'identical_rows' has been set up, then perform an
        // 'intersection' operation on the two lists (only keep the numbers
        // that are repeated in both lists).  Therefore we keep the rows
        // that match the row being added.

        if (identical_rows == null) {
          identical_rows = ivec;
        }
        else {
          // Intersect by walking 'identical_rows' backwards and removing
          // entries not present in the sorted 'ivec'.
          ivec.quickSort();
          int row_index = identical_rows.size() - 1;
          while (row_index >= 0) {
            int val = identical_rows.intAt(row_index);
            int found_index = ivec.sortedIndexOf(val);
            // If we _didn't_ find the index in the array
            if (found_index >= ivec.size() ||
                ivec.intAt(found_index) != val) {
              identical_rows.removeIntAt(row_index);
            }
            --row_index;
          }
        }

      }

    } // for each column

    // If there is 1 (the row we added) then we are unique, otherwise we are
    // not.
    if (identical_rows != null) {
      int sz = identical_rows.size();
      if (sz == 1) {
        return true;
      }
      if (sz > 1) {
        return false;
      }
      else if (sz == 0) {
        throw new Error("Assertion failed: We must be able to find the " +
                        "row we are testing uniqueness against!");
      }
    }
    return true;

  }

  /**
   * Returns the key indices found in the given table.  The keys are
   * in the given column indices, and the key is in the 'key' array.  This
   * can be used to count the number of keys found in a table for constraint
   * violation checking.
   *
   * @param t2 the table searched for the key.
   * @param col2_indexes the indices of the key columns in 't2'.
   * @param key_value the key values to search for.
   */
  static IntegerVector findKeys(TableDataSource t2, int[] col2_indexes,
                                TObject[] key_value) {

    int key_size = key_value.length;

    // Now query table 2 to determine if the key values are present.
    // Use index scan on first key.
    SelectableScheme ss = t2.getColumnScheme(col2_indexes[0]);
    IntegerVector list = ss.selectEqual(key_value[0]);
    if (key_size > 1) {
      // Full scan for the rest of the columns
      int sz = list.size();
      // For each element of the list
      for (int i = sz - 1; i >= 0; --i) {
        int r_index = list.intAt(i);
        // For each key in the column list
        for (int c = 1; c < key_size; ++c) {
          int col_index = col2_indexes[c];
          TObject c_value = key_value[c];
          if (c_value.compareTo(t2.getCellContents(col_index, r_index))
                                                                    != 0) {
            // If any values in the key are not equal set this flag to false
            // and remove the index from the list.
            list.removeIntAt(i);
            // Break the for loop
            break;
          }
        }
      }
    }
    return list;

  }

  /**
   * Finds the number of rows that are referenced between the given row of
   * table1 and that match table2.  This method is used to determine if
   * there are referential links.
   * <p>
   * If this method returns -1 it means the value being searched for is NULL
   * therefore we can't determine if there are any referenced links.
   * <p>
   * HACK: If 'check_source_table_key' is set then the key is checked for in
   * the source table and if it exists returns 0.  Otherwise it looks for
   * references to the key in table2.
   *
   * @param transaction the transaction supplying the table views.
   * @param row_index the row of 'table1' whose key is looked up.
   * @param table1 the table the key is read from.
   * @param cols1 the key columns in 'table1'.
   * @param table2 the table searched for references.
   * @param cols2 the referenced columns in 'table2'.
   * @param check_source_table_key see the HACK note above.
   */
  private static int rowCountOfReferenceTable(
                 SimpleTransaction transaction,
                 int row_index, TableName table1, String[] cols1,
                               TableName table2, String[] cols2,
                 boolean check_source_table_key) {

    // Get the tables
    TableDataSource t1 = transaction.getTableDataSource(table1);
    TableDataSource t2 = transaction.getTableDataSource(table2);
    // The table defs
    DataTableDef dtd1 = t1.getDataTableDef();
    DataTableDef dtd2 = t2.getDataTableDef();
    // Resolve the list of column names to column indexes
    int[] col1_indexes = findColumnIndices(dtd1, cols1);
    int[] col2_indexes = findColumnIndices(dtd2, cols2);

    int key_size = col1_indexes.length;
    // Get the data from table1
    TObject[] key_value = new TObject[key_size];
    int null_count = 0;
    for (int n = 0; n < key_size; ++n) {
      key_value[n] = t1.getCellContents(col1_indexes[n], row_index);
      if (key_value[n].isNull()) {
        ++null_count;
      }
    }

    // If we are searching for null then return -1;
    if (null_count > 0) {
      return -1;
    }

    // HACK: This is a hack.  The purpose is if the key exists in the source
    //   table we return 0 indicating to the delete check that there are no
    //   references and it's valid.  To the semantics of the method this is
    //   incorrect.
    if (check_source_table_key) {
      IntegerVector keys = findKeys(t1, col1_indexes, key_value);
      int key_count = keys.size();
      if (key_count > 0) {
        return 0;
      }
    }

    return findKeys(t2, col2_indexes, key_value).size();

  }

  /**
   * Checks that the nullability and class of the fields in the given
   * rows are valid.  Should be used as part of the insert procedure.
*/ static void checkFieldConstraintViolations( SimpleTransaction transaction, TableDataSource table, int[] row_indices) { // Quick exit case if (row_indices == null || row_indices.length == 0) { return; } // Check for any bad cells - which are either cells that are 'null' in a // column declared as 'not null', or duplicated in a column declared as // unique. DataTableDef table_def = table.getDataTableDef(); TableName table_name = table_def.getTableName(); // Check not-null columns are not null. If they are null, throw an // error. Additionally check that JAVA_OBJECT columns are correctly // typed. // Check each field of the added rows int len = table_def.columnCount(); for (int i = 0; i < len; ++i) { // Get the column definition and the cell being inserted, DataTableColumnDef column_def = table_def.columnAt(i); // For each row added to this column for (int rn = 0; rn < row_indices.length; ++rn) { TObject cell = table.getCellContents(i, row_indices[rn]); // Check: Column defined as not null and cell being inserted is // not null. if (column_def.isNotNull() && cell.isNull()) { throw new DatabaseConstraintViolationException( DatabaseConstraintViolationException.NULLABLE_VIOLATION, "You tried to add 'null' cell to column '" + table_def.columnAt(i).getName() + "' which is declared as 'not_null'"); } // Check: If column is a java object, then deserialize and check the // object is an instance of the class constraint, if (!cell.isNull() && column_def.getSQLType() == com.mckoi.database.global.SQLTypes.JAVA_OBJECT) { String class_constraint = column_def.getClassConstraint(); // Everything is derived from java.lang.Object so this optimization // will not cause an object deserialization. 
if (!class_constraint.equals("java.lang.Object")) { // Get the binary representation of the java object ByteLongObject serialized_jobject = (ByteLongObject) cell.getObject(); // Deserialize the object Object ob = ObjectTranslator.deserialize(serialized_jobject); // Check it's assignable from the constraining class if (!ob.getClass().isAssignableFrom( column_def.getClassConstraintAsClass())) { throw new DatabaseConstraintViolationException( DatabaseConstraintViolationException.JAVA_TYPE_VIOLATION, "The Java object being inserted is not derived from the " + "class constraint defined for the column (" + class_constraint + ")"); } } } } // For each row being added } // for each column } /** * Performs constraint violation checks on an addition of the given set of * row indices into the TableDataSource in the given transaction. If a * violation is detected a DatabaseConstraintViolationException is thrown. *
 *
 * If deferred = IMMEDIATE only immediate constraints are tested.  If
 * deferred = DEFERRED all constraints are tested.
 *
 * @param transaction the Transaction instance used to determine table
 *   constraints.
 * @param table the table to test
 * @param row_indices the list of rows that were added to the table.
 * @param deferred '1' indicates Transaction.IMMEDIATE,
 *   '2' indicates Transaction.DEFERRED.
 */
  static void checkAddConstraintViolations(
           SimpleTransaction transaction,
           TableDataSource table, int[] row_indices, short deferred) {

    String cur_schema = table.getDataTableDef().getSchema();
    QueryContext context = new SystemQueryContext(transaction, cur_schema);

    // Quick exit case
    if (row_indices == null || row_indices.length == 0) {
      return;
    }

    DataTableDef table_def = table.getDataTableDef();
    TableName table_name = table_def.getTableName();

    // ---- Constraint checking ----

    // Check any primary key constraint.
    Transaction.ColumnGroup primary_key =
               Transaction.queryTablePrimaryKeyGroup(transaction, table_name);
    if (primary_key != null &&
        (deferred == Transaction.INITIALLY_DEFERRED ||
         primary_key.deferred == Transaction.INITIALLY_IMMEDIATE)) {

      // For each row added to this column
      for (int rn = 0; rn < row_indices.length; ++rn) {
        // A primary key does not allow NULL values (4th argument false).
        if (!isUniqueColumns(table, row_indices[rn],
                             primary_key.columns, false)) {
          throw new DatabaseConstraintViolationException(
            DatabaseConstraintViolationException.PRIMARY_KEY_VIOLATION,
            deferredString(deferred) + " primary Key constraint violation (" +
            primary_key.name + ") Columns = ( " +
            stringColumnList(primary_key.columns) +
            " ) Table = ( " + table_name.toString() + " )");
        }
      } // For each row being added

    }

    // Check any unique constraints.
    Transaction.ColumnGroup[] unique_constraints =
                Transaction.queryTableUniqueGroups(transaction, table_name);
    for (int i = 0; i < unique_constraints.length; ++i) {
      Transaction.ColumnGroup unique = unique_constraints[i];
      if (deferred == Transaction.INITIALLY_DEFERRED ||
          unique.deferred == Transaction.INITIALLY_IMMEDIATE) {

        // For each row added to this column
        for (int rn = 0; rn < row_indices.length; ++rn) {
          // A unique constraint allows NULL values (4th argument true).
          if (!isUniqueColumns(table, row_indices[rn],
                               unique.columns, true)) {
            throw new DatabaseConstraintViolationException(
              DatabaseConstraintViolationException.UNIQUE_VIOLATION,
              deferredString(deferred) + " unique constraint violation (" +
              unique.name + ") Columns = ( " +
              stringColumnList(unique.columns) + " ) Table = ( " +
              table_name.toString() + " )");
          }
        } // For each row being added

      }
    }

    // Check any foreign key constraints.
    // This ensures all foreign references in the table are referenced
    // to valid records.
    Transaction.ColumnGroupReference[] foreign_constraints =
          Transaction.queryTableForeignKeyReferences(transaction, table_name);
    for (int i = 0; i < foreign_constraints.length; ++i) {
      Transaction.ColumnGroupReference ref = foreign_constraints[i];
      if (deferred == Transaction.INITIALLY_DEFERRED ||
          ref.deferred == Transaction.INITIALLY_IMMEDIATE) {
        // For each row added to this column
        for (int rn = 0; rn < row_indices.length; ++rn) {
          // Make sure the referenced record exists
          // Return the count of records where the given row of
          //   table_name(columns, ...) IN
          //                    ref_table_name(ref_columns, ...)
          int row_count = rowCountOfReferenceTable(transaction,
                                   row_indices[rn],
                                   ref.key_table_name, ref.key_columns,
                                   ref.ref_table_name, ref.ref_columns,
                                   false);
          if (row_count == -1) {
            // foreign key is NULL
          }
          if (row_count == 0) {
            throw new DatabaseConstraintViolationException(
              DatabaseConstraintViolationException.FOREIGN_KEY_VIOLATION,
              deferredString(deferred)+" foreign key constraint violation (" +
              ref.name + ") Columns = " +
              ref.key_table_name.toString() + "( " +
              stringColumnList(ref.key_columns) + " ) -> " +
              ref.ref_table_name.toString() + "( " +
              stringColumnList(ref.ref_columns) + " )");
          }
        } // For each row being added.
      }
    }

    // Any general checks of the inserted data
    Transaction.CheckExpression[] check_constraints =
          Transaction.queryTableCheckExpressions(transaction, table_name);

    // The TransactionSystem object
    TransactionSystem system = transaction.getSystem();

    // For each check constraint, check that it evaluates to true.
    for (int i = 0; i < check_constraints.length; ++i) {
      Transaction.CheckExpression check = check_constraints[i];
      if (deferred == Transaction.INITIALLY_DEFERRED ||
          check.deferred == Transaction.INITIALLY_IMMEDIATE) {

        check = system.prepareTransactionCheckConstraint(table_def, check);
        Expression exp = check.expression;

        // For each row being added to this column
        for (int rn = 0; rn < row_indices.length; ++rn) {
          // Resolve check expression variables against the cells of the
          // row being added.
          TableRowVariableResolver resolver =
                       new TableRowVariableResolver(table, row_indices[rn]);
          TObject ob = exp.evaluate(null, resolver, context);
          Boolean b = ob.toBoolean();

          if (b != null) {
            if (b.equals(Boolean.FALSE)) {
              // Evaluated to false so don't allow this row to be added.
              throw new DatabaseConstraintViolationException(
                 DatabaseConstraintViolationException.CHECK_VIOLATION,
                 deferredString(deferred) + " check constraint violation (" +
                 check.name + ") - '" + exp.text() +
                 "' evaluated to false for inserted/updated row.");
            }
          }
          else {
            // NOTE: This error will pass the row by default
            transaction.Debug().write(Lvl.ERROR,
                                      TableDataConglomerate.class,
                  deferredString(deferred) + " check constraint violation (" +
                  check.name + ") - '" + exp.text() +
                  "' returned a non boolean or NULL result.");
          }
        } // For each row being added

      }
    }

  }

  /**
   * Performs constraint violation checks on an addition of the given
   * row index into the TableDataSource in the given transaction.  If a
   * violation is detected a DatabaseConstraintViolationException is thrown.
 *
 * If deferred = IMMEDIATE only immediate constraints are tested.  If
 * deferred = DEFERRED all constraints are tested.
 *
 * @param transaction the Transaction instance used to determine table
 *   constraints.
 * @param table the table to test
 * @param row_index the row that was added to the table.
 * @param deferred '1' indicates Transaction.IMMEDIATE,
 *   '2' indicates Transaction.DEFERRED.
 */
  static void checkAddConstraintViolations(
             SimpleTransaction transaction, TableDataSource table,
             int row_index, short deferred) {
    // Delegate to the multi-row variant.
    checkAddConstraintViolations(transaction, table,
                                 new int[] { row_index }, deferred);
  }

  /**
   * Performs constraint violation checks on a removal of the given set of
   * row indexes from the TableDataSource in the given transaction.  If a
   * violation is detected a DatabaseConstraintViolationException is thrown.
   * <p>
   * If deferred = IMMEDIATE only immediate constraints are tested.  If
   * deferred = DEFERRED all constraints are tested.
   *
   * @param transaction the Transaction instance used to determine table
   *   constraints.
   * @param table the table to test
   * @param row_indices the set of rows that were removed from the table.
   * @param deferred '1' indicates Transaction.IMMEDIATE,
   *   '2' indicates Transaction.DEFERRED.
   */
  static void checkRemoveConstraintViolations(
           SimpleTransaction transaction, TableDataSource table,
           int[] row_indices, short deferred) {

    // Quick exit case
    if (row_indices == null || row_indices.length == 0) {
      return;
    }

    DataTableDef table_def = table.getDataTableDef();
    TableName table_name = table_def.getTableName();

    // Check any imported foreign key constraints.
    // This ensures that a referential reference can not be removed making
    // it invalid.
    Transaction.ColumnGroupReference[] foreign_constraints =
        Transaction.queryTableImportedForeignKeyReferences(
                                                  transaction, table_name);
    for (int i = 0; i < foreign_constraints.length; ++i) {
      Transaction.ColumnGroupReference ref = foreign_constraints[i];
      if (deferred == Transaction.INITIALLY_DEFERRED ||
          ref.deferred == Transaction.INITIALLY_IMMEDIATE) {
        // For each row removed from this column
        for (int rn = 0; rn < row_indices.length; ++rn) {
          // Make sure the referenced record exists
          // Return the count of records where the given row of
          //   ref_table_name(columns, ...) IN
          //                    table_name(ref_columns, ...)
          int row_count = rowCountOfReferenceTable(transaction,
                                   row_indices[rn],
                                   ref.ref_table_name, ref.ref_columns,
                                   ref.key_table_name, ref.key_columns,
                                   true);
          // There must be 0 references otherwise the delete isn't allowed
          // to happen.
          if (row_count > 0) {
            throw new DatabaseConstraintViolationException(
              DatabaseConstraintViolationException.FOREIGN_KEY_VIOLATION,
              deferredString(deferred)+" foreign key constraint violation " +
              "on delete (" +
              ref.name + ") Columns = " +
              ref.key_table_name.toString() + "( " +
              stringColumnList(ref.key_columns) + " ) -> " +
              ref.ref_table_name.toString() + "( " +
              stringColumnList(ref.ref_columns) + " )");
          }
        } // For each row being added.
      }
    }

  }

  /**
   * Performs constraint violation checks on a removal of the given
   * row index from the TableDataSource in the given transaction.  If a
   * violation is detected a DatabaseConstraintViolationException is thrown.
 *
 * If deferred = IMMEDIATE only immediate constraints are tested.  If
 * deferred = DEFERRED all constraints are tested.
 *
 * @param transaction the Transaction instance used to determine table
 *   constraints.
 * @param table the table to test
 * @param row_index the row that was removed from the table.
 * @param deferred '1' indicates Transaction.IMMEDIATE,
 *   '2' indicates Transaction.DEFERRED.
 */
  static void checkRemoveConstraintViolations(
              SimpleTransaction transaction,
              TableDataSource table, int row_index, short deferred) {
    // Delegate to the multi-row variant.
    checkRemoveConstraintViolations(transaction, table,
                                    new int[] { row_index }, deferred);
  }

  /**
   * Performs constraint violation checks on all the rows in the given
   * table.  If a violation is detected a
   * DatabaseConstraintViolationException is thrown.
   * <p>
   * This method is useful when the constraint schema of a table changes and
   * we need to check existing data in a table is conformant with the new
   * constraint changes.
   * <p>
   * If deferred = IMMEDIATE only immediate constraints are tested.  If
   * deferred = DEFERRED all constraint are tested.
   */
  static void checkAllAddConstraintViolations(
               SimpleTransaction transaction, TableDataSource table,
               short deferred) {
    // Get all the rows in the table
    int[] rows = new int[table.getRowCount()];
    RowEnumeration row_enum = table.rowEnumeration();
    int p = 0;
    while (row_enum.hasMoreRows()) {
      rows[p] = row_enum.nextRowIndex();
      ++p;
    }
    // Check the constraints of all the rows in the table.
    // NOTE(review): the 'deferred' parameter is ignored here -
    //   INITIALLY_DEFERRED is always passed, which forces ALL constraints
    //   to be tested regardless of the argument.  Confirm whether this is
    //   intentional before relying on 'deferred'.
    checkAddConstraintViolations(transaction, table,
                                 rows, Transaction.INITIALLY_DEFERRED);
  }

  // ---------- Blob store and object management ----------

  /**
   * Creates and allocates storage for a new large object in the blob store.
   * This is called to create a new large object before filling it with data
   * sent from the client.
   *
   * @param type the type of the large object.
   * @param size the size in bytes to allocate for the object.
   * @return a reference to the newly allocated large object.
   */
  Ref createNewLargeObject(byte type, long size) {
    try {
      // If the conglomerate is read-only, a blob can not be created.
      if (isReadOnly()) {
        throw new RuntimeException(
            "A new large object can not be allocated " +
            "with a read-only conglomerate");
      }
      // Allocate the large object from the store
      Ref ref = blob_store.allocateLargeObject(type, size);
      // Return the large object reference
      return ref;
    }
    catch (IOException e) {
      // NOTE(review): the IOException is logged but not chained as the
      //   cause of the RuntimeException - only its message survives.
      Debug().writeException(e);
      throw new RuntimeException("IO Error when creating blob: " +
                                 e.getMessage());
    }
  }

  /**
   * Called when one or more blobs has been completed.  This flushes the
   * blob to the blob store and completes the blob write procedure.  It's
   * important this is called otherwise the BlobStore may not be correctly
   * flushed to disk with the changes and the data will not be recoverable
   * if a crash occurs.
   */
  void flushBlobStore() {
    // NOTE: no longer necessary - please deprecate
  }

  // ---------- Conglomerate diagnosis and repair methods ----------

  /**
   * Checks the conglomerate state file.  The returned ErrorState object
   * contains information about any error generated.
*/ public void fix(String name, UserTerminal terminal) { this.name = name; try { String state_fn = (name + STATE_POST); boolean state_exists = false; try { state_exists = exists(name); } catch (IOException e) { terminal.println("IO Error when checking if state store exists: " + e.getMessage()); e.printStackTrace(); } if (!state_exists) { terminal.println("Couldn't find store: " + state_fn); return; } terminal.println("+ Found state store: " + state_fn); // Open the state store try { act_state_store = storeSystem().openStore(name + STATE_POST); state_store = new StateStore(act_state_store); // Get the 64 byte fixed area Area fixed_area = act_state_store.getArea(-1); long head_p = fixed_area.getLong(); state_store.init(head_p); terminal.println("+ Initialized the state store: " + state_fn); } catch (IOException e) { // Couldn't initialize the state file. terminal.println("Couldn't initialize the state file: " + state_fn + " Reason: " + e.getMessage()); return; } // Initialize the blob store try { initializeBlobStore(); } catch (IOException e) { terminal.println("Error intializing BlobStore: " + e.getMessage()); e.printStackTrace(); return; } // Setup internal setupInternal(); try { checkVisibleTables(terminal); // Reset the sequence id's for the system tables terminal.println("+ RESETTING ALL SYSTEM TABLE UNIQUE ID VALUES."); resetAllSystemTableID(); // Some diagnostic information StringBuffer buf = new StringBuffer(); MasterTableDataSource t; StateResource[] committed_tables = state_store.getVisibleList(); StateResource[] committed_dropped = state_store.getDeleteList(); for (int i = 0; i < committed_tables.length; ++i) { terminal.println("+ COMMITTED TABLE: " + committed_tables[i].name); } for (int i = 0; i < committed_dropped.length; ++i) { terminal.println("+ COMMIT DROPPED TABLE: " + committed_dropped[i].name); } return; } catch (IOException e) { terminal.println("IOException: " + e.getMessage()); e.printStackTrace(); } } finally { try { close(); } catch 
(IOException e) { terminal.println("Unable to close conglomerate after fix."); } } } // ---------- Conveniences for commit ---------- /** * A static container class for information collected about a table during * the commit cycle. */ private static class CommitTableInfo { // The master table MasterTableDataSource master; // The immutable index set IndexSet index_set; // The journal describing the changes to this table by this // transaction. MasterTableJournal journal; // A list of journals describing changes since this transaction // started. MasterTableJournal[] changes_since_commit; // Break down of changes to the table // Normalized list of row ids that were added int[] norm_added_rows; // Normalized list of row ids that were removed int[] norm_removed_rows; } /** * Returns true if the given List of 'CommitTableInfo' objects contains an * entry for the given master table. */ private static boolean commitTableListContains(List list, MasterTableDataSource master) { int sz = list.size(); for (int i = 0; i < sz; ++i) { CommitTableInfo info = (CommitTableInfo) list.get(i); if (info.master.equals(master)) { return true; } } return false; } // ---------- low level File IO level operations on a conglomerate ---------- // These operations are low level IO operations on the contents of the // conglomerate. How the rows and tables are organised is up to the // transaction managemenet. These methods deal with the low level // operations of creating/dropping tables and adding, deleting and querying // row in tables. /** * Tries to commit a transaction to the conglomerate. This is called * by the 'closeAndCommit' method in Transaction. An overview of how this * works follows: *

* * @param transaction the transaction to commit from. * @param visible_tables the list of visible tables at the end of the commit * (MasterTableDataSource) * @param selected_from_tables ths list of tables that this transaction * performed 'select' like queries on (MasterTableDataSource) * @param touched_tables the list of tables touched by the transaction * (MutableTableDataSource) * @param journal the journal that describes all the changes within the * transaction. */ void processCommit(Transaction transaction, ArrayList visible_tables, ArrayList selected_from_tables, ArrayList touched_tables, TransactionJournal journal) throws TransactionException { // Get individual journals for updates made to tables in this // transaction. // The list MasterTableJournal ArrayList journal_list = new ArrayList(); for (int i = 0; i < touched_tables.size(); ++i) { MasterTableJournal table_journal = ((MutableTableDataSource) touched_tables.get(i)).getJournal(); if (table_journal.entries() > 0) { // Check the journal has entries. journal_list.add(table_journal); } } MasterTableJournal[] changed_tables = (MasterTableJournal[]) journal_list.toArray( new MasterTableJournal[journal_list.size()]); // The list of tables created by this journal. IntegerVector created_tables = journal.getTablesCreated(); // Ths list of tables dropped by this journal. IntegerVector dropped_tables = journal.getTablesDropped(); // The list of tables that constraints were alter by this journal IntegerVector constraint_altered_tables = journal.getTablesConstraintAltered(); // Exit early if nothing changed (this is a read-only transaction) if (changed_tables.length == 0 && created_tables.size() == 0 && dropped_tables.size() == 0 && constraint_altered_tables.size() == 0) { closeTransaction(transaction); return; } // This flag is set to true when entries from the changes tables are // at a point of no return. If this is false it is safe to rollback // changes if necessary. 
boolean entries_committed = false; // The tables that were actually changed (MasterTableDataSource) ArrayList changed_tables_list = new ArrayList(); // Grab the commit lock. synchronized (commit_lock) { // Get the list of all database objects that were created in the // transaction. ArrayList database_objects_created = transaction.getAllNamesCreated(); // Get the list of all database objects that were dropped in the // transaction. ArrayList database_objects_dropped = transaction.getAllNamesDropped(); // This is a transaction that will represent the view of the database // at the end of the commit Transaction check_transaction = null; try { // ---- Commit check stage ---- long tran_commit_id = transaction.getCommitID(); // We only perform this check if transaction error on dirty selects // are enabled. if (transaction.transactionErrorOnDirtySelect()) { // For each table that this transaction selected from, if there are // any committed changes then generate a transaction error. for (int i = 0; i < selected_from_tables.size(); ++i) { MasterTableDataSource selected_table = (MasterTableDataSource) selected_from_tables.get(i); // Find all committed journals equal to or greater than this // transaction's commit_id. MasterTableJournal[] journals_since = selected_table.findAllJournalsSince(tran_commit_id); if (journals_since.length > 0) { // Yes, there are changes so generate transaction error and // rollback. throw new TransactionException( TransactionException.DIRTY_TABLE_SELECT, "Concurrent Serializable Transaction Conflict(4): " + "Select from table that has committed changes: " + selected_table.getName()); } } } // Check there isn't a namespace clash with database objects. // We need to create a list of all create and drop activity in the // conglomerate from when the transaction started. 
ArrayList all_dropped_obs = new ArrayList(); ArrayList all_created_obs = new ArrayList(); int nsj_sz = namespace_journal_list.size(); for (int i = 0; i < nsj_sz; ++i) { NameSpaceJournal ns_journal = (NameSpaceJournal) namespace_journal_list.get(i); if (ns_journal.commit_id >= tran_commit_id) { all_dropped_obs.addAll(ns_journal.dropped_names); all_created_obs.addAll(ns_journal.created_names); } } // The list of all dropped objects since this transaction // began. int ado_sz = all_dropped_obs.size(); boolean conflict5 = false; Object conflict_name = null; String conflict_desc = ""; for (int n = 0; n < ado_sz; ++n) { if (database_objects_dropped.contains(all_dropped_obs.get(n))) { conflict5 = true; conflict_name = all_dropped_obs.get(n); conflict_desc = "Drop Clash"; } } // The list of all created objects since this transaction // began. int aco_sz = all_created_obs.size(); for (int n = 0; n < aco_sz; ++n) { if (database_objects_created.contains(all_created_obs.get(n))) { conflict5 = true; conflict_name = all_created_obs.get(n); conflict_desc = "Create Clash"; } } if (conflict5) { // Namespace conflict... throw new TransactionException( TransactionException.DUPLICATE_TABLE, "Concurrent Serializable Transaction Conflict(5): " + "Namespace conflict: " + conflict_name.toString() + " " + conflict_desc); } // For each journal, for (int i = 0; i < changed_tables.length; ++i) { MasterTableJournal change_journal = changed_tables[i]; // The table the change was made to. int table_id = change_journal.getTableID(); // Get the master table with this table id. MasterTableDataSource master = getMasterTable(table_id); // True if the state contains a committed resource with the given name boolean committed_resource = state_store.containsVisibleResource(table_id); // Check this table is still in the committed tables list. 
if (!created_tables.contains(table_id) && !committed_resource) { // This table is no longer a committed table, so rollback throw new TransactionException( TransactionException.TABLE_DROPPED, "Concurrent Serializable Transaction Conflict(2): " + "Table altered/dropped: " + master.getName()); } // Since this journal was created, check to see if any changes to the // tables have been committed since. // This will return all journals on the table with the same commit_id // or greater. MasterTableJournal[] journals_since = master.findAllJournalsSince(tran_commit_id); // For each journal, determine if there's any clashes. for (int n = 0; n < journals_since.length; ++n) { // This will thrown an exception if a commit classes. change_journal.testCommitClash(master.getDataTableDef(), journals_since[n]); } } // Look at the transaction journal, if a table is dropped that has // journal entries since the last commit then we have an exception // case. for (int i = 0; i < dropped_tables.size(); ++i) { int table_id = dropped_tables.intAt(i); // Get the master table with this table id. MasterTableDataSource master = getMasterTable(table_id); // Any journal entries made to this dropped table? if (master.findAllJournalsSince(tran_commit_id).length > 0) { // Oops, yes, rollback! throw new TransactionException( TransactionException.TABLE_REMOVE_CLASH, "Concurrent Serializable Transaction Conflict(3): " + "Dropped table has modifications: " + master.getName()); } } // Tests passed so go on to commit, // ---- Commit stage ---- // Create a normalized list of MasterTableDataSource of all tables that // were either changed (and not dropped), and created (and not dropped). // This list represents all tables that are either new or changed in // this transaction. 
final int created_tables_count = created_tables.size(); final int changed_tables_count = changed_tables.length; final ArrayList normalized_changed_tables = new ArrayList(8); // Add all tables that were changed and not dropped in this transaction. for (int i = 0; i < changed_tables_count; ++i) { MasterTableJournal table_journal = changed_tables[i]; // The table the changes were made to. int table_id = table_journal.getTableID(); // If this table is not dropped in this transaction and is not // already in the normalized list then add it. if (!dropped_tables.contains(table_id)) { MasterTableDataSource master_table = getMasterTable(table_id); CommitTableInfo table_info = new CommitTableInfo(); table_info.master = master_table; table_info.journal = table_journal; table_info.changes_since_commit = master_table.findAllJournalsSince(tran_commit_id); normalized_changed_tables.add(table_info); } } // Add all tables that were created and not dropped in this transaction. for (int i = 0; i < created_tables_count; ++i) { int table_id = created_tables.intAt(i); // If this table is not dropped in this transaction then this is a // new table in this transaction. if (!dropped_tables.contains(table_id)) { MasterTableDataSource master_table = getMasterTable(table_id); if (!commitTableListContains(normalized_changed_tables, master_table)) { // This is for entries that are created but modified (no journal). CommitTableInfo table_info = new CommitTableInfo(); table_info.master = master_table; normalized_changed_tables.add(table_info); } } } // The final size of the normalized changed tables list final int norm_changed_tables_count = normalized_changed_tables.size(); // Create a normalized list of MasterTableDataSource of all tables that // were dropped (and not created) in this transaction. This list // represents tables that will be dropped if the transaction // successfully commits. 
final int dropped_tables_count = dropped_tables.size(); final ArrayList normalized_dropped_tables = new ArrayList(8); for (int i = 0; i < dropped_tables_count; ++i) { // The dropped table int table_id = dropped_tables.intAt(i); // Was this dropped table also created? If it was created in this // transaction then we don't care about it. if (!created_tables.contains(table_id)) { MasterTableDataSource master_table = getMasterTable(table_id); normalized_dropped_tables.add(master_table); } } // We now need to create a SimpleTransaction object that we // use to send to the triggering mechanism. This // SimpleTransaction represents a very specific view of the // transaction. This view contains the latest version of changed // tables in this transaction. It also contains any tables that have // been created by this transaction and does not contain any tables // that have been dropped. Any tables that have not been touched by // this transaction are shown in their current committed state. // To summarize - this view is the current view of the database plus // any modifications made by the transaction that is being committed. // How this works - All changed tables are merged with the current // committed table. All created tables are added into check_transaction // and all dropped tables are removed from check_transaction. If // there were no other changes to a table between the time the // transaction was created and now, the view of the table in the // transaction is used, otherwise the latest changes are merged. // Note that this view will be the view that the database will // ultimately become if this transaction successfully commits. Also, // you should appreciate that this view is NOT exactly the same as // the current trasaction view because any changes that have been // committed by concurrent transactions will be reflected in this view. // Create a new transaction of the database which will represent the // committed view if this commit is successful. 
check_transaction = createTransaction(); // Overwrite this view with tables from this transaction that have // changed or have been added or dropped. // (Note that order here is important). First drop any tables from // this view. for (int i = 0; i < normalized_dropped_tables.size(); ++i) { // Get the table MasterTableDataSource master_table = (MasterTableDataSource) normalized_dropped_tables.get(i); // Drop this table in the current view check_transaction.removeVisibleTable(master_table); } // Now add any changed tables to the view. // Represents view of the changed tables TableDataSource[] changed_table_source = new TableDataSource[norm_changed_tables_count]; // Set up the above arrays for (int i = 0; i < norm_changed_tables_count; ++i) { // Get the information for this changed table CommitTableInfo table_info = (CommitTableInfo) normalized_changed_tables.get(i); // Get the master table that changed from the normalized list. MasterTableDataSource master = table_info.master; // Did this table change since the transaction started? MasterTableJournal[] all_table_changes = table_info.changes_since_commit; if (all_table_changes == null || all_table_changes.length == 0) { // No changes so we can pick the correct IndexSet from the current // transaction. // Get the state of the changed tables from the Transaction MutableTableDataSource mtable = transaction.getTable(master.getTableName()); // Get the current index set of the changed table table_info.index_set = transaction.getIndexSetForTable(master); // Flush all index changes in the table mtable.flushIndexChanges(); // Set the 'check_transaction' object with the latest version of the // table. check_transaction.updateVisibleTable(table_info.master, table_info.index_set); } else { // There were changes so we need to merge the changes with the // current view of the table. 
// It's not immediately obvious how this merge update works, but // basically what happens is we put the table journal with all the // changes into a new MutableTableDataSource of the current // committed state, and then we flush all the changes into the // index and then update the 'check_transaction' with this change. // Create the MutableTableDataSource with the changes from this // journal. MutableTableDataSource mtable = master.createTableDataSourceAtCommit(check_transaction, table_info.journal); // Get the current index set of the changed table table_info.index_set = check_transaction.getIndexSetForTable(master); // Flush all index changes in the table mtable.flushIndexChanges(); // Dispose the table mtable.dispose(); } // And now refresh the 'changed_table_source' entry changed_table_source[i] = check_transaction.getTable(master.getTableName()); } // The 'check_transaction' now represents the view the database will be // if the commit succeeds. We lock 'check_transaction' so it is // read-only (the view is immutable). check_transaction.setReadOnly(); // Any tables that the constraints were altered for we need to check // if any rows in the table violate the new constraints. for (int i = 0; i < constraint_altered_tables.size(); ++i) { // We need to check there are no constraint violations for all the // rows in the table. int table_id = constraint_altered_tables.intAt(i); for (int n = 0; n < norm_changed_tables_count; ++n) { CommitTableInfo table_info = (CommitTableInfo) normalized_changed_tables.get(n); if (table_info.master.getTableID() == table_id) { checkAllAddConstraintViolations(check_transaction, changed_table_source[n], Transaction.INITIALLY_DEFERRED); } } } // For each changed table we must determine the rows that // were deleted and perform the remove constraint checks on the // deleted rows. Note that this happens after the records are // removed from the index. 
// For each changed table, for (int i = 0; i < norm_changed_tables_count; ++i) { CommitTableInfo table_info = (CommitTableInfo) normalized_changed_tables.get(i); // Get the journal that details the change to the table. MasterTableJournal change_journal = table_info.journal; if (change_journal != null) { // Find the normalized deleted rows. int[] normalized_removed_rows = change_journal.normalizedRemovedRows(); // Check removing any of the data doesn't cause a constraint // violation. checkRemoveConstraintViolations(check_transaction, changed_table_source[i], normalized_removed_rows, Transaction.INITIALLY_DEFERRED); // Find the normalized added rows. int[] normalized_added_rows = change_journal.normalizedAddedRows(); // Check adding any of the data doesn't cause a constraint // violation. checkAddConstraintViolations(check_transaction, changed_table_source[i], normalized_added_rows, Transaction.INITIALLY_DEFERRED); // Set up the list of added and removed rows table_info.norm_added_rows = normalized_added_rows; table_info.norm_removed_rows = normalized_removed_rows; } } // Deferred trigger events. // For each changed table. n_loop: for (int i = 0; i < norm_changed_tables_count; ++i) { CommitTableInfo table_info = (CommitTableInfo) normalized_changed_tables.get(i); // Get the journal that details the change to the table. MasterTableJournal change_journal = table_info.journal; if (change_journal != null) { // Get the table name TableName table_name = table_info.master.getTableName(); // The list of listeners to dispatch this event to TransactionModificationListener[] listeners; // Are there any listeners listening for events on this table? synchronized (modification_listeners) { ArrayList list = (ArrayList) modification_listeners.get(table_name); if (list == null || list.size() == 0) { // If no listeners on this table, continue to the next // table that was changed. 
continue n_loop; } // Generate the list of listeners, listeners = (TransactionModificationListener[]) list.toArray( new TransactionModificationListener[list.size()]); } // Generate the event TableCommitModificationEvent event = new TableCommitModificationEvent( check_transaction, table_name, table_info.norm_added_rows, table_info.norm_removed_rows); // Fire this event on the listeners for (int n = 0; n < listeners.length; ++n) { listeners[n].tableCommitChange(event); } } // if (change_journal != null) } // for each changed table // NOTE: This isn't as fail safe as it could be. We really need to // do the commit in two phases. The first writes updated indices to // the index files. The second updates the header pointer for the // respective table. Perhaps we can make the header update // procedure just one file write. // Finally, at this point all constraint checks have passed and the // changes are ready to finally be committed as permanent changes // to the conglomerate. All that needs to be done is to commit our // IndexSet indices for each changed table as final. // ISSUE: Should we separate the 'committing of indexes' changes and // 'committing of delete/add flags' to make the FS more robust? // It would be more robust if all indexes are committed in one go, // then all table flag data. // Set flag to indicate we have committed entries. entries_committed = true; // For each change to each table, for (int i = 0; i < norm_changed_tables_count; ++i) { CommitTableInfo table_info = (CommitTableInfo) normalized_changed_tables.get(i); // Get the journal that details the change to the table. MasterTableJournal change_journal = table_info.journal; if (change_journal != null) { // Get the master table with this table id. MasterTableDataSource master = table_info.master; // Commit the changes to the table. // We use 'this.commit_id' which is the current commit level we are // at. 
master.commitTransactionChange(this.commit_id, change_journal, table_info.index_set); // Add to 'changed_tables_list' changed_tables_list.add(master); } } // Only do this if we've created or dropped tables. if (created_tables.size() > 0 || dropped_tables.size() > 0) { // Update the committed tables in the conglomerate state. // This will update and synchronize the headers in this conglomerate. commitToTables(created_tables, dropped_tables); } // Update the namespace clash list if (database_objects_created.size() > 0 || database_objects_dropped.size() > 0) { NameSpaceJournal namespace_journal = new NameSpaceJournal(tran_commit_id, database_objects_created, database_objects_dropped); namespace_journal_list.add(namespace_journal); } } finally { try { // If entries_committed == false it means we didn't get to a point // where any changed tables were committed. Attempt to rollback the // changes in this transaction if they haven't been committed yet. if (entries_committed == false) { // For each change to each table, for (int i = 0; i < changed_tables.length; ++i) { // Get the journal that details the change to the table. MasterTableJournal change_journal = changed_tables[i]; // The table the changes were made to. int table_id = change_journal.getTableID(); // Get the master table with this table id. MasterTableDataSource master = getMasterTable(table_id); // Commit the rollback on the table. master.rollbackTransactionChange(change_journal); } if (Debug().isInterestedIn(Lvl.INFORMATION)) { Debug().write(Lvl.INFORMATION, this, "Rolled back transaction changes in a commit."); } } } finally { try { // Dispose the 'check_transaction' if (check_transaction != null) { check_transaction.dispose(); closeTransaction(check_transaction); } // Always ensure a transaction close, even if we have an exception. // Notify the conglomerate that this transaction has closed. 
closeTransaction(transaction); } catch (Throwable e) { Debug().writeException(e); } } } // Flush the journals up to the minimum commit id for all the tables // that this transaction changed. long min_commit_id = open_transactions.minimumCommitID(null); int chsz = changed_tables_list.size(); for (int i = 0; i < chsz; ++i) { MasterTableDataSource master = (MasterTableDataSource) changed_tables_list.get(i); master.mergeJournalChanges(min_commit_id); } int nsjsz = namespace_journal_list.size(); for (int i = nsjsz - 1; i >= 0; --i) { NameSpaceJournal namespace_journal = (NameSpaceJournal) namespace_journal_list.get(i); // Remove if the commit id for the journal is less than the minimum // commit id if (namespace_journal.commit_id < min_commit_id) { namespace_journal_list.remove(i); } } // Set a check point in the store system. This means that the // persistance state is now stable. store_system.setCheckPoint(); } // synchronized (commit_lock) } /** * Rollbacks a transaction and invalidates any changes that the transaction * made to the database. The rows that this transaction changed are given * up as freely available rows. This is called by the 'closeAndRollback' * method in Transaction. */ void processRollback(Transaction transaction, ArrayList touched_tables, TransactionJournal journal) { // Go through the journal. Any rows added should be marked as deleted // in the respective master table. // Get individual journals for updates made to tables in this // transaction. // The list MasterTableJournal ArrayList journal_list = new ArrayList(); for (int i = 0; i < touched_tables.size(); ++i) { MasterTableJournal table_journal = ((MutableTableDataSource) touched_tables.get(i)).getJournal(); if (table_journal.entries() > 0) { // Check the journal has entries. journal_list.add(table_journal); } } MasterTableJournal[] changed_tables = (MasterTableJournal[]) journal_list.toArray( new MasterTableJournal[journal_list.size()]); // The list of tables created by this journal. 
IntegerVector created_tables = journal.getTablesCreated(); synchronized (commit_lock) { try { // For each change to each table, for (int i = 0; i < changed_tables.length; ++i) { // Get the journal that details the change to the table. MasterTableJournal change_journal = changed_tables[i]; // The table the changes were made to. int table_id = change_journal.getTableID(); // Get the master table with this table id. MasterTableDataSource master = getMasterTable(table_id); // Commit the rollback on the table. master.rollbackTransactionChange(change_journal); } } finally { // Notify the conglomerate that this transaction has closed. closeTransaction(transaction); } } } // ----- /** * Sets the given List of MasterTableDataSource objects to the currently * committed list of tables in this conglomerate. This will make the change * permanent by updating the state file also. *

* This should be called as part of a transaction commit. */ private void commitToTables( IntegerVector created_tables, IntegerVector dropped_tables) { // Add created tables to the committed tables list. for (int i = 0; i < created_tables.size(); ++i) { // For all created tables, add to the visible list and remove from the // delete list in the state store. MasterTableDataSource t = getMasterTable(created_tables.intAt(i)); StateResource resource = new StateResource(t.getTableID(), createEncodedTableFile(t)); state_store.addVisibleResource(resource); state_store.removeDeleteResource(resource.name); } // Remove dropped tables from the committed tables list. for (int i = 0; i < dropped_tables.size(); ++i) { // For all dropped tables, add to the delete list and remove from the // visible list in the state store. MasterTableDataSource t = getMasterTable(dropped_tables.intAt(i)); StateResource resource = new StateResource(t.getTableID(), createEncodedTableFile(t)); state_store.addDeleteResource(resource); state_store.removeVisibleResource(resource.name); } try { state_store.commit(); } catch (IOException e) { Debug().writeException(e); throw new Error("IO Error: " + e.getMessage()); } } /** * Returns the MasterTableDataSource in this conglomerate with the given * table id. */ MasterTableDataSource getMasterTable(int table_id) { synchronized (commit_lock) { // Find the table with this table id. for (int i = 0; i < table_list.size(); ++i) { MasterTableDataSource t = (MasterTableDataSource) table_list.get(i); if (t.getTableID() == table_id) { return t; } } throw new Error("Unable to find an open table with id: " + table_id); } } /** * Creates a table store in this conglomerate with the given name and returns * a reference to the table. Note that this table is not a commited change * to the system. It is a free standing blank table store. The table * returned here is uncommitted and will be deleted unless it is committed. *

* Note that two tables may exist within a conglomerate with the same name, * however each committed table must have a unique name. *

* @param table_def the table definition. * @param data_sector_size the size of the data sectors (affects performance * and size of the file). * @param index_sector_size the size of the index sectors. */ MasterTableDataSource createMasterTable(DataTableDef table_def, int data_sector_size, int index_sector_size) { synchronized (commit_lock) { try { // EFFICIENCY: Currently this writes to the conglomerate state file // twice. Once in 'nextUniqueTableID' and once in // 'state_store.commit'. // The unique id that identifies this table, int table_id = nextUniqueTableID(); // Create the object. V2MasterTableDataSource master_table = new V2MasterTableDataSource(getSystem(), storeSystem(), open_transactions, blob_store); master_table.create(table_id, table_def); // Add to the list of all tables. table_list.add(master_table); // Add this to the list of deleted tables, // (This should really be renamed to uncommitted tables). markAsCommittedDropped(table_id); // Commit this state_store.commit(); // And return it. return master_table; } catch (IOException e) { Debug().writeException(e); throw new Error("Unable to create master table '" + table_def.getName() + "' - " + e.getMessage()); } } } /** * Creates a table store in this conglomerate that is an exact copy of the * given MasterTableDataSource. Note that this table is not a commited change * to the system. It is a free standing blank table store. The table * returned here is uncommitted and will be deleted unless it is committed. *

* Note that two tables may exist within a conglomerate with the same name, * however each committed table must have a unique name. *

* @param src_master_table the source master table to copy. * @param index_set the view of the table index to copy. * @return the MasterTableDataSource with the copied information. */ MasterTableDataSource copyMasterTable( MasterTableDataSource src_master_table, IndexSet index_set) { synchronized (commit_lock) { try { // EFFICIENCY: Currently this writes to the conglomerate state file // twice. Once in 'nextUniqueTableID' and once in // 'state_store.commit'. // The unique id that identifies this table, int table_id = nextUniqueTableID(); // Create the object. V2MasterTableDataSource master_table = new V2MasterTableDataSource(getSystem(), storeSystem(), open_transactions, blob_store); master_table.copy(table_id, src_master_table, index_set); // Add to the list of all tables. table_list.add(master_table); // Add this to the list of deleted tables, // (This should really be renamed to uncommitted tables). markAsCommittedDropped(table_id); // Commit this state_store.commit(); // And return it. return master_table; } catch (IOException e) { Debug().writeException(e); throw new RuntimeException("Unable to copy master table '" + src_master_table.getDataTableDef().getName() + "' - " + e.getMessage()); } } } // ---------- Inner classes ---------- /** * A journal for handling namespace clashes between transactions. For * example, we would need to generate a conflict if two concurrent * transactions were to drop the same table, or if a procedure and a * table with the same name were generated in concurrent transactions. */ private static class NameSpaceJournal { /** * The commit_id of this journal entry. */ long commit_id; /** * The list of names created in this journal. */ ArrayList created_names; /** * The list of names dropped in this journal. */ ArrayList dropped_names; /** * Constructs the journal. 
*/ NameSpaceJournal(long commit_id, ArrayList created_names, ArrayList dropped_names) { this.commit_id = commit_id; this.created_names = created_names; this.dropped_names = dropped_names; } } // // ---------- Shutdown hook ---------- // // /** // * This is a thread that is started when the shutdown hook for this // * conglomerate is executed. It goes through each table in the conglomerate // * and attempts to lock the 'writeLockedObject' for each table. When all the // * objects are locked it goes into a wait state. // */ // private class ConglomerateShutdownHookThread extends Thread { // private boolean complete = false; // // ConglomerateShutdownHookThread() { // setName("Mckoi - JVM Shutdown Hook"); // } // // public synchronized void run() { // // Synchronize over the commit_lock object // synchronized (commit_lock) { // if (table_list != null) { //// System.out.println("Cleanup on: " + TableDataConglomerate.this); // for (int i = 0; i < table_list.size(); ++i) { // MasterTableDataSource master = // (MasterTableDataSource) table_list.get(i); //// System.out.println("CLEANUP: " + master); // master.shutdownHookCleanup(); // } // } // } // complete = true; // notifyAll(); // } // public synchronized void waitUntilComplete() { // try { // while (!complete) { // wait(); // } // } // catch (InterruptedException e) { /* ignore */ } // } // } public void finalize() { // removeShutdownHook(); } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/TableDataSource.java000066400000000000000000000062531330501023400255700ustar00rootroot00000000000000/** * com.mckoi.database.TableDataSource 17 Nov 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; /** * This interface represents the source of data in a table. This is an * abstraction that is used to read data from within a table. *

* The entire contents of a table can be completely represented by * implementations of this interface. * * @author Tobias Downer */ public interface TableDataSource { /** * Returns the TransactionSystem object that describes global properties * about the data source that generated this object. */ TransactionSystem getSystem(); /** * Returns a DataTableDef object that defines the layout of the table that * this data is in. *

* This may return 'null' if there is no table definition. */ DataTableDef getDataTableDef(); /** * Returns the number of rows in this data source. *

* NOTE: Returns 'n' - getCellContents(column, row) is not necessarily valid * for row = [0..n]. Use 'rowEnumerator' to generate an iterator for valid * row values over this data source. */ int getRowCount(); /** * Returns an iterator that is used to sequentually step through all valid * rows in this source. The iterator is guarenteed to return exactly * 'getRowCount' elements. The row elements returned by this iterator * are used in 'getCellContents' in the 'row' parameter. *

* Note that this object is only defined if entries in the table are not * added/remove during the lifetime of this iterator. If entries are added * or removed from the table while this iterator is open, then calls to * 'nextRowIndex' will be undefined. */ RowEnumeration rowEnumeration(); /** * Returns the SelectableScheme that we use as an index for rows in the * given column of this source. The SelectableScheme is used to determine * the relationship between cells in a column. *

* ISSUE: The scheme returned here should not have the 'insert' or 'remove' * methods called (ie. it should be considered immutable). Perhaps we * should make a MutableSelectableScheme interface to guarentee this * constraint. */ SelectableScheme getColumnScheme(int column); /** * Returns an object that represents the information in the given cell * in the table. This may be an expensive operation, so calls to it * should be kept to a minimum. Note that the offset between two * rows is not necessarily 1. Use 'rowEnumeration' to create a row iterator. */ TObject getCellContents(int column, int row); } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/TableDescriptions.java000066400000000000000000000120051330501023400261740ustar00rootroot00000000000000/** * com.mckoi.database.TableDescriptions 28 Jul 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import java.io.*; import java.util.*; /** * An object that is a key part of Database. This object maintains a list * of descriptions of all tables in the database. The list contains * information about the columns in the table and any other misc table options. * * @author Tobias Downer */ public final class TableDescriptions { /** * The filename of the file that describes every table in the database. 
*/ private static final String TABLE_DESC_FILE = "MckoiDB.desc"; /** * The File that contains the table descriptions list. */ private File table_desc_file; /** * The File we use to temporary store the table descriptions as we save * them. */ private File temp_desc_file; /** * The backup file for table descriptions. */ private File backup_file; /** * A hash table that maps from table name to the DataTableDef object that * describes the table. */ private HashMap table_descriptions; /** * Constructs this object with the database in the given directory. */ public TableDescriptions(File database_path) { table_desc_file = new File(database_path, TABLE_DESC_FILE); temp_desc_file = new File(database_path, TABLE_DESC_FILE + ".temp"); backup_file = new File(database_path, TABLE_DESC_FILE + ".bak"); clear(); } /** * Returns true if the table descriptions file exists. */ public boolean exists() { return table_desc_file.exists() && !table_desc_file.isDirectory(); } /** * Load the entire list of table descriptions for this database. */ public void load() throws IOException { // Does the table description file exist? if (table_desc_file.exists()) { // The file exists so load up the table descriptions and put each table // in the table_descriptions map. DataInputStream din = new DataInputStream( new BufferedInputStream(new FileInputStream(table_desc_file))); int ver = din.readInt(); int table_count = din.readInt(); for (int i = 0; i < table_count; ++i) { DataTableDef table_desc = DataTableDef.read(din); String name = table_desc.getName(); table_descriptions.put(name, table_desc); } din.close(); } } /** * Updates the table description file in the database. The table description * file describes every table in the database. It is loaded when the * database is initialized and refreshed whenever a table alteration occurs * or the database is shut down. 
*/ public void save() throws IOException { DataOutputStream dout = new DataOutputStream( new BufferedOutputStream(new FileOutputStream(temp_desc_file))); dout.writeInt(1); String[] table_list = getTableList(); dout.writeInt(table_list.length); for (int i = 0; i < table_list.length; ++i) { // Write the DataTableDef for this table ((DataTableDef) table_descriptions.get(table_list[i])).write(dout); } dout.flush(); dout.close(); // Delete the current backup file and rename the temp file to the official // file. // Cycle through the backups... backup_file.delete(); table_desc_file.renameTo(backup_file); temp_desc_file.renameTo(table_desc_file); } /** * Adds a new DataTableDef object to the list of tables in the database. */ void add(DataTableDef table) throws IOException { table_descriptions.put(table.getName(), table); } /** * Removes a DataTableDef object from the list with the given name. */ void remove(String name) throws IOException { table_descriptions.remove(name); } /** * Returns a list of table name's sorted in alphebetical order. */ public String[] getTableList() { Set keys = table_descriptions.keySet(); String[] all_keys = (String[]) keys.toArray(new String[keys.size()]); Arrays.sort(all_keys); return all_keys; } /** * Clears this object completely. */ void clear() { table_descriptions = new HashMap(150, 0.50f); } /** * Returns the DataTableDef object for the table with the given name. The * description must have been loaded before this method is called. Returns * null if the table was not found. */ public DataTableDef getDef(String table_name) { return (DataTableDef) table_descriptions.get(table_name); } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/TableFunctions.java000066400000000000000000000201101330501023400254720ustar00rootroot00000000000000/** * com.mckoi.database.TableFunctions 08 Nov 2001 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import com.mckoi.util.IntegerVector; /** * A number of functions that are table set functions such as simple select * operations, joins, unions, sub-query operations, etc. * * @author Tobias Downer */ public class TableFunctions { /** * The function for a non-correlated ANY or ALL sub-query operation between a * left and right branch. An example of an SQL query that generates such a * query is; *

   *   Table.col > ANY ( SELECT .... )
   * 

* This function only works non-correlated sub-queries. *

* A non-correlated sub-query, or a correlated sub-query where the correlated * variables are references to a parent plan branch, the plan only need be * evaluated once and optimizations on the query present themselves. */ static Table anyAllNonCorrelated(Table left_table, Variable[] left_vars, Operator op, Table right_table) { // Check the right table and the correct number of columns, if (right_table.getColumnCount() != left_vars.length) { throw new RuntimeException( "Input table <> " + left_vars.length + " columns."); } // Handle trivial case of no entries to select from if (left_table.getRowCount() == 0) { return left_table; } // Resolve the vars in the left table and check the references are // compatible. int sz = left_vars.length; int[] left_col_map = new int[sz]; int[] right_col_map = new int[sz]; for (int i = 0; i < sz; ++i) { left_col_map[i] = left_table.findFieldName(left_vars[i]); right_col_map[i] = i; // System.out.println("Finding: " + left_vars[i]); // System.out.println("left_col_map: " + left_col_map[i]); // System.out.println("right_col_map: " + right_col_map[i]); if (left_col_map[i] == -1) { throw new RuntimeException("Invalid reference: " + left_vars[i]); } DataTableColumnDef left_type = left_table.getColumnDefAt(left_col_map[i]); DataTableColumnDef right_type = right_table.getColumnDefAt(i); if (!left_type.getTType().comparableTypes(right_type.getTType())) { throw new Error( "The type of the sub-query expression " + left_vars[i] + "(" + left_type.getSQLTypeString() + ") is incompatible with " + "the sub-query type " + right_type.getSQLTypeString() + "."); } } // We now have all the information to solve this query. IntegerVector select_vec; if (op.isSubQueryForm(Operator.ALL)) { // ----- ALL operation ----- // We work out as follows: // For >, >= type ALL we find the highest value in 'table' and // select from 'source' all the rows that are >, >= than the // highest value. 
// For <, <= type ALL we find the lowest value in 'table' and // select from 'source' all the rows that are <, <= than the // lowest value. // For = type ALL we see if 'table' contains a single value. If it // does we select all from 'source' that equals the value, otherwise an // empty table. // For <> type ALL we use the 'not in' algorithm. if (op.is(">") || op.is(">=")) { // Select the last from the set (the highest value), TObject[] highest_cells = right_table.getLastCellContent(right_col_map); // Select from the source table all rows that are > or >= to the // highest cell, select_vec = left_table.selectRows(left_col_map, op, highest_cells); } else if (op.is("<") || op.is("<=")) { // Select the first from the set (the lowest value), TObject[] lowest_cells = right_table.getFirstCellContent(right_col_map); // Select from the source table all rows that are < or <= to the // lowest cell, select_vec = left_table.selectRows(left_col_map, op, lowest_cells); } else if (op.is("=")) { // Select the single value from the set (if there is one). TObject[] single_cell = right_table.getSingleCellContent(right_col_map); if (single_cell != null) { // Select all from source_table all values that = this cell select_vec = left_table.selectRows(left_col_map, op, single_cell); } else { // No single value so return empty set (no value in LHS will equal // a value in RHS). return left_table.emptySelect(); } } else if (op.is("<>")) { // Equiv. to NOT IN select_vec = INHelper.notIn(left_table, right_table, left_col_map, right_col_map); } else { throw new RuntimeException( "Don't understand operator '" + op + "' in ALL."); } } else if (op.isSubQueryForm(Operator.ANY)) { // ----- ANY operation ----- // We work out as follows: // For >, >= type ANY we find the lowest value in 'table' and // select from 'source' all the rows that are >, >= than the // lowest value. 
// For <, <= type ANY we find the highest value in 'table' and // select from 'source' all the rows that are <, <= than the // highest value. // For = type ANY we use same method from INHelper. // For <> type ANY we iterate through 'source' only including those // rows that a <> query on 'table' returns size() != 0. if (op.is(">") || op.is(">=")) { // Select the first from the set (the lowest value), TObject[] lowest_cells = right_table.getFirstCellContent(right_col_map); // Select from the source table all rows that are > or >= to the // lowest cell, select_vec = left_table.selectRows(left_col_map, op, lowest_cells); } else if (op.is("<") || op.is("<=")) { // Select the last from the set (the highest value), TObject[] highest_cells = right_table.getLastCellContent(right_col_map); // Select from the source table all rows that are < or <= to the // highest cell, select_vec = left_table.selectRows(left_col_map, op, highest_cells); } else if (op.is("=")) { // Equiv. to IN select_vec = INHelper.in(left_table, right_table, left_col_map, right_col_map); } else if (op.is("<>")) { // Select the value that is the same of the entire column TObject[] cells = right_table.getSingleCellContent(right_col_map); if (cells != null) { // All values from 'source_table' that are <> than the given cell. select_vec = left_table.selectRows(left_col_map, op, cells); } else { // No, this means there are different values in the given set so the // query evaluates to the entire table. return left_table; } } else { throw new RuntimeException( "Don't understand operator '" + op + "' in ANY."); } } else { throw new RuntimeException("Unrecognised sub-query operator."); } // Make into a table to return. 
VirtualTable rtable = new VirtualTable(left_table); rtable.set(left_table, select_vec); return rtable; } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/TableModificationEvent.java000066400000000000000000000146341330501023400271470ustar00rootroot00000000000000/** * com.mckoi.database.TableModificationEvent 07 Mar 2003 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; /** * The event information of when a table is modified inside a transaction. * * @author Tobias Downer */ public class TableModificationEvent { // ----- Statics ----- /** * Event that occurs before the action */ public static final int BEFORE = 0x010; /** * Event that occurs after the action */ public static final int AFTER = 0x020; // --- /** * Event type for insert action. */ public static final int INSERT = 0x001; /** * Event type for update action. */ public static final int UPDATE = 0x002; /** * Event type for delete action. */ public static final int DELETE = 0x004; // --- /** * Event for before an insert. */ public static final int BEFORE_INSERT = BEFORE | INSERT; /** * Event for after an insert. */ public static final int AFTER_INSERT = AFTER | INSERT; /** * Event for before an update. */ public static final int BEFORE_UPDATE = BEFORE | UPDATE; /** * Event for after an update. */ public static final int AFTER_UPDATE = AFTER | UPDATE; /** * Event for before a delete. 
*/ public static final int BEFORE_DELETE = BEFORE | DELETE; /** * Event for after a delete. */ public static final int AFTER_DELETE = AFTER | DELETE; // ----- Members ----- /** * The DatabaseConnection of the table that the modification occurred in. */ private DatabaseConnection connection; /** * The name of the table that was modified. */ private TableName table_name; /** * The type of event that occurred. */ private int event_type; /** * A RowData object representing the row that is being inserted by this * modification. This is set for INSERT and UPDATE events. If the event * type is BEFORE then this data represents the new data in the table and * can be modified. This represents the NEW information. */ private RowData row_data; /** * The row index of the table that is before removed by this modification. * This is set for UPDATE and DELETE events. This represents the OLD * information. */ private int row_index = -1; /** * General Constructor. */ private TableModificationEvent(DatabaseConnection connection, TableName table_name, int row_index, RowData row_data, int type, boolean before) { this.connection = connection; this.table_name = table_name; this.row_index = row_index; this.row_data = row_data; this.event_type = type | (before ? BEFORE : AFTER); } /** * Constructs an insert event. */ TableModificationEvent(DatabaseConnection connection, TableName table_name, RowData row_data, boolean before) { this(connection, table_name, -1, row_data, INSERT, before); } /** * Constructs an update event. */ TableModificationEvent(DatabaseConnection connection, TableName table_name, int row_index, RowData row_data, boolean before) { this(connection, table_name, row_index, row_data, UPDATE, before); } /** * Constructs a delete event. */ TableModificationEvent(DatabaseConnection connection, TableName table_name, int row_index, boolean before) { this(connection, table_name, row_index, null, DELETE, before); } /** * Returns the DatabaseConnection that this event fired in. 
*/ public DatabaseConnection getDatabaseConnection() { return connection; } /** * Returns the event type. */ public int getType() { return event_type; } /** * Returns true if this is a BEFORE event. */ public boolean isBefore() { return (event_type & BEFORE) != 0; } /** * Returns true if this is a AFTER event. */ public boolean isAfter() { return (event_type & AFTER) != 0; } /** * Returns true if this is an INSERT event. */ public boolean isInsert() { return (event_type & INSERT) != 0; } /** * Returns true if this is an UPDATE event. */ public boolean isUpdate() { return (event_type & UPDATE) != 0; } /** * Returns true if this is an DELETE event. */ public boolean isDelete() { return (event_type & DELETE) != 0; } /** * Returns the name of the table of this modification. */ public TableName getTableName() { return table_name; } /** * Returns the index of the row in the table that was affected by this * event or -1 if event type is INSERT. */ public int getRowIndex() { return row_index; } /** * Returns the RowData object that represents the change that is being * made to the table either by an INSERT or UPDATE. For a DELETE event this * return null. */ public RowData getRowData() { return row_data; } /** * Returns true if the given listener type should be notified of this type * of table modification event. For example, if this is a BEFORE event then * the BEFORE bit on the given type must be set and if this is an INSERT event * then the INSERT bit on the given type must be set. */ public boolean listenedBy(int listen_t) { // If this is a BEFORE trigger, then we must be listening for BEFORE events, // etc. boolean ba_match = ( (event_type & BEFORE) != 0 && (listen_t & BEFORE) != 0 ) || ( (event_type & AFTER) != 0 && (listen_t & AFTER) != 0 ); // If this is an INSERT trigger, then we must be listening for INSERT // events, etc. 
boolean trig_match = ( (event_type & INSERT) != 0 && (listen_t & INSERT) != 0 ) || ( (event_type & DELETE) != 0 && (listen_t & DELETE) != 0 ) || ( (event_type & UPDATE) != 0 && (listen_t & UPDATE) != 0 ); // If both of the above are true return (ba_match && trig_match); } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/TableName.java000066400000000000000000000100771330501023400244150ustar00rootroot00000000000000/** * com.mckoi.database.TableName 09 Mar 2001 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; /** * A name of a table and any associated referencing information. This object * is immutable. * * @author Tobias Downer */ public final class TableName implements Comparable, java.io.Serializable { static final long serialVersionUID = 6527135256976754916L; /** * The constant 'schema_name' that defines a schema that is unknown. */ private static final String UNKNOWN_SCHEMA_NAME = "##UNKNOWN_SCHEMA##"; /** * The name of the schema of the table. This value can be 'null' which * means the schema is currently unknown. */ private final String schema_name; /** * The name of the table. */ private final String table_name; /** * Constructs the name. 
*/ public TableName(String schema_name, String table_name) { if (table_name == null) { throw new NullPointerException("'name' can not be null."); } if (schema_name == null) { schema_name = UNKNOWN_SCHEMA_NAME; } this.schema_name = schema_name; this.table_name = table_name; } public TableName(String table_name) { this(UNKNOWN_SCHEMA_NAME, table_name); } /** * Returns the schema name or null if the schema name is unknown. */ public String getSchema() { if (schema_name.equals(UNKNOWN_SCHEMA_NAME)) { return null; } else { return schema_name; } } /** * Returns the table name. */ public String getName() { return table_name; } /** * Resolves a schema reference in a table name. If the schema in this * table is 'null' (which means the schema is unknown) then it is set to the * given schema argument. */ public TableName resolveSchema(String scheman) { if (schema_name.equals(UNKNOWN_SCHEMA_NAME)) { return new TableName(scheman, getName()); } return this; } /** * Resolves a [schema name].[table name] type syntax to a TableName * object. Uses 'schemav' only if there is no schema name explicitely * specified. */ public static TableName resolve(String schemav, String namev) { int i = namev.indexOf('.'); if (i == -1) { return new TableName(schemav, namev); } else { return new TableName(namev.substring(0, i), namev.substring(i + 1)); } } /** * Resolves a [schema name].[table name] type syntax to a TableName * object. */ public static TableName resolve(String namev) { return resolve(UNKNOWN_SCHEMA_NAME, namev); } // ---- /** * To string. */ public String toString() { if (getSchema() != null) { return getSchema() + "." + getName(); } return getName(); } /** * Equality. */ public boolean equals(Object ob) { TableName tn = (TableName) ob; return tn.schema_name.equals(schema_name) && tn.table_name.equals(table_name); } /** * Equality but ignore the case. 
*/ public boolean equalsIgnoreCase(TableName tn) { return tn.schema_name.equalsIgnoreCase(schema_name) && tn.table_name.equalsIgnoreCase(table_name); } /** * Comparable. */ public int compareTo(Object ob) { TableName tn = (TableName) ob; int v = schema_name.compareTo(tn.schema_name); if (v == 0) { return table_name.compareTo(tn.table_name); } return v; } /** * Hash code. */ public int hashCode() { return schema_name.hashCode() + table_name.hashCode(); } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/TableQueryDef.java000066400000000000000000000036021330501023400252550ustar00rootroot00000000000000/** * com.mckoi.database.TableQueryDef 23 Aug 2002 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; /** * An interface to an object that describes characteristics of a table based * object in the database. This can represent anything that evaluates to a * Table when the query plan is evaluated. It is used to represent data tables * and views. *

* This object is used by the planner to see ahead of time what sort of table * we are dealing with. For example, a view is stored with a DataTableDef * describing the resultant columns, and the QueryPlanNode to produce the * view result. The query planner requires the information in DataTableDef * to resolve references in the query, and the QueryPlanNode to add into the * resultant plan tree. * * @author Tobias Downer */ public interface TableQueryDef { /** * Returns an immutable DataTableDef object that describes the columns in this * table source, and the name of the table. */ DataTableDef getDataTableDef(); /** * Returns a QueryPlanNode that can be put into a plan tree and can be * evaluated to find the result of the table. This method should always * return a new object representing the query plan. */ QueryPlanNode getQueryPlanNode(); } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/TemporaryTable.java000066400000000000000000000234741330501023400255240ustar00rootroot00000000000000/** * com.mckoi.database.TemporaryTable 11 Apr 1998 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package com.mckoi.database; import java.util.ArrayList; import com.mckoi.util.IntegerVector; import com.mckoi.debug.*; import com.mckoi.database.global.TypeUtil; /** * This class represents a temporary table that is built from data that is * not related to any underlying DataTable object from the database. *

* For example, an aggregate function generates data would be put into a * TemporaryTable. * * @author Tobias Downer */ public final class TemporaryTable extends DefaultDataTable { /** * The DataTableDef object that describes the columns in this table. */ private DataTableDef table_def; /** * A Vector that represents the storage of TObject[] arrays for each row * of the table. */ private ArrayList table_storage; /** * The Constructor. */ public TemporaryTable(Database database, String name, DataTableColumnDef[] fields) { super(database); table_storage = new ArrayList(); table_def = new DataTableDef(); table_def.setTableName(new TableName(null, name)); for (int i = 0; i < fields.length; ++i) { table_def.addVirtualColumn(new DataTableColumnDef(fields[i])); } table_def.setImmutable(); } /** * Constructs this TemporaryTable based on the fields from the given * Table object. */ public TemporaryTable(String name, Table based_on) { super(based_on.getDatabase()); table_def = new DataTableDef(based_on.getDataTableDef()); table_def.setTableName(new TableName(null, name)); table_def.setImmutable(); } /** * Constructs this TemporaryTable based on the given Table object. */ public TemporaryTable(DefaultDataTable based_on) { super(based_on.getDatabase()); table_def = new DataTableDef(based_on.getDataTableDef()); table_def.setImmutable(); } /* ====== Methods that are only for TemporaryTable interface ====== */ /** * Resolves the given column name (eg 'id' or 'Customer.id' or * 'APP.Customer.id') to a column in this table. */ private Variable resolveToVariable(String col_name) { Variable partial = Variable.resolve(col_name); return partial; // return partial.resolveTableName(TableName.resolve(getName())); } /** * Creates a new row where cells can be inserted into. */ public void newRow() { table_storage.add(new TObject[getColumnCount()]); ++row_count; } /** * Sets the cell in the given column / row to the given value. 
*/ public void setRowCell(TObject cell, int column, int row) { TObject[] cells = (TObject[]) table_storage.get(row); cells[column] = cell; } /** * Sets the cell in the column of the last row of this table to the given * TObject. */ public void setRowCell(TObject cell, String col_name) { Variable v = resolveToVariable(col_name); setRowCell(cell, findFieldName(v), row_count - 1); } /** * Sets the cell in the column of the last row of this table to the given * TObject. */ public void setRowObject(TObject ob, int col_index, int row) { setRowCell(ob, col_index, row); } /** * Sets the cell in the column of the last row of this table to the given * TObject. */ public void setRowObject(TObject ob, String col_name) { Variable v = resolveToVariable(col_name); setRowObject(ob, findFieldName(v)); } /** * Sets the cell in the column of the last row of this table to the given * TObject. */ public void setRowObject(TObject ob, int col_index) { setRowObject(ob, col_index, row_count - 1); } /** * Copies the cell from the given table (src_col, src_row) to the last row * of the column specified of this table. */ public void setCellFrom(Table table, int src_col, int src_row, String to_col) { Variable v = resolveToVariable(to_col); TObject cell = table.getCellContents(src_col, src_row); setRowCell(cell, findFieldName(v), row_count - 1); } /** * Copies the contents of the row of the given Table onto the end of this * table. Only copies columns that exist in both tables. 
*/ public void copyFrom(Table table, int row) { newRow(); Variable[] vars = new Variable[table.getColumnCount()]; for (int i = 0; i < vars.length; ++i) { vars[i] = table.getResolvedVariable(i); } for (int i = 0; i < getColumnCount(); ++i) { Variable v = getResolvedVariable(i); String col_name = v.getName(); try { int tcol_index = -1; for (int n = 0; n < vars.length || tcol_index == -1; ++n) { if (vars[n].getName().equals(col_name)) { tcol_index = n; } } setRowCell(table.getCellContents(tcol_index, row), i, row_count - 1); } catch (Exception e) { Debug().writeException(e); throw new Error(e.getMessage()); } } } /** * This should be called if you want to perform table operations on this * TemporaryTable. It should be called *after* all the rows have been set. * It generates SelectableScheme object which sorts the columns of the table * and lets us execute Table operations on this table. * NOTE: After this method is called, the table must not change in any way. */ public void setupAllSelectableSchemes() { blankSelectableSchemes(1); // <- blind search for (int row_number = 0; row_number < row_count; ++row_number) { addRowToColumnSchemes(row_number); } } /* ====== Methods that are implemented for Table interface ====== */ public DataTableDef getDataTableDef() { return table_def; } /** * Returns an object that represents the information in the given cell * in the table. This can be used to obtain information about the given * table cells. */ public TObject getCellContents(int column, int row) { TObject[] cells = (TObject[]) table_storage.get(row); TObject cell = cells[column]; if (cell == null) { throw new Error("NULL cell! (" + column + ", " + row + ")"); } return cell; } /** * Returns an Enumeration of the rows in this table. * Each call to 'nextRowIndex' returns the next valid row index in the table. 
*/ public RowEnumeration rowEnumeration() { return new SimpleRowEnumeration(row_count); } /** * Adds a DataTableListener to the DataTable objects at the root of this * table tree hierarchy. If this table represents the join of a number of * tables then the DataTableListener is added to all the DataTable objects * at the root. *

* A DataTableListener is notified of all modifications to the raw entries * of the table. This listener can be used for detecting changes in VIEWs, * for triggers or for caching of common queries. */ void addDataTableListener(DataTableListener listener) { // Nothing to be notified on with a Temporary table... } /** * Removes a DataTableListener from the DataTable objects at the root of * this table tree hierarchy. If this table represents the join of a * number of tables, then the DataTableListener is removed from all the * DataTable objects at the root. */ void removeDataTableListener(DataTableListener listener) { // No listeners can be in a TemporaryTable. } /** * Locks the root table(s) of this table so that it is impossible to * overwrite the underlying rows that may appear in this table. * This is used when cells in the table need to be accessed 'outside' the * lock. So we may have late access to cells in the table. * 'lock_key' is a given key that will also unlock the root table(s). * NOTE: This is nothing to do with the 'LockingMechanism' object. */ public void lockRoot(int lock_key) { // We don't need to do anything for temporary tables, because they have // no root to lock. } /** * Unlocks the root tables so that the underlying rows may * once again be used if they are not locked and have been removed. This * should be called some time after the rows have been locked. */ public void unlockRoot(int lock_key) { // We don't need to do anything for temporary tables, because they have // no root to unlock. } /** * Returns true if the table has its row roots locked (via the lockRoot(int) * method. */ public boolean hasRootsLocked() { // A temporary table _always_ has its roots locked. return true; } // ---------- Static convenience methods ---------- /** * Creates a table with a single column with the given name and type. 
*/ static final TemporaryTable singleColumnTable(Database database, String col_name, Class c) { TType ttype = TType.fromClass(c); DataTableColumnDef col_def = new DataTableColumnDef(); col_def.setName(col_name); col_def.setFromTType(ttype); TemporaryTable table = new TemporaryTable(database, "single", new DataTableColumnDef[] { col_def }); // int type = TypeUtil.toDBType(c); // TableField[] fields = // { new TableField(col_name, type, Integer.MAX_VALUE, false) }; // TemporaryTable table = new TemporaryTable(database, "single", fields); return table; } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/Transaction.java000066400000000000000000002356131330501023400250570ustar00rootroot00000000000000/** * com.mckoi.database.Transaction 18 Nov 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import com.mckoi.debug.*; import com.mckoi.util.IntegerVector; import com.mckoi.util.BigNumber; import com.mckoi.database.global.ByteLongObject; import com.mckoi.database.global.ObjectTranslator; import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; /** * An open transaction that manages all data access to the * TableDataConglomerate. A transaction sees a view of the data as it was when * the transaction was created. It also sees any modifications that were made * within the context of this transaction. 
It does not see modifications made * by other open transactions. *

* A transaction ends when it is committed or rollbacked. All operations * on this transaction object only occur within the context of this transaction * and are not permanent changes to the database structure. Only when the * transaction is committed are changes reflected in the master data. * * @author Tobias Downer */ public class Transaction extends SimpleTransaction { // ---------- Constraint statics ---------- // These statics are for managing constraints. /** * The type of deferrance. */ public static final short INITIALLY_DEFERRED = java.sql.DatabaseMetaData.importedKeyInitiallyDeferred; public static final short INITIALLY_IMMEDIATE = java.sql.DatabaseMetaData.importedKeyInitiallyImmediate; public static final short NOT_DEFERRABLE = java.sql.DatabaseMetaData.importedKeyNotDeferrable; /** * Foreign key referential trigger actions. */ public static final String NO_ACTION = "NO ACTION"; public static final String CASCADE = "CASCADE"; public static final String SET_NULL = "SET NULL"; public static final String SET_DEFAULT = "SET DEFAULT"; // ---------- Member variables ---------- /** * The TableDataConglomerate that this transaction is within the context of. */ private TableDataConglomerate conglomerate; /** * The commit_id that represents the id of the last commit that occurred * when this transaction was created. */ private long commit_id; /** * All tables touched by this transaction. (MutableTableDataSource) */ private ArrayList touched_tables; /** * All tables selected from in this transaction. (MasterTableDataSource) */ private ArrayList selected_from_tables; /** * The name of all database objects that were created in this transaction. * This is used for a namespace collision test during commit. */ private ArrayList created_database_objects; /** * The name of all database objects that were dropped in this transaction. * This is used for a namespace collision test during commit. 
*/ private ArrayList dropped_database_objects; /** * The journal for this transaction. This journal describes all changes * made to the database by this transaction. */ private TransactionJournal journal; /** * The list of InternalTableInfo objects that are containers for generating * internal tables (GTDataSource). */ private InternalTableInfo[] internal_tables; /** * A pointer in the internal_tables list. */ private int internal_tables_i; /** * True if an error should be generated on a dirty select. */ private boolean transaction_error_on_dirty_select; /** * True if this transaction is closed. */ private boolean closed; /** * Constructs the transaction. */ Transaction(TableDataConglomerate conglomerate, long commit_id, ArrayList visible_tables, ArrayList table_indices) { super(conglomerate.getSystem(), conglomerate.getSequenceManager()); this.conglomerate = conglomerate; this.commit_id = commit_id; this.closed = false; this.created_database_objects = new ArrayList(); this.dropped_database_objects = new ArrayList(); this.touched_tables = new ArrayList(); this.selected_from_tables = new ArrayList(); journal = new TransactionJournal(); // Set up all the visible tables int sz = visible_tables.size(); for (int i = 0; i < sz; ++i) { addVisibleTable((MasterTableDataSource) visible_tables.get(i), (IndexSet) table_indices.get(i)); } // NOTE: We currently only support 8 - internal tables to the transaction // layer, and internal tables to the database connection layer. internal_tables = new InternalTableInfo[8]; internal_tables_i = 0; addInternalTableInfo(new TransactionInternalTables()); getSystem().stats().increment("Transaction.count"); // Defaults to true (should be changed by called 'setErrorOnDirtySelect' // method. transaction_error_on_dirty_select = true; } /** * Returns the TableDataConglomerate of this transaction. 
*/ final TableDataConglomerate getConglomerate() { return conglomerate; } /** * Adds an internal table container (InternalTableInfo) used to * resolve internal tables. This is intended as a way for the * DatabaseConnection layer to plug in 'virtual' tables, such as those * showing connection statistics, etc. It also allows modelling database * objects as tables, such as sequences, triggers, procedures, etc. */ void addInternalTableInfo(InternalTableInfo info) { if (internal_tables_i >= internal_tables.length) { throw new RuntimeException("Internal table list bounds reached."); } internal_tables[internal_tables_i] = info; ++internal_tables_i; } /** * Returns the 'commit_id' which is the last commit that occured before * this transaction was created. *

* NOTE: Don't make this synchronized over anything. This is accessed * by OpenTransactionList. */ long getCommitID() { // REINFORCED NOTE: This absolutely must never be synchronized because // it is accessed by OpenTransactionList synchronized. return commit_id; } // ----- Operations within the context of this transaction ----- /** * Overwritten from SimpleTransaction. * Returns a new MutableTableDataSource for the view of the * MasterTableDataSource at the start of this transaction. Note that this is * only ever called once per table accessed in this transaction. */ public MutableTableDataSource createMutableTableDataSourceAtCommit(MasterTableDataSource master) { // Create the table for this transaction. MutableTableDataSource table = master.createTableDataSourceAtCommit(this); // Log in the journal that this table was touched by the transaction. journal.entryAddTouchedTable(master.getTableID()); touched_tables.add(table); return table; } /** * Called by the query evaluation layer when information is selected * from this table as part of this transaction. When there is a select * query on a table, when the transaction is committed we should look for * any concurrently committed changes to the table. If there are any, then * any selects on the table should be considered incorrect and cause a * commit failure. */ public void addSelectedFromTable(TableName table_name) { // Special handling of internal tables, if (isDynamicTable(table_name)) { return; } MasterTableDataSource master = findVisibleTable(table_name, false); if (master == null) { throw new StatementException( "Table with name not available: " + table_name); } // System.out.println("Selected from table: " + table_name); synchronized (selected_from_tables) { if (!selected_from_tables.contains(master)) { selected_from_tables.add(master); } } } /** * Copies all the tables within this transaction view to the destination * conglomerate object. Some care should be taken with security when using * this method. 
This is useful for generating a backup of the current * view of the database that can work without interfering with the general * operation of the database. */ void liveCopyAllDataTo(TableDataConglomerate dest_conglomerate) { // Create a new TableDataConglomerate using the same settings from this // TransactionSystem but on the new StoreSystem. int sz = getVisibleTableCount(); // The list to copy (in the order to copy in). ArrayList copy_list = new ArrayList(sz); // The 'SEQUENCE_INFO' table is handled specially, MasterTableDataSource sequence_info_table = null; for (int i = 0; i < sz; ++i) { MasterTableDataSource master_table = getVisibleTable(i); TableName table_name = master_table.getDataTableDef().getTableName(); if (table_name.equals(TableDataConglomerate.SYS_SEQUENCE_INFO)) { sequence_info_table = master_table; } else { copy_list.add(master_table); } } // Add the sequence info to the end of the list, copy_list.add(sequence_info_table); try { // For each master table, for (int i = 0; i < sz; ++i) { MasterTableDataSource master_table = (MasterTableDataSource) copy_list.get(i); TableName table_name = master_table.getDataTableDef().getTableName(); // Create a destination transaction Transaction dest_transaction = dest_conglomerate.createTransaction(); // The view of this table within this transaction. IndexSet index_set = getIndexSetForTable(master_table); // If the table already exists then drop it if (dest_transaction.tableExists(table_name)) { dest_transaction.dropTable(table_name); } // Copy it into the destination conglomerate. dest_transaction.copyTable(master_table, index_set); // Close and commit the transaction in the destination conglomeration. 
dest_transaction.closeAndCommit(); // Dispose the IndexSet index_set.dispose(); } } catch (TransactionException e) { Debug().writeException(e); throw new RuntimeException("Transaction Error when copying table: " + e.getMessage()); } } // ---------- Dynamically generated tables ---------- /** * Returns true if the given table name represents a dynamically generated * system table. */ protected boolean isDynamicTable(TableName table_name) { for (int i = 0; i < internal_tables.length; ++i) { InternalTableInfo info = internal_tables[i]; if (info != null) { if (info.containsTableName(table_name)) { return true; } } } return false; } /** * Returns a list of all dynamic table names. This method returns a * reference to a static, make sure you don't change the contents of the * array! */ protected TableName[] getDynamicTableList() { int sz = 0; for (int i = 0; i < internal_tables.length; ++i) { InternalTableInfo info = internal_tables[i]; if (info != null) { sz += info.getTableCount(); } } TableName[] list = new TableName[sz]; int index = 0; for (int i = 0; i < internal_tables.length; ++i) { InternalTableInfo info = internal_tables[i]; if (info != null) { sz = info.getTableCount(); for (int n = 0; n < sz; ++n) { list[index] = info.getTableName(n); ++index; } } } return list; } /** * Returns the DataTableDef for the given internal table. */ protected DataTableDef getDynamicDataTableDef(TableName table_name) { for (int i = 0; i < internal_tables.length; ++i) { InternalTableInfo info = internal_tables[i]; if (info != null) { int index = info.findTableName(table_name); if (index != -1) { return info.getDataTableDef(index); } } } throw new RuntimeException("Not an internal table: " + table_name); } /** * Returns an instance of MutableDataTableSource that represents the * contents of the internal table with the given name. 
*/ protected MutableTableDataSource getDynamicTable(TableName table_name) { for (int i = 0; i < internal_tables.length; ++i) { InternalTableInfo info = internal_tables[i]; if (info != null) { int index = info.findTableName(table_name); if (index != -1) { return info.createInternalTable(index); } } } throw new RuntimeException("Not an internal table: " + table_name); } /** * Returns a string type describing the type of the dynamic table. */ public String getDynamicTableType(TableName table_name) { // Otherwise we need to look up the table in the internal table list, for (int i = 0; i < internal_tables.length; ++i) { InternalTableInfo info = internal_tables[i]; if (info != null) { int index = info.findTableName(table_name); if (index != -1) { return info.getTableType(index); } } } // No internal table found, so report the error. throw new RuntimeException("No table '" + table_name + "' to report type for."); } // ---------- Transaction manipulation ---------- /** * Creates a new table within this transaction with the given sector size. * If the table already exists then an exception is thrown. *

* This should only be called under an exclusive lock on the connection. */ public void createTable(DataTableDef table_def, int data_sector_size, int index_sector_size) { TableName table_name = table_def.getTableName(); MasterTableDataSource master = findVisibleTable(table_name, false); if (master != null) { throw new StatementException( "Table '" + table_name + "' already exists."); } table_def.setImmutable(); if (data_sector_size < 27) { data_sector_size = 27; } else if (data_sector_size > 4096) { data_sector_size = 4096; } // Create the new master table and add to list of visible tables. master = conglomerate.createMasterTable(table_def, data_sector_size, index_sector_size); // Add this table (and an index set) for this table. addVisibleTable(master, master.createIndexSet()); // Log in the journal that this transaction touched the table_id. int table_id = master.getTableID(); journal.entryAddTouchedTable(table_id); // Log in the journal that we created this table. journal.entryTableCreate(table_id); // Add entry to the Sequences table for the native generator for this // table. SequenceManager.addNativeTableGenerator(this, table_name); // Notify that this database object has been successfully created. databaseObjectCreated(table_name); } /** * Creates a new table within this transaction. If the table already * exists then an exception is thrown. *

* This should only be called under an exclusive lock on the connection. */ public void createTable(DataTableDef table_def) { // data sector size defaults to 251 // index sector size defaults to 1024 createTable(table_def, 251, 1024); } /** * Given a DataTableDef, if the table exists then it is updated otherwise * if it doesn't exist then it is created. *

* This should only be used as very fine grain optimization for creating/ * altering tables. If in the future the underlying table model is changed * so that the given 'sector_size' value is unapplicable, then the value * will be ignored. */ public void alterCreateTable(DataTableDef table_def, int data_sector_size, int index_sector_size) { if (!tableExists(table_def.getTableName())) { createTable(table_def, data_sector_size, index_sector_size); } else { alterTable(table_def.getTableName(), table_def, data_sector_size, index_sector_size); } } /** * Drops a table within this transaction. If the table does not exist then * an exception is thrown. *

* This should only be called under an exclusive lock on the connection. */ public void dropTable(TableName table_name) { // System.out.println(this + " DROP: " + table_name); MasterTableDataSource master = findVisibleTable(table_name, false); if (master == null) { throw new StatementException( "Table '" + table_name + "' doesn't exist."); } // Removes this table from the visible table list of this transaction removeVisibleTable(master); // Log in the journal that this transaction touched the table_id. int table_id = master.getTableID(); journal.entryAddTouchedTable(table_id); // Log in the journal that we dropped this table. journal.entryTableDrop(table_id); // Remove the native sequence generator (in this transaction) for this // table. SequenceManager.removeNativeTableGenerator(this, table_name); // Notify that this database object has been dropped databaseObjectDropped(table_name); } /** * Generates an exact copy of the table within this transaction. It is * recommended that the table is dropped before the copy is made. The * purpose of this method is to generate a temporary table that can be * modified without fear of another transaction changing the contents in * another transaction. This also provides a convenient way to compact * a table because any spare space is removed when the table is copied. It * also allows us to make a copy of MasterTableDataSource into a foreign * conglomerate which allows us to implement a backup procedure. *

* This method does NOT assume the given MasterTableDataSource is contained, * or has once been contained within this conglomerate. */ public void copyTable( MasterTableDataSource src_master_table, IndexSet index_set) { DataTableDef table_def = src_master_table.getDataTableDef(); TableName table_name = table_def.getTableName(); MasterTableDataSource master = findVisibleTable(table_name, false); if (master != null) { throw new StatementException( "Unable to copy. Table '" + table_name + "' already exists."); } // Copy the master table and add to the list of visible tables. master = conglomerate.copyMasterTable(src_master_table, index_set); // Add this visible table addVisibleTable(master, master.createIndexSet()); // Log in the journal that this transaction touched the table_id. int table_id = master.getTableID(); journal.entryAddTouchedTable(table_id); // Log in the journal that we created this table. journal.entryTableCreate(table_id); // Add entry to the Sequences table for the native generator for this // table. SequenceManager.addNativeTableGenerator(this, table_name); // Notify that this database object has been successfully created. databaseObjectCreated(table_name); } /** * Alter the table with the given name to the new definition and give the * copied table a new data sector size. If the table does not exist then * an exception is thrown. *

* This copies all columns that were in the original table to the new * altered table if the name is the same. Any names that don't exist are * set to the default value. *

* This should only be called under an exclusive lock on the connection. */ public void alterTable(TableName table_name, DataTableDef table_def, int data_sector_size, int index_sector_size) { table_def.setImmutable(); // The current schema context is the schema of the table name String current_schema = table_name.getSchema(); SystemQueryContext context = new SystemQueryContext(this, current_schema); // Get the next unique id of the unaltered table. long next_id = nextUniqueID(table_name); // Drop the current table MutableTableDataSource c_table = getTable(table_name); dropTable(table_name); // And create the table table createTable(table_def); MutableTableDataSource altered_table = getTable(table_name); // Get the new MasterTableDataSource object MasterTableDataSource new_master_table = findVisibleTable(table_name, false); // Set the sequence id of the table new_master_table.setUniqueID(next_id); // Work out which columns we have to copy to where int[] col_map = new int[table_def.columnCount()]; DataTableDef orig_td = c_table.getDataTableDef(); for (int i = 0; i < col_map.length; ++i) { String col_name = table_def.columnAt(i).getName(); col_map[i] = orig_td.findColumnName(col_name); } try { // First move all the rows from the old table to the new table, // This does NOT update the indexes. try { RowEnumeration e = c_table.rowEnumeration(); while (e.hasMoreRows()) { int row_index = e.nextRowIndex(); RowData row_data = new RowData(altered_table); for (int i = 0; i < col_map.length; ++i) { int col = col_map[i]; if (col != -1) { row_data.setColumnData(i, c_table.getCellContents(col, row_index)); } } row_data.setDefaultForRest(context); // Note we use a low level 'addRow' method on the master table // here. This does not touch the table indexes. The indexes are // built later. 
int new_row_number = new_master_table.addRow(row_data); // Set the record as committed added new_master_table.writeRecordType(new_row_number, 0x010); } } catch (DatabaseException e) { Debug().writeException(e); throw new RuntimeException(e.getMessage()); } // PENDING: We need to copy any existing index definitions that might // have been set on the table being altered. // Rebuild the indexes in the new master table, new_master_table.buildIndexes(); // Get the snapshot index set on the new table and set it here setIndexSetForTable(new_master_table, new_master_table.createIndexSet()); // Flush this out of the table cache flushTableCache(table_name); // Ensure the native sequence generator exists... SequenceManager.removeNativeTableGenerator(this, table_name); SequenceManager.addNativeTableGenerator(this, table_name); // Notify that this database object has been successfully dropped and // created. databaseObjectDropped(table_name); databaseObjectCreated(table_name); } catch (IOException e) { Debug().writeException(e); throw new RuntimeException(e.getMessage()); } } /** * Alters the table with the given name within this transaction to the * specified table definition. If the table does not exist then an exception * is thrown. *

* This should only be called under an exclusive lock on the connection. */ public void alterTable(TableName table_name, DataTableDef table_def) { // Make sure we remember the current sector size of the altered table so // we can create the new table with the original size. try { int current_data_sector_size; MasterTableDataSource master = findVisibleTable(table_name, false); if (master instanceof V1MasterTableDataSource) { current_data_sector_size = ((V1MasterTableDataSource) master).rawDataSectorSize(); } else { current_data_sector_size = -1; } // HACK: We use index sector size of 2043 for all altered tables alterTable(table_name, table_def, current_data_sector_size, 2043); } catch (IOException e) { throw new RuntimeException("IO Error: " + e.getMessage()); } } /** * Checks all the rows in the table for immediate constraint violations * and when the transaction is next committed check for all deferred * constraint violations. This method is used when the constraints on a * table changes and we need to determine if any constraint violations * occurred. To the constraint checking system, this is like adding all * the rows to the given table. */ public void checkAllConstraints(TableName table_name) { // Get the table TableDataSource table = getTable(table_name); // Get all the rows in the table int[] rows = new int[table.getRowCount()]; RowEnumeration row_enum = table.rowEnumeration(); int i = 0; while (row_enum.hasMoreRows()) { rows[i] = row_enum.nextRowIndex(); ++i; } // Check the constraints of all the rows in the table. TableDataConglomerate.checkAddConstraintViolations( this, table, rows, INITIALLY_IMMEDIATE); // Add that we altered this table in the journal MasterTableDataSource master = findVisibleTable(table_name, false); if (master == null) { throw new StatementException( "Table '" + table_name + "' doesn't exist."); } // Log in the journal that this transaction touched the table_id. 
int table_id = master.getTableID(); journal.entryAddTouchedTable(table_id); // Log in the journal that we dropped this table. journal.entryTableConstraintAlter(table_id); } /** * Compacts the table with the given name within this transaction. If the * table doesn't exist then an exception is thrown. */ public void compactTable(TableName table_name) { // Find the master table. MasterTableDataSource current_table = findVisibleTable(table_name, false); if (current_table == null) { throw new StatementException( "Table '" + table_name + "' doesn't exist."); } // If the table is worth compacting, or the table is a // V1MasterTableDataSource if (current_table.isWorthCompacting()) { // The view of this table within this transaction. IndexSet index_set = getIndexSetForTable(current_table); // Drop the current table dropTable(table_name); // And copy to the new table copyTable(current_table, index_set); } } /** * Returns true if the conglomerate commit procedure should check for * dirty selects and produce a transaction error. A dirty select is when * a query reads information from a table that is effected by another table * during a transaction. This in itself will not cause data * consistancy problems but for strict conformance to SERIALIZABLE * isolation level this should return true. *

* NOTE; We MUST NOT make this method serialized because it is back called * from within a commit lock in TableDataConglomerate. */ boolean transactionErrorOnDirtySelect() { return transaction_error_on_dirty_select; } /** * Sets the transaction error on dirty select for this transaction. */ void setErrorOnDirtySelect(boolean status) { transaction_error_on_dirty_select = status; } // ----- Setting/Querying constraint information ----- // PENDING: Is it worth implementing a pluggable constraint architecture // as described in the idea below. With the current implementation we // have tied a DataTableConglomerate to a specific constraint // architecture. // // IDEA: These methods delegate to the parent conglomerate which has a // pluggable architecture for setting/querying constraints. Some uses of // a conglomerate may not need integrity constraints or may implement the // mechanism for storing/querying in a different way. This provides a // useful abstraction of being enable to implement constraint behaviour // by only providing a way to set/query the constraint information in // different conglomerate uses. /** * Convenience, given a SimpleTableQuery object this will return a list of * column names in sequence that represent the columns in a group constraint. *

* 'cols' is the unsorted list of indexes in the table that represent the * group. *

* Assumes column 2 of dt is the sequence number and column 1 is the name * of the column. */ private static String[] toColumns(SimpleTableQuery dt, IntegerVector cols) { int size = cols.size(); String[] list = new String[size]; // for each n of the output list for (int n = 0; n < size; ++n) { // for each i of the input list for (int i = 0; i < size; ++i) { int row_index = cols.intAt(i); int seq_no = ((BigNumber) dt.get(2, row_index).getObject()).intValue(); if (seq_no == n) { list[n] = dt.get(1, row_index).getObject().toString(); break; } } } return list; } /** * Convenience, generates a unique constraint name. If the given constraint * name is 'null' then a new one is created, otherwise the given default * one is returned. */ private static String makeUniqueConstraintName(String name, BigNumber unique_id) { if (name == null) { name = "_ANONYMOUS_CONSTRAINT_" + unique_id.toString(); } return name; } /** * Notifies this transaction that a database object with the given name has * successfully been created. */ void databaseObjectCreated(TableName table_name) { // If this table name was dropped, then remove from the drop list boolean dropped = dropped_database_objects.remove(table_name); // If the above operation didn't remove a table name then add to the // created database objects list. if (!dropped) { created_database_objects.add(table_name); } } /** * Notifies this transaction that a database object with the given name has * successfully been dropped. */ void databaseObjectDropped(TableName table_name) { // If this table name was created, then remove from the create list boolean created = created_database_objects.remove(table_name); // If the above operation didn't remove a table name then add to the // dropped database objects list. if (!created) { dropped_database_objects.add(table_name); } } /** * Returns the normalized list of database object names created in this * transaction. 
*/ ArrayList getAllNamesCreated() { return created_database_objects; } /** * Returns the normalized list of database object names dropped in this * transaction. */ ArrayList getAllNamesDropped() { return dropped_database_objects; } /** * Create a new schema in this transaction. When the transaction is * committed the schema will become globally accessable. Note that any * security checks must be performed before this method is called. *

* NOTE: We must guarentee that the transaction be in exclusive mode before * this method is called. */ public void createSchema(String name, String type) { TableName table_name = TableDataConglomerate.SCHEMA_INFO_TABLE; MutableTableDataSource t = getTable(table_name); SimpleTableQuery dt = new SimpleTableQuery(t); try { // Select entries where; // sUSRSchemaInfo.name = name if (!dt.existsSingle(1, name)) { // Add the entry to the schema info table. RowData rd = new RowData(t); BigNumber unique_id = BigNumber.fromLong(nextUniqueID(table_name)); rd.setColumnDataFromObject(0, unique_id); rd.setColumnDataFromObject(1, name); rd.setColumnDataFromObject(2, type); // Third (other) column is left as null t.addRow(rd); } else { throw new StatementException("Schema already exists: " + name); } } finally { dt.dispose(); } } /** * Drops a schema from this transaction. When the transaction is committed * the schema will be dropped perminently. Note that any security checks * must be performed before this method is called. *

* NOTE: We must guarentee that the transaction be in exclusive mode before * this method is called. */ public void dropSchema(String name) { TableName table_name = TableDataConglomerate.SCHEMA_INFO_TABLE; MutableTableDataSource t = getTable(table_name); SimpleTableQuery dt = new SimpleTableQuery(t); // Drop a single entry from dt where column 1 = name boolean b = dt.deleteSingle(1, name); dt.dispose(); if (!b) { throw new StatementException("Schema doesn't exists: " + name); } } /** * Returns true if the schema exists within this transaction. */ public boolean schemaExists(String name) { TableName table_name = TableDataConglomerate.SCHEMA_INFO_TABLE; MutableTableDataSource t = getTable(table_name); SimpleTableQuery dt = new SimpleTableQuery(t); // Returns true if there's a single entry in dt where column 1 = name boolean b = dt.existsSingle(1, name); dt.dispose(); return b; } /** * Resolves the case of the given schema name if the database is performing * case insensitive identifier matching. Returns a SchemaDef object that * identifiers the schema. Returns null if the schema name could not be * resolved. 
*/ public SchemaDef resolveSchemaCase(String name, boolean ignore_case) { // The list of schema SimpleTableQuery dt = new SimpleTableQuery( getTable(TableDataConglomerate.SCHEMA_INFO_TABLE)); try { RowEnumeration e = dt.rowEnumeration(); if (ignore_case) { SchemaDef result = null; while (e.hasMoreRows()) { int row_index = e.nextRowIndex(); String cur_name = dt.get(1, row_index).getObject().toString(); if (name.equalsIgnoreCase(cur_name)) { if (result != null) { throw new StatementException( "Ambiguous schema name: '" + name + "'"); } String type = dt.get(2, row_index).getObject().toString(); result = new SchemaDef(cur_name, type); } } return result; } else { // if (!ignore_case) while (e.hasMoreRows()) { int row_index = e.nextRowIndex(); String cur_name = dt.get(1, row_index).getObject().toString(); if (name.equals(cur_name)) { String type = dt.get(2, row_index).getObject().toString(); return new SchemaDef(cur_name, type); } } // Not found return null; } } finally { dt.dispose(); } } /** * Returns an array of SchemaDef objects for each schema currently setup in * the database. */ public SchemaDef[] getSchemaList() { // The list of schema SimpleTableQuery dt = new SimpleTableQuery( getTable(TableDataConglomerate.SCHEMA_INFO_TABLE)); RowEnumeration e = dt.rowEnumeration(); SchemaDef[] arr = new SchemaDef[dt.getRowCount()]; int i = 0; while (e.hasMoreRows()) { int row_index = e.nextRowIndex(); String cur_name = dt.get(1, row_index).getObject().toString(); String cur_type = dt.get(2, row_index).getObject().toString(); arr[i] = new SchemaDef(cur_name, cur_type); ++i; } dt.dispose(); return arr; } /** * Sets a persistent variable of the database that becomes a committed * change once this transaction is committed. The variable can later be * retrieved with a call to the 'getPersistantVar' method. A persistant * var is created if it doesn't exist in the DatabaseVars table otherwise * it is overwritten. 
*/ public void setPersistentVar(String variable, String value) { TableName table_name = TableDataConglomerate.PERSISTENT_VAR_TABLE; MutableTableDataSource t = getTable(table_name); SimpleTableQuery dt = new SimpleTableQuery(t); dt.setVar(0, new Object[] { variable, value }); dt.dispose(); } /** * Returns the value of the persistent variable with the given name or null * if it doesn't exist. */ public String getPersistantVar(String variable) { TableName table_name = TableDataConglomerate.PERSISTENT_VAR_TABLE; MutableTableDataSource t = getTable(table_name); SimpleTableQuery dt = new SimpleTableQuery(t); String val = dt.getVar(1, 0, variable).toString(); dt.dispose(); return val; } /** * Creates a new sequence generator with the given TableName and * initializes it with the given details. This does NOT check if the * given name clashes with an existing database object. */ public void createSequenceGenerator( TableName name, long start_value, long increment_by, long min_value, long max_value, long cache, boolean cycle) { SequenceManager.createSequenceGenerator(this, name, start_value, increment_by, min_value, max_value, cache, cycle); // Notify that this database object has been created databaseObjectCreated(name); } /** * Drops an existing sequence generator with the given name. */ public void dropSequenceGenerator(TableName name) { SequenceManager.dropSequenceGenerator(this, name); // Flush the sequence manager flushSequenceManager(name); // Notify that this database object has been dropped databaseObjectDropped(name); } /** * Adds a unique constraint to the database which becomes perminant when * the transaction is committed. Columns in a table that are defined as * unique are prevented from being duplicated by the engine. *

* NOTE: Security checks for adding constraints must be checked for at a * higher layer. *

* NOTE: We must guarentee that the transaction be in exclusive mode before * this method is called. */ public void addUniqueConstraint(TableName table_name, String[] cols, short deferred, String constraint_name) { TableName tn1 = TableDataConglomerate.UNIQUE_INFO_TABLE; TableName tn2 = TableDataConglomerate.UNIQUE_COLS_TABLE; MutableTableDataSource t = getTable(tn1); MutableTableDataSource tcols = getTable(tn2); try { // Insert a value into UNIQUE_INFO_TABLE RowData rd = new RowData(t); BigNumber unique_id = BigNumber.fromLong(nextUniqueID(tn1)); constraint_name = makeUniqueConstraintName(constraint_name, unique_id); rd.setColumnDataFromObject(0, unique_id); rd.setColumnDataFromObject(1, constraint_name); rd.setColumnDataFromObject(2, table_name.getSchema()); rd.setColumnDataFromObject(3, table_name.getName()); rd.setColumnDataFromObject(4, BigNumber.fromInt(deferred)); t.addRow(rd); // Insert the columns for (int i = 0; i < cols.length; ++i) { rd = new RowData(tcols); rd.setColumnDataFromObject(0, unique_id); // unique id rd.setColumnDataFromObject(1, cols[i]); // column name rd.setColumnDataFromObject(2, BigNumber.fromInt(i)); // sequence number tcols.addRow(rd); } } catch (DatabaseConstraintViolationException e) { // Constraint violation when inserting the data. Check the type and // wrap around an appropriate error message. if (e.getErrorCode() == DatabaseConstraintViolationException.UNIQUE_VIOLATION) { // This means we gave a constraint name that's already being used // for a primary key. throw new StatementException( "Unique constraint name '" + constraint_name + "' is already being used."); } throw e; } } /** * Adds a foreign key constraint to the database which becomes perminent * when the transaction is committed. A foreign key represents a referential * link from one table to another (may be the same table). The 'table_name', * 'cols' args represents the object to link from. The 'ref_table', * 'ref_cols' args represents the object to link to. 
   * The update rules are for specifying cascading delete/update rules.  The
   * deferred arg is for IMMEDIATE/DEFERRED checking.
   * <p>
   * NOTE: Security checks for adding constraints must be checked for at a
   *   higher layer.
   * <p>
   * NOTE: We must guarentee that the transaction be in exclusive mode before
   *   this method is called.
   */
  public void addForeignKeyConstraint(TableName table, String[] cols,
                                      TableName ref_table, String[] ref_cols,
                                      String delete_rule, String update_rule,
                                      short deferred, String constraint_name) {
    TableName tn1 = TableDataConglomerate.FOREIGN_INFO_TABLE;
    TableName tn2 = TableDataConglomerate.FOREIGN_COLS_TABLE;
    MutableTableDataSource t = getTable(tn1);
    MutableTableDataSource tcols = getTable(tn2);

    try {
      // If 'ref_columns' empty then set to primary key for referenced table,
      // ISSUE: What if primary key changes after the fact?
      if (ref_cols.length == 0) {
        ColumnGroup set = queryTablePrimaryKeyGroup(this, ref_table);
        if (set == null) {
          throw new StatementException(
            "No primary key defined for referenced table '" + ref_table + "'");
        }
        ref_cols = set.columns;
      }

      // The key and referenced column lists must pair up one-to-one.
      if (cols.length != ref_cols.length) {
        throw new StatementException("Foreign key reference '" + table +
          "' -> '" + ref_table + "' does not have an equal number of " +
          "column terms.");
      }

      // If delete or update rule is 'SET NULL' then check the foreign key
      // columns are not constrained as 'NOT NULL'
      if (delete_rule.equals("SET NULL") || update_rule.equals("SET NULL")) {
        DataTableDef table_def = getDataTableDef(table);
        for (int i = 0; i < cols.length; ++i) {
          DataTableColumnDef column_def =
                         table_def.columnAt(table_def.findColumnName(cols[i]));
          if (column_def.isNotNull()) {
            throw new StatementException("Foreign key reference '" + table +
              "' -> '" + ref_table + "' update or delete triggered " +
              "action is SET NULL for columns that are constrained as " +
              "NOT NULL.");
          }
        }
      }

      // Insert a value into FOREIGN_INFO_TABLE
      // (anonymous constraint names are generated from the unique id)
      RowData rd = new RowData(t);
      BigNumber unique_id = BigNumber.fromLong(nextUniqueID(tn1));
      constraint_name = makeUniqueConstraintName(constraint_name, unique_id);
      rd.setColumnDataFromObject(0, unique_id);
      rd.setColumnDataFromObject(1, constraint_name);
      rd.setColumnDataFromObject(2, table.getSchema());
      rd.setColumnDataFromObject(3, table.getName());
      rd.setColumnDataFromObject(4, ref_table.getSchema());
      rd.setColumnDataFromObject(5, ref_table.getName());
      rd.setColumnDataFromObject(6, update_rule);
      rd.setColumnDataFromObject(7, delete_rule);
      rd.setColumnDataFromObject(8, BigNumber.fromInt(deferred));
      t.addRow(rd);

      // Insert the columns
      // (one row per column pair in FOREIGN_COLS_TABLE, keyed by unique_id)
      for (int i = 0; i < cols.length; ++i) {
        rd = new RowData(tcols);
        rd.setColumnDataFromObject(0, unique_id);            // unique id
        rd.setColumnDataFromObject(1, cols[i]);              // column name
        rd.setColumnDataFromObject(2, ref_cols[i]);          // ref column name
        rd.setColumnDataFromObject(3, BigNumber.fromInt(i)); // sequence number
        tcols.addRow(rd);
      }

    }
    catch (DatabaseConstraintViolationException e) {
      // Constraint violation when inserting the data.  Check the type and
      // wrap around an appropriate error message.
      if (e.getErrorCode() ==
                     DatabaseConstraintViolationException.UNIQUE_VIOLATION) {
        // This means we gave a constraint name that's already being used
        // for a primary key.
        throw new StatementException("Foreign key constraint name '" +
                               constraint_name + "' is already being used.");
      }
      throw e;
    }
  }

  /**
   * Adds a primary key constraint that becomes perminent when the transaction
   * is committed.  A primary key represents a set of columns in a table
   * that are constrained to be unique and can not be null.  If the
   * constraint name parameter is 'null' a primary key constraint is created
   * with a unique constraint name.
   *

* NOTE: Security checks for adding constraints must be checked for at a * higher layer. *

* NOTE: We must guarentee that the transaction be in exclusive mode before * this method is called. */ public void addPrimaryKeyConstraint(TableName table_name, String[] cols, short deferred, String constraint_name) { TableName tn1 = TableDataConglomerate.PRIMARY_INFO_TABLE; TableName tn2 = TableDataConglomerate.PRIMARY_COLS_TABLE; MutableTableDataSource t = getTable(tn1); MutableTableDataSource tcols = getTable(tn2); try { // Insert a value into PRIMARY_INFO_TABLE RowData rd = new RowData(t); BigNumber unique_id = BigNumber.fromLong(nextUniqueID(tn1)); constraint_name = makeUniqueConstraintName(constraint_name, unique_id); rd.setColumnDataFromObject(0, unique_id); rd.setColumnDataFromObject(1, constraint_name); rd.setColumnDataFromObject(2, table_name.getSchema()); rd.setColumnDataFromObject(3, table_name.getName()); rd.setColumnDataFromObject(4, BigNumber.fromInt(deferred)); t.addRow(rd); // Insert the columns for (int i = 0; i < cols.length; ++i) { rd = new RowData(tcols); rd.setColumnDataFromObject(0, unique_id); // unique id rd.setColumnDataFromObject(1, cols[i]); // column name rd.setColumnDataFromObject(2, BigNumber.fromInt(i)); // Sequence number tcols.addRow(rd); } } catch (DatabaseConstraintViolationException e) { // Constraint violation when inserting the data. Check the type and // wrap around an appropriate error message. if (e.getErrorCode() == DatabaseConstraintViolationException.UNIQUE_VIOLATION) { // This means we gave a constraint name that's already being used // for a primary key. throw new StatementException("Primary key constraint name '" + constraint_name + "' is already being used."); } throw e; } } /** * Adds a check expression that becomes perminent when the transaction * is committed. A check expression is an expression that must evaluate * to true for all records added/updated in the database. *

* NOTE: Security checks for adding constraints must be checked for at a * higher layer. *

* NOTE: We must guarentee that the transaction be in exclusive mode before * this method is called. */ public void addCheckConstraint(TableName table_name, Expression expression, short deferred, String constraint_name) { TableName tn = TableDataConglomerate.CHECK_INFO_TABLE; MutableTableDataSource t = getTable(tn); int col_count = t.getDataTableDef().columnCount(); try { // Insert check constraint data. BigNumber unique_id = BigNumber.fromLong(nextUniqueID(tn)); constraint_name = makeUniqueConstraintName(constraint_name, unique_id); RowData rd = new RowData(t); rd.setColumnDataFromObject(0, unique_id); rd.setColumnDataFromObject(1, constraint_name); rd.setColumnDataFromObject(2, table_name.getSchema()); rd.setColumnDataFromObject(3, table_name.getName()); rd.setColumnDataFromObject(4, new String(expression.text())); rd.setColumnDataFromObject(5, BigNumber.fromInt(deferred)); if (col_count > 6) { // Serialize the check expression ByteLongObject serialized_expression = ObjectTranslator.serialize(expression); rd.setColumnDataFromObject(6, serialized_expression); } t.addRow(rd); } catch (DatabaseConstraintViolationException e) { // Constraint violation when inserting the data. Check the type and // wrap around an appropriate error message. if (e.getErrorCode() == DatabaseConstraintViolationException.UNIQUE_VIOLATION) { // This means we gave a constraint name that's already being used. throw new StatementException("Check constraint name '" + constraint_name + "' is already being used."); } throw e; } } /** * Drops all the constraints defined for the given table. This is a useful * function when dropping a table from the database. *

* NOTE: Security checks that the user can drop constraints must be checke at * a higher layer. *

* NOTE: We must guarentee that the transaction be in exclusive mode before * this method is called. */ public void dropAllConstraintsForTable(TableName table_name) { ColumnGroup primary = queryTablePrimaryKeyGroup(this, table_name); ColumnGroup[] uniques = queryTableUniqueGroups(this, table_name); CheckExpression[] expressions = queryTableCheckExpressions(this, table_name); ColumnGroupReference[] refs = queryTableForeignKeyReferences(this, table_name); if (primary != null) { dropPrimaryKeyConstraintForTable(table_name, primary.name); } for (int i = 0; i < uniques.length; ++i) { dropUniqueConstraintForTable(table_name, uniques[i].name); } for (int i = 0; i < expressions.length; ++i) { dropCheckConstraintForTable(table_name, expressions[i].name); } for (int i = 0; i < refs.length; ++i) { dropForeignKeyReferenceConstraintForTable(table_name, refs[i].name); } } /** * Drops the named constraint from the transaction. Used when altering * table schema. Returns the number of constraints that were removed from * the system. If this method returns 0 then it indicates there is no * constraint with the given name in the table. *

* NOTE: Security checks that the user can drop constraints must be checke at * a higher layer. *

* NOTE: We must guarentee that the transaction be in exclusive mode before * this method is called. */ public int dropNamedConstraint(TableName table_name, String constraint_name) { int drop_count = 0; if (dropPrimaryKeyConstraintForTable(table_name, constraint_name)) { ++drop_count; } if (dropUniqueConstraintForTable(table_name, constraint_name)) { ++drop_count; } if (dropCheckConstraintForTable(table_name, constraint_name)) { ++drop_count; } if (dropForeignKeyReferenceConstraintForTable(table_name, constraint_name)) { ++drop_count; } return drop_count; } /** * Drops the primary key constraint for the given table. Used when altering * table schema. If 'constraint_name' is null this method will search for * the primary key of the table name. Returns true if the primary key * constraint was dropped (the constraint existed). *

* NOTE: Security checks that the user can drop constraints must be checke at * a higher layer. *

* NOTE: We must guarentee that the transaction be in exclusive mode before * this method is called. */ public boolean dropPrimaryKeyConstraintForTable( TableName table_name, String constraint_name) { MutableTableDataSource t = getTable(TableDataConglomerate.PRIMARY_INFO_TABLE); MutableTableDataSource t2 = getTable(TableDataConglomerate.PRIMARY_COLS_TABLE); SimpleTableQuery dt = new SimpleTableQuery(t); // The info table SimpleTableQuery dtcols = new SimpleTableQuery(t2); // The columns try { IntegerVector data; if (constraint_name != null) { // Returns the list of indexes where column 1 = constraint name // and column 2 = schema name data = dt.selectIndexesEqual(1, constraint_name, 2, table_name.getSchema()); } else { // Returns the list of indexes where column 3 = table name // and column 2 = schema name data = dt.selectIndexesEqual(3, table_name.getName(), 2, table_name.getSchema()); } if (data.size() > 1) { throw new Error("Assertion failed: multiple primary key for: " + table_name); } else if (data.size() == 1) { int row_index = data.intAt(0); // The id TObject id = dt.get(0, row_index); // All columns with this id IntegerVector ivec = dtcols.selectIndexesEqual(0, id); // Delete from the table dtcols.deleteRows(ivec); dt.deleteRows(data); return true; } // data.size() must be 0 so no constraint was found to drop. return false; } finally { dtcols.dispose(); dt.dispose(); } } /** * Drops a single named unique constraint from the given table. Returns * true if the unique constraint was dropped (the constraint existed). *

* NOTE: Security checks that the user can drop constraints must be checke at * a higher layer. *

* NOTE: We must guarentee that the transaction be in exclusive mode before * this method is called. */ public boolean dropUniqueConstraintForTable( TableName table, String constraint_name) { MutableTableDataSource t = getTable(TableDataConglomerate.UNIQUE_INFO_TABLE); MutableTableDataSource t2 = getTable(TableDataConglomerate.UNIQUE_COLS_TABLE); SimpleTableQuery dt = new SimpleTableQuery(t); // The info table SimpleTableQuery dtcols = new SimpleTableQuery(t2); // The columns try { // Returns the list of indexes where column 1 = constraint name // and column 2 = schema name IntegerVector data = dt.selectIndexesEqual(1, constraint_name, 2, table.getSchema()); if (data.size() > 1) { throw new Error("Assertion failed: multiple unique constraint name: " + constraint_name); } else if (data.size() == 1) { int row_index = data.intAt(0); // The id TObject id = dt.get(0, row_index); // All columns with this id IntegerVector ivec = dtcols.selectIndexesEqual(0, id); // Delete from the table dtcols.deleteRows(ivec); dt.deleteRows(data); return true; } // data.size() == 0 so the constraint wasn't found return false; } finally { dtcols.dispose(); dt.dispose(); } } /** * Drops a single named check constraint from the given table. Returns true * if the check constraint was dropped (the constraint existed). *

* NOTE: Security checks that the user can drop constraints must be checke at * a higher layer. *

* NOTE: We must guarentee that the transaction be in exclusive mode before * this method is called. */ public boolean dropCheckConstraintForTable( TableName table, String constraint_name) { MutableTableDataSource t = getTable(TableDataConglomerate.CHECK_INFO_TABLE); SimpleTableQuery dt = new SimpleTableQuery(t); // The info table try { // Returns the list of indexes where column 1 = constraint name // and column 2 = schema name IntegerVector data = dt.selectIndexesEqual(1, constraint_name, 2, table.getSchema()); if (data.size() > 1) { throw new Error("Assertion failed: multiple check constraint name: " + constraint_name); } else if (data.size() == 1) { // Delete the check constraint dt.deleteRows(data); return true; } // data.size() == 0 so the constraint wasn't found return false; } finally { dt.dispose(); } } /** * Drops a single named foreign key reference from the given table. Returns * true if the foreign key reference constraint was dropped (the constraint * existed). *

* NOTE: Security checks that the user can drop constraints must be checke at * a higher layer. *

* NOTE: We must guarentee that the transaction be in exclusive mode before * this method is called. */ public boolean dropForeignKeyReferenceConstraintForTable( TableName table, String constraint_name) { MutableTableDataSource t = getTable(TableDataConglomerate.FOREIGN_INFO_TABLE); MutableTableDataSource t2 = getTable(TableDataConglomerate.FOREIGN_COLS_TABLE); SimpleTableQuery dt = new SimpleTableQuery(t); // The info table SimpleTableQuery dtcols = new SimpleTableQuery(t2); // The columns try { // Returns the list of indexes where column 1 = constraint name // and column 2 = schema name IntegerVector data = dt.selectIndexesEqual(1, constraint_name, 2, table.getSchema()); if (data.size() > 1) { throw new Error("Assertion failed: multiple foreign key constraint " + "name: " + constraint_name); } else if (data.size() == 1) { int row_index = data.intAt(0); // The id TObject id = dt.get(0, row_index); // All columns with this id IntegerVector ivec = dtcols.selectIndexesEqual(0, id); // Delete from the table dtcols.deleteRows(ivec); dt.deleteRows(data); return true; } // data.size() == 0 so the constraint wasn't found return false; } finally { dtcols.dispose(); dt.dispose(); } } /** * Returns the list of tables (as a TableName array) that are dependant * on the data in the given table to maintain referential consistancy. The * list includes the tables referenced as foreign keys, and the tables * that reference the table as a foreign key. *

* This is a useful query for determining ahead of time the tables that * require a read lock when inserting/updating a table. A table will require * a read lock if the operation needs to query it for potential referential * integrity violations. */ public static TableName[] queryTablesRelationallyLinkedTo( SimpleTransaction transaction, TableName table) { ArrayList list = new ArrayList(); ColumnGroupReference[] refs = queryTableForeignKeyReferences(transaction, table); for (int i = 0; i < refs.length; ++i) { TableName tname = refs[i].ref_table_name; if (!list.contains(tname)) { list.add(tname); } } refs = queryTableImportedForeignKeyReferences(transaction, table); for (int i = 0; i < refs.length; ++i) { TableName tname = refs[i].key_table_name; if (!list.contains(tname)) { list.add(tname); } } return (TableName[]) list.toArray(new TableName[list.size()]); } /** * Returns a set of unique groups that are constrained to be unique for * the given table in this transaction. For example, if columns ('name') * and ('number', 'document_rev') are defined as unique, this will return * an array of two groups that represent unique columns in the given * table. 
*/ public static ColumnGroup[] queryTableUniqueGroups( SimpleTransaction transaction, TableName table_name) { TableDataSource t = transaction.getTableDataSource(TableDataConglomerate.UNIQUE_INFO_TABLE); TableDataSource t2 = transaction.getTableDataSource(TableDataConglomerate.UNIQUE_COLS_TABLE); SimpleTableQuery dt = new SimpleTableQuery(t); // The info table SimpleTableQuery dtcols = new SimpleTableQuery(t2); // The columns ColumnGroup[] groups; try { // Returns the list indexes where column 3 = table name // and column 2 = schema name IntegerVector data = dt.selectIndexesEqual(3, table_name.getName(), 2, table_name.getSchema()); groups = new ColumnGroup[data.size()]; for (int i = 0; i < data.size(); ++i) { TObject id = dt.get(0, data.intAt(i)); // Select all records with equal id IntegerVector cols = dtcols.selectIndexesEqual(0, id); // Put into a group. ColumnGroup group = new ColumnGroup(); // constraint name group.name = dt.get(1, data.intAt(i)).getObject().toString(); group.columns = toColumns(dtcols, cols); // the list of columns group.deferred = ((BigNumber) dt.get(4, data.intAt(i)).getObject()).shortValue(); groups[i] = group; } } finally { dt.dispose(); dtcols.dispose(); } return groups; } /** * Returns a set of primary key groups that are constrained to be unique * for the given table in this transaction (there can be only 1 primary * key defined for a table). Returns null if there is no primary key * defined for the table. 
*/ public static ColumnGroup queryTablePrimaryKeyGroup( SimpleTransaction transaction, TableName table_name) { TableDataSource t = transaction.getTableDataSource(TableDataConglomerate.PRIMARY_INFO_TABLE); TableDataSource t2 = transaction.getTableDataSource(TableDataConglomerate.PRIMARY_COLS_TABLE); SimpleTableQuery dt = new SimpleTableQuery(t); // The info table SimpleTableQuery dtcols = new SimpleTableQuery(t2); // The columns try { // Returns the list indexes where column 3 = table name // and column 2 = schema name IntegerVector data = dt.selectIndexesEqual(3, table_name.getName(), 2, table_name.getSchema()); if (data.size() > 1) { throw new Error("Assertion failed: multiple primary key for: " + table_name); } else if (data.size() == 1) { int row_index = data.intAt(0); // The id TObject id = dt.get(0, row_index); // All columns with this id IntegerVector ivec = dtcols.selectIndexesEqual(0, id); // Make it in to a columns object ColumnGroup group = new ColumnGroup(); group.name = dt.get(1, row_index).getObject().toString(); group.columns = toColumns(dtcols, ivec); group.deferred = ((BigNumber) dt.get(4, row_index).getObject()).shortValue(); return group; } else { return null; } } finally { dt.dispose(); dtcols.dispose(); } } /** * Returns a set of check expressions that are constrained over all new * columns added to the given table in this transaction. For example, * we may want a column called 'serial_number' to be constrained as * CHECK serial_number LIKE '___-________-___'. 
*/ public static CheckExpression[] queryTableCheckExpressions( SimpleTransaction transaction, TableName table_name) { TableDataSource t = transaction.getTableDataSource(TableDataConglomerate.CHECK_INFO_TABLE); SimpleTableQuery dt = new SimpleTableQuery(t); // The info table CheckExpression[] checks; try { // Returns the list indexes where column 3 = table name // and column 2 = schema name IntegerVector data = dt.selectIndexesEqual(3, table_name.getName(), 2, table_name.getSchema()); checks = new CheckExpression[data.size()]; for (int i = 0; i < checks.length; ++i) { int row_index = data.intAt(i); CheckExpression check = new CheckExpression(); check.name = dt.get(1, row_index).getObject().toString(); check.deferred = ((BigNumber) dt.get(5, row_index).getObject()).shortValue(); // Is the deserialized version available? if (t.getDataTableDef().columnCount() > 6) { ByteLongObject sexp = (ByteLongObject) dt.get(6, row_index).getObject(); if (sexp != null) { try { // Deserialize the expression check.expression = (Expression) ObjectTranslator.deserialize(sexp); } catch (Throwable e) { // We weren't able to deserialize the expression so report the // error to the log transaction.Debug().write(Lvl.WARNING, Transaction.class, "Unable to deserialize the check expression. " + "The error is: " + e.getMessage()); transaction.Debug().write(Lvl.WARNING, Transaction.class, "Parsing the check expression instead."); check.expression = null; } } } // Otherwise we need to parse it from the string if (check.expression == null) { Expression exp = Expression.parse( dt.get(4, row_index).getObject().toString()); check.expression = exp; } checks[i] = check; } } finally { dt.dispose(); } return checks; } /** * Returns an array of column references in the given table that represent * foreign key references. For example, say a foreign reference has been * set up in the given table as follows;

   *   FOREIGN KEY (customer_id) REFERENCES Customer (id)
   * 

* This method will return the column group reference * Order(customer_id) -> Customer(id). *

* This method is used to check that a foreign key reference actually points * to a valid record in the referenced table as expected. */ public static ColumnGroupReference[] queryTableForeignKeyReferences( SimpleTransaction transaction, TableName table_name) { TableDataSource t = transaction.getTableDataSource(TableDataConglomerate.FOREIGN_INFO_TABLE); TableDataSource t2 = transaction.getTableDataSource(TableDataConglomerate.FOREIGN_COLS_TABLE); SimpleTableQuery dt = new SimpleTableQuery(t); // The info table SimpleTableQuery dtcols = new SimpleTableQuery(t2); // The columns ColumnGroupReference[] groups; try { // Returns the list indexes where column 3 = table name // and column 2 = schema name IntegerVector data = dt.selectIndexesEqual(3, table_name.getName(), 2, table_name.getSchema()); groups = new ColumnGroupReference[data.size()]; for (int i = 0; i < data.size(); ++i) { int row_index = data.intAt(i); // The foreign key id TObject id = dt.get(0, row_index); // The referenced table TableName ref_table_name = new TableName( dt.get(4, row_index).getObject().toString(), dt.get(5, row_index).getObject().toString()); // Select all records with equal id IntegerVector cols = dtcols.selectIndexesEqual(0, id); // Put into a group. 
ColumnGroupReference group = new ColumnGroupReference(); // constraint name group.name = dt.get(1, row_index).getObject().toString(); group.key_table_name = table_name; group.ref_table_name = ref_table_name; group.update_rule = dt.get(6, row_index).getObject().toString(); group.delete_rule = dt.get(7, row_index).getObject().toString(); group.deferred = ((BigNumber) dt.get(8, row_index).getObject()).shortValue(); int cols_size = cols.size(); String[] key_cols = new String[cols_size]; String[] ref_cols = new String[cols_size]; for (int n = 0; n < cols_size; ++n) { for (int p = 0; p < cols_size; ++p) { int cols_index = cols.intAt(p); if (((BigNumber) dtcols.get(3, cols_index).getObject()).intValue() == n) { key_cols[n] = dtcols.get(1, cols_index).getObject().toString(); ref_cols[n] = dtcols.get(2, cols_index).getObject().toString(); break; } } } group.key_columns = key_cols; group.ref_columns = ref_cols; groups[i] = group; } } finally { dt.dispose(); dtcols.dispose(); } return groups; } /** * Returns an array of column references in the given table that represent * foreign key references that reference columns in the given table. This * is a reverse mapping of the 'queryTableForeignKeyReferences' method. For * example, say a foreign reference has been set up in any table as follows; *

   *   [ In table Order ]
   *   FOREIGN KEY (customer_id) REFERENCE Customer (id)
   * 

* And the table name we are querying is 'Customer' then this method will * return the column group reference * Order(customer_id) -> Customer(id). *

* This method is used to check that a reference isn't broken when we remove * a record (for example, removing a Customer that has references to it will * break integrity). */ public static ColumnGroupReference[] queryTableImportedForeignKeyReferences( SimpleTransaction transaction, TableName ref_table_name) { TableDataSource t = transaction.getTableDataSource(TableDataConglomerate.FOREIGN_INFO_TABLE); TableDataSource t2 = transaction.getTableDataSource(TableDataConglomerate.FOREIGN_COLS_TABLE); SimpleTableQuery dt = new SimpleTableQuery(t); // The info table SimpleTableQuery dtcols = new SimpleTableQuery(t2); // The columns ColumnGroupReference[] groups; try { // Returns the list indexes where column 5 = ref table name // and column 4 = ref schema name IntegerVector data = dt.selectIndexesEqual(5,ref_table_name.getName(), 4,ref_table_name.getSchema()); groups = new ColumnGroupReference[data.size()]; for (int i = 0; i < data.size(); ++i) { int row_index = data.intAt(i); // The foreign key id TObject id = dt.get(0, row_index); // The referencee table TableName table_name = new TableName( dt.get(2, row_index).getObject().toString(), dt.get(3, row_index).getObject().toString()); // Select all records with equal id IntegerVector cols = dtcols.selectIndexesEqual(0, id); // Put into a group. 
ColumnGroupReference group = new ColumnGroupReference(); // constraint name group.name = dt.get(1, row_index).getObject().toString(); group.key_table_name = table_name; group.ref_table_name = ref_table_name; group.update_rule = dt.get(6, row_index).getObject().toString(); group.delete_rule = dt.get(7, row_index).getObject().toString(); group.deferred = ((BigNumber) dt.get(8, row_index).getObject()).shortValue(); int cols_size = cols.size(); String[] key_cols = new String[cols_size]; String[] ref_cols = new String[cols_size]; for (int n = 0; n < cols_size; ++n) { for (int p = 0; p < cols_size; ++p) { int cols_index = cols.intAt(p); if (((BigNumber) dtcols.get(3, cols_index).getObject()).intValue() == n) { key_cols[n] = dtcols.get(1, cols_index).getObject().toString(); ref_cols[n] = dtcols.get(2, cols_index).getObject().toString(); break; } } } group.key_columns = key_cols; group.ref_columns = ref_cols; groups[i] = group; } } finally { dt.dispose(); dtcols.dispose(); } return groups; } // ----- Transaction close operations ----- /** * Closes and marks a transaction as committed. Any changes made by this * transaction are seen by all transactions created after this method * returns. *

* This method will fail under the following circumstances: *

    *
  1. There are any rows deleted in this transaction that were deleted * by another successfully committed transaction. *
  2. There were rows added in another committed transaction that would * change the result of the search clauses committed by this transaction. *
* The first check is not too difficult to check for. The second is very * difficult however we need it to ensure TRANSACTION_SERIALIZABLE isolation * is enforced. We may have to simplify this by throwing a transaction * exception if the table has had any changes made to it during this * transaction. *

* This should only be called under an exclusive lock on the connection. */ public void closeAndCommit() throws TransactionException { if (!closed) { try { closed = true; // Get the conglomerate to do this commit. conglomerate.processCommit(this, getVisibleTables(), selected_from_tables, touched_tables, journal); } finally { cleanup(); } } } /** * Closes and rolls back a transaction as if the commands the transaction ran * never happened. This will not throw a transaction exception. *

* This should only be called under an exclusive lock on the connection. */ public void closeAndRollback() { if (!closed) { try { closed = true; // Notify the conglomerate that this transaction has closed. conglomerate.processRollback(this, touched_tables, journal); } finally { cleanup(); } } } /** * Cleans up this transaction. */ private void cleanup() { getSystem().stats().decrement("Transaction.count"); // Dispose of all the IndexSet objects created by this transaction. disposeAllIndices(); // Dispose all the table we touched try { for (int i = 0; i < touched_tables.size(); ++i) { MutableTableDataSource source = (MutableTableDataSource) touched_tables.get(i); source.dispose(); } } catch (Throwable e) { Debug().writeException(e); } getSystem().stats().increment("Transaction.cleanup"); conglomerate = null; touched_tables = null; journal = null; } /** * Disposes this transaction without rolling back or committing the changes. * Care should be taken when using this - it must only be used for simple * transactions that are short lived and have not modified the database. */ void dispose() { if (!isReadOnly()) { throw new RuntimeException( "Assertion failed - tried to dispose a non read-only transaction."); } if (!closed) { closed = true; cleanup(); } } /** * Finalize, we should close the transaction. */ public void finalize() throws Throwable { super.finalize(); if (!closed) { Debug().write(Lvl.ERROR, this, "Transaction not closed!"); closeAndRollback(); } } // ---------- Transaction inner classes ---------- /** * A list of DataTableDef system table definitions for tables internal to * the transaction. 
*/ private final static DataTableDef[] INTERNAL_DEF_LIST; static { INTERNAL_DEF_LIST = new DataTableDef[3]; INTERNAL_DEF_LIST[0] = GTTableColumnsDataSource.DEF_DATA_TABLE_DEF; INTERNAL_DEF_LIST[1] = GTTableInfoDataSource.DEF_DATA_TABLE_DEF; INTERNAL_DEF_LIST[2] = GTProductDataSource.DEF_DATA_TABLE_DEF; } /** * A static internal table info for internal tables to the transaction. * This implementation includes all the dynamically generated system tables * that are tied to information in a transaction. */ private class TransactionInternalTables extends AbstractInternalTableInfo { /** * Constructor. */ public TransactionInternalTables() { super("SYSTEM TABLE", INTERNAL_DEF_LIST); } // ---------- Implemented ---------- public MutableTableDataSource createInternalTable(int index) { if (index == 0) { return new GTTableColumnsDataSource(Transaction.this).init(); } else if (index == 1) { return new GTTableInfoDataSource(Transaction.this).init(); } else if (index == 2) { return new GTProductDataSource(Transaction.this).init(); } else { throw new RuntimeException(); } } } /** * A group of columns as used by the constraint system. A ColumnGroup is * a simple list of columns in a table. */ public static class ColumnGroup { /** * The name of the group (the constraint name). */ public String name; /** * The list of columns that make up the group. */ public String[] columns; /** * Whether this is deferred or initially immediate. */ public short deferred; } /** * Represents a constraint expression to check. */ public static class CheckExpression { /** * The name of the check expression (the constraint name). */ public String name; /** * The expression to check. */ public Expression expression; /** * Whether this is deferred or initially immediate. */ public short deferred; } /** * Represents a reference from a group of columns in one table to a group of * columns in another table. The is used to represent a foreign key * reference. 
*/ public static class ColumnGroupReference { /** * The name of the group (the constraint name). */ public String name; /** * The key table name. */ public TableName key_table_name; /** * The list of columns that make up the key. */ public String[] key_columns; /** * The referenced table name. */ public TableName ref_table_name; /** * The list of columns that make up the referenced group. */ public String[] ref_columns; /** * The update rule. */ public String update_rule; /** * The delete rule. */ public String delete_rule; /** * Whether this is deferred or initially immediate. */ public short deferred; } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/TransactionException.java000066400000000000000000000041741330501023400267320ustar00rootroot00000000000000/** * com.mckoi.database.TransactionException 22 Nov 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; /** * Thrown when a transaction error happens. This can only be thrown during * the commit process of a transaction. * * @author Tobias Downer */ public class TransactionException extends Exception { // The types of transaction exceptions. /** * Thrown when a transaction deletes or updates a row that another * transaction has committed a change to. 
*/ public final static int ROW_REMOVE_CLASH = 1; /** * Thrown when a transaction drops or alters a table that another transaction * has committed a change to. */ public final static int TABLE_REMOVE_CLASH = 2; /** * Thrown when a transaction adds/removes/modifies rows from a table that * has been dropped by another transaction. */ public final static int TABLE_DROPPED = 3; /** * Thrown when a transaction selects data from a table that has committed * changes to it from another transaction. */ public final static int DIRTY_TABLE_SELECT = 4; /** * Thrown when a transaction conflict occurs and would cause duplicate tables * to be created. */ public final static int DUPLICATE_TABLE = 5; /** * The type of error. */ private int type; public TransactionException(int type, String message) { super(message); this.type = type; } /** * Returns the type of transaction error this is. */ public int getType() { return type; } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/TransactionJournal.java000066400000000000000000000264651330501023400264150ustar00rootroot00000000000000/** * com.mckoi.database.TransactionJournal 19 Nov 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package com.mckoi.database; import com.mckoi.util.IntegerVector; import com.mckoi.util.BlockIntegerList; import java.util.ArrayList; /** * The list of all primitive operations to the database that a transaction * performed. It includes the list of all rows added or removed to all tables, * and the tables created and dropped and any table that had constraint * modifications. *

* This journal is updated inside a Transaction. When the transaction is * completed, this journal is used both to determine if the transaction * can be committed, and also to update the changes to the data that a * transaction has made. *

* THREADING: The journal update commands are synchronized because they need * to be atomic operations and can be accessed by multiple threads. * * @author Tobias Downer */ final class TransactionJournal { /** * Journal commands. */ static byte TABLE_ADD = 1; // Add a row to a table. // (params: table_id, row_index) static byte TABLE_REMOVE = 2; // Remove a row from a table. // (params: table_id, row_index) static byte TABLE_CREATE = 3; // Create a new table. // (params: table_id) static byte TABLE_DROP = 4; // Drop a table. // (params: table_id) static byte TABLE_CONSTRAINT_ALTER = 5; // Alter constraints of a table. // (params: table_id) /** * The number of entries in this journal. */ private int journal_entries; /** * The list of table's that have been touched by this transaction. A table * is touched if the 'getTable' method in the transaction is used to * get the table. This means even if a table is just read from, the * journal will record that the table was touched. *

* This object records the 'table_id' of the touched tables in a sorted * list. */ private IntegerVector touched_tables; /** * A byte[] array that represents the set of commands a transaction * performed on a table. */ private byte[] command_journal; /** * An IntegerVector that is filled with parameters from the command journal. * For example, a 'TABLE_ADD' journal log will have as parameters the * table id the row was added to, and the row_index that was added. */ private IntegerVector command_parameters; /** * Optimization, these flags are set to true when various types of journal * entries are made to the transaction journal. */ private boolean has_added_table_rows, has_removed_table_rows, has_created_tables, has_dropped_tables, has_constraint_alterations; /** * Constructs a blank journal. */ TransactionJournal() { journal_entries = 0; command_journal = new byte[16]; command_parameters = new IntegerVector(32); touched_tables = new IntegerVector(8); has_added_table_rows = false; has_removed_table_rows = false; has_created_tables = false; has_dropped_tables = false; has_constraint_alterations = false; } /** * Adds a command to the journal. */ private void addCommand(byte command) { if (journal_entries >= command_journal.length) { // Resize command array. int grow_size = Math.min(4000, journal_entries); byte[] new_command_journal = new byte[journal_entries + grow_size]; System.arraycopy(command_journal, 0, new_command_journal, 0, journal_entries); command_journal = new_command_journal; } command_journal[journal_entries] = command; ++journal_entries; } /** * Adds a parameter to the journal command parameters. */ private void addParameter(int param) { command_parameters.addInt(param); } /** * Logs in this journal that the transaction touched the given table id. */ synchronized void entryAddTouchedTable(int table_id) { int pos = touched_tables.sortedIndexOf(table_id); // If table_id already in the touched table list. 
if (pos < touched_tables.size() && touched_tables.intAt(pos) == table_id) { return; } // If position to insert >= size of the touched tables set then add to // the end of the set. if (pos >= touched_tables.size()) { touched_tables.addInt(table_id); } else { // Otherwise, insert into sorted order. touched_tables.insertIntAt(table_id, pos); } } /** * Makes a journal entry that a table entry has been added to the table with * the given id. */ synchronized void entryAddTableRow(int table_id, int row_index) { // has_added_table_rows = true; addCommand(TABLE_ADD); addParameter(table_id); addParameter(row_index); } /** * Makes a journal entry that a table entry has been removed from the table * with the given id. */ synchronized void entryRemoveTableRow(int table_id, int row_index) { // has_removed_table_rows = true; addCommand(TABLE_REMOVE); addParameter(table_id); addParameter(row_index); } /** * Makes a journal entry that a table with the given 'table_id' has been * created by this transaction. */ synchronized void entryTableCreate(int table_id) { has_created_tables = true; addCommand(TABLE_CREATE); addParameter(table_id); } /** * Makes a journal entry that a table with the given 'table_id' has been * dropped by this transaction. */ synchronized void entryTableDrop(int table_id) { has_dropped_tables = true; addCommand(TABLE_DROP); addParameter(table_id); } /** * Makes a journal entry that a table with the given 'table_id' has been * altered by this transaction. */ synchronized void entryTableConstraintAlter(int table_id) { has_constraint_alterations = true; addCommand(TABLE_CONSTRAINT_ALTER); addParameter(table_id); } /** * Generates an array of MasterTableJournal objects that specify the * changes that occur to each table affected by this transaction. Each array * element represents a change to an individual table in the conglomerate * that changed as a result of this transaction. *

* This is used when a transaction successfully commits and we need to log * the transaction changes with the master table. *

* If no changes occurred to a table, then no entry is returned here. */ MasterTableJournal[] makeMasterTableJournals() { ArrayList table_journals = new ArrayList(); int param_index = 0; MasterTableJournal master_journal = null; for (int i = 0 ; i < journal_entries; ++i) { byte c = command_journal[i]; if (c == TABLE_ADD || c == TABLE_REMOVE) { int table_id = command_parameters.intAt(param_index); int row_index = command_parameters.intAt(param_index + 1); param_index += 2; // Do we already have this table journal? if (master_journal == null || master_journal.getTableID() != table_id) { // Try to find the journal in the list. int size = table_journals.size(); master_journal = null; for (int n = 0; n < size && master_journal == null; ++n) { MasterTableJournal test_journal = (MasterTableJournal) table_journals.get(n); if (test_journal.getTableID() == table_id) { master_journal = test_journal; } } // Not found so add to list. if (master_journal == null) { master_journal = new MasterTableJournal(table_id); table_journals.add(master_journal); } } // Add this change to the table journal. master_journal.addEntry(c, row_index); } else if (c == TABLE_CREATE || c == TABLE_DROP || c == TABLE_CONSTRAINT_ALTER) { param_index += 1; } else { throw new Error("Unknown journal command."); } } // Return the array. return (MasterTableJournal[]) table_journals.toArray( new MasterTableJournal[table_journals.size()]); } /** * Returns the list of tables id's that were dropped by this journal. */ IntegerVector getTablesDropped() { IntegerVector dropped_tables = new IntegerVector(); // Optimization, quickly return empty set if we know there are no tables. 
if (!has_dropped_tables) { return dropped_tables; } int param_index = 0; for (int i = 0 ; i < journal_entries; ++i) { byte c = command_journal[i]; if (c == TABLE_ADD || c == TABLE_REMOVE) { param_index += 2; } else if (c == TABLE_CREATE || c == TABLE_CONSTRAINT_ALTER) { param_index += 1; } else if (c == TABLE_DROP) { dropped_tables.addInt(command_parameters.intAt(param_index)); param_index += 1; } else { throw new Error("Unknown journal command."); } } return dropped_tables; } /** * Returns the list of tables id's that were created by this journal. */ IntegerVector getTablesCreated() { IntegerVector created_tables = new IntegerVector(); // Optimization, quickly return empty set if we know there are no tables. if (!has_created_tables) { return created_tables; } int param_index = 0; for (int i = 0 ; i < journal_entries; ++i) { byte c = command_journal[i]; if (c == TABLE_ADD || c == TABLE_REMOVE) { param_index += 2; } else if (c == TABLE_DROP || c == TABLE_CONSTRAINT_ALTER) { param_index += 1; } else if (c == TABLE_CREATE) { created_tables.addInt(command_parameters.intAt(param_index)); param_index += 1; } else { throw new Error("Unknown journal command."); } } return created_tables; } /** * Returns the list of tables id's that were constraint altered by this * journal. */ IntegerVector getTablesConstraintAltered() { IntegerVector caltered_tables = new IntegerVector(); // Optimization, quickly return empty set if we know there are no tables. 
if (!has_constraint_alterations) { return caltered_tables; } int param_index = 0; for (int i = 0 ; i < journal_entries; ++i) { byte c = command_journal[i]; if (c == TABLE_ADD || c == TABLE_REMOVE) { param_index += 2; } else if (c == TABLE_DROP || c == TABLE_CREATE) { param_index += 1; } else if (c == TABLE_CONSTRAINT_ALTER) { caltered_tables.addInt(command_parameters.intAt(param_index)); param_index += 1; } else { throw new Error("Unknown journal command."); } } return caltered_tables; } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/TransactionModificationListener.java000066400000000000000000000041561330501023400311070ustar00rootroot00000000000000/** * com.mckoi.database.TransactionModificationListener 07 Mar 2003 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; /** * A listener that is notified of table modification events made by a * transaction, both immediately inside a transaction and when a transaction * commits. These events can occur either immediately before or immediately * after the data is modified or during a commit. * * @author Tobias Downer */ public interface TransactionModificationListener { /** * An action for when changes to a table are committed. This event occurs * after constraint checks, and before the change is actually committed to * the database. 
If this method generates an exception then the change * is rolled back and any changes made by the transaction are lost. This * action is generated inside a 'commit lock' of the conglomerate, and * therefore care should be taken with the performance of this method. *

* The event object provides access to a SimpleTransaction object that is a * read-only view of the database in its committed state (if this operation * is successful). The transaction can be used to perform any last minute * deferred constraint checks. *

* This action is useful for last minute abortion of a transaction, or for * updating cache information. It can not be used as a triggering mechanism * and should never call back to user code. */ void tableCommitChange(TableCommitModificationEvent event); } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/TransactionSystem.java000066400000000000000000001034461330501023400262620ustar00rootroot00000000000000/** * com.mckoi.database.TransactionSystem 24 Mar 2002 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import com.mckoi.store.LoggingBufferManager; import com.mckoi.util.Stats; import com.mckoi.util.StringUtil; import com.mckoi.util.LogWriter; import com.mckoi.debug.*; import com.mckoi.database.control.DBConfig; import java.io.File; import java.io.IOException; import java.io.PrintWriter; import java.io.Writer; import java.util.Date; import java.util.List; import java.util.ArrayList; import java.util.Properties; /** * A class that provides information and global functions for the transaction * layer in the engine. Shared information includes configuration details, * logging, etc. * * @author Tobias Downer */ public class TransactionSystem { /** * The stats object that keeps track of database statistics. */ private final Stats stats = new Stats(); /** * A logger to output any debugging messages. 
* NOTE: This MUST be final, because other objects may retain a reference * to the object. If it is not final, then different objects will be * logging to different places if this reference is changed. */ private final DefaultDebugLogger logger; /** * The ResourceBundle that contains properties of the entire database * system. */ private DBConfig config = null; /** * The path in the file system for the database files. Note that this will * be null if the database does not exist in a local file system. For this * reason it's best not to write code that relies on the use of this value. */ private File db_path; /** * Set to true if lookup comparison lists are enabled. */ private boolean lookup_comparison_list_enabled = false; /** * Set to true if the database is in read only mode. This is set from the * configuration file. */ private boolean read_only_access = false; /** * Set to true if locking checks should be performed each time a table is * accessed. */ private boolean table_lock_check = false; /** * Set to false if there is conservative index memory storage. If true, * all root selectable schemes are stored behind a soft reference that will * be garbage collected. */ private boolean soft_index_storage = false; /** * If this is set to true, during boot up the engine will reindex all the * tables that weren't closed. If false, the engine will only reindex the * tables that have unchecked in modifications. */ private boolean always_reindex_dirty_tables = false; /** * Set to true if the file handles should NOT be synchronized with the * system file IO when the indices are written. If this is true, then the * database is not as fail safe, however File IO performance is improved. */ private boolean dont_synch_filesystem = false; /** * Set to true if the parser should ignore case when searching for a schema, * table or column using an identifier. 
*/ private boolean ignore_case_for_identifiers = false; /** * Transaction option, if this is true then a transaction error is generated * during commit if a transaction selects data from a table that has * committed changes to it during commit time. *

* True by default. */ private boolean transaction_error_on_dirty_select = true; /** * The DataCellCache that is a shared resource between on database's. */ private DataCellCache data_cell_cache = null; /** * The list of FunctionFactory objects that handle different functions from * SQL. */ private ArrayList function_factory_list; /** * The FunctionLookup object that can resolve a FunctionDef object to a * Function object. */ private DSFunctionLookup function_lookup; /** * The regular expression library bridge for the library we are configured * to use. */ private RegexLibrary regex_library; /** * The log directory. */ private File log_directory; /** * A LoggingBufferManager object used to manage pages of ScatteringFileStore * objects in the file system. We can configure the maximum pages and page * size via this object, so we have control over how much memory from the * heap is used for buffering. */ private LoggingBufferManager buffer_manager; /** * The underlying StoreSystem implementation that encapsulates the behaviour * for storing data persistantly. */ private StoreSystem store_system; // ---------- Low level row listeners ---------- /** * A list of table names and listeners that are notified of add and remove * events in a table. */ private ArrayList table_listeners; /** * Constructor. */ public TransactionSystem() { // Setup generate properties from the JVM. logger = new DefaultDebugLogger(); Properties p = System.getProperties(); stats.set(0, "Runtime.java.version: " + p.getProperty("java.version")); stats.set(0, "Runtime.java.vendor: " + p.getProperty("java.vendor")); stats.set(0, "Runtime.java.vm.name: " + p.getProperty("java.vm.name")); stats.set(0, "Runtime.os.name: " + p.getProperty("os.name")); stats.set(0, "Runtime.os.arch: " + p.getProperty("os.arch")); stats.set(0, "Runtime.os.version: " + p.getProperty("os.version")); table_listeners = new ArrayList(); } /** * Parses a file string to an absolute position in the file system. 
We must
   * provide the path to the root directory (eg. the directory where the
   * config bundle is located).
   */
  private static File parseFileString(File root_path, String root_info,
                                      String path_string) {
    File path = new File(path_string);
    File res;
    // If the path is absolute then return the absolute reference
    if (path.isAbsolute()) {
      res = path;
    }
    else {
      // If the root path source is the jvm then just return the path.
      if (root_info != null && root_info.equals("jvm")) {
        return path;
      }
      // If the root path source is the configuration file then
      // concat the configuration path with the path string and return it.
      else {
        res = new File(root_path, path_string);
      }
    }
    return res;
  }

  /**
   * Sets up the log file from the config information.  Debug output is
   * directed to a rolling log file only when 'debug_logs' is enabled, the
   * database is not read only, and a 'log_path' is configured.  When
   * 'debug_logs' is disabled all debug output is discarded.
   */
  private void setupLog(DBConfig config) {
    String log_path_string = config.getValue("log_path");
    String root_path_var = config.getValue("root_path");
    String read_only_access = config.getValue("read_only");
    String debug_logs = config.getValue("debug_logs");
    boolean read_only_bool = false;
    if (read_only_access != null) {
      read_only_bool = read_only_access.equalsIgnoreCase("enabled");
    }
    boolean debug_logs_bool = true;
    if (debug_logs != null) {
      debug_logs_bool = debug_logs.equalsIgnoreCase("enabled");
    }

    // Conditions for not initializing a log directory;
    //  1. read only access is enabled
    //  2. log_path is empty or not set
    if (debug_logs_bool && !read_only_bool &&
        log_path_string != null && !log_path_string.equals("")) {
      // First set up the debug information in this VM for the 'Debug' class.
      File log_path = parseFileString(config.currentPath(), root_path_var,
                                      log_path_string);
      // If the path doesn't exist the make it.
      if (!log_path.exists()) {
        log_path.mkdirs();
      }
      // Set the log directory in the DatabaseSystem
      setLogDirectory(log_path);

      LogWriter f_writer;
      File debug_log_file;
      String dlog_file_name = "";
      try {
        dlog_file_name = config.getValue("debug_log_file");
        debug_log_file = new File(log_path.getCanonicalPath(), dlog_file_name);

        // Allow log size to grow to 512k and allow 12 archives of the log
        f_writer = new LogWriter(debug_log_file, 512 * 1024, 12);
        f_writer.write("**** Debug log started: " +
                       new Date(System.currentTimeMillis()) + " ****\n");
        f_writer.flush();
      }
      catch (IOException e) {
        // NOTE(review): the IOException cause is dropped here; chaining it
        // (RuntimeException(String, Throwable)) needs a JDK >= 1.4 baseline
        // which this code base deliberately avoids assuming - verify before
        // changing.
        throw new RuntimeException(
            "Unable to open debug file '" + dlog_file_name +
            "' in path '" + log_path + "'");
      }
      setDebugOutput(f_writer);
    }

    // If 'debug_logs=disabled', don't write out any debug logs
    if (!debug_logs_bool) {
      // Otherwise set it up so the output from the logs goes to a PrintWriter
      // that doesn't do anything.  Basically - this means all log information
      // will get sent into a black hole.
      setDebugOutput(new PrintWriter(new Writer() {
        public void write(int c) throws IOException {
        }
        public void write(char cbuf[], int off, int len) throws IOException {
        }
        public void write(String str, int off, int len) throws IOException {
        }
        public void flush() throws IOException {
        }
        public void close() throws IOException {
        }
      }));
    }

    // NOTE(review): this throws NumberFormatException (or NPE) if
    // 'debug_level' is missing from the config; presumably the default
    // configuration always defines it - confirm.
    int debug_level = Integer.parseInt(config.getValue("debug_level"));
    if (debug_level == -1) {
      setDebugLevel(255);
    }
    else {
      setDebugLevel(debug_level);
    }
  }

  /**
   * Returns a configuration value (trimmed), or the default if it's not
   * found.
   */
  public final String getConfigString(String property, String default_val) {
    String v = config.getValue(property);
    if (v == null) {
      return default_val;
    }
    return v.trim();
  }

  /**
   * Returns a configuration value, or the default if it's not found.
*/
  public final int getConfigInt(String property, int default_val) {
    String v = config.getValue(property);
    if (v == null) {
      return default_val;
    }
    return Integer.parseInt(v);
  }

  /**
   * Returns a configuration value, or the default if it's not found.  A
   * value is true only when it is the string "enabled" (case-insensitive).
   */
  public final boolean getConfigBoolean(String property, boolean default_val) {
    String v = config.getValue(property);
    if (v == null) {
      return default_val;
    }
    return v.trim().equalsIgnoreCase("enabled");
  }

  /**
   * Given a regular expression string representing a particular library, this
   * will return the name of the class to use as a bridge between the library
   * and Mckoi.  Returns null if the library name is invalid.
   */
  private static String regexStringToClass(String lib) {
    if (lib.equals("java.util.regexp")) {
      return "com.mckoi.database.regexbridge.JavaRegex";
    }
    else if (lib.equals("org.apache.regexp")) {
      return "com.mckoi.database.regexbridge.ApacheRegex";
    }
    else if (lib.equals("gnu.regexp")) {
      return "com.mckoi.database.regexbridge.GNURegex";
    }
    else {
      return null;
    }
  }

  /**
   * Inits the TransactionSystem with the configuration properties of the
   * system.
   * This can only be called once, and should be called at database boot time.
   * Sets up logging, the store system (file or heap), the data cell cache,
   * the buffer manager, the regular expression bridge and any configured
   * function factories.  A null config leaves the system unconfigured apart
   * from the function lookup structures.
   */
  public void init(DBConfig config) {

    function_factory_list = new ArrayList();
    function_lookup = new DSFunctionLookup();

    if (config != null) {
      this.config = config;

      // Set the read_only property
      read_only_access = getConfigBoolean("read_only", false);

      // Setup the log
      setupLog(config);

      // The storage encapsulation that has been configured.
      String storage_system = getConfigString("storage_system", "v1file");

      boolean is_file_store_mode;

      // Construct the system store.
      if (storage_system.equalsIgnoreCase("v1file")) {
        Debug().write(Lvl.MESSAGE, this,
                      "Storage System: v1 file storage mode.");

        // The path where the database data files are stored.
        String database_path = getConfigString("database_path", "./data");
        // The root path variable
        String root_path_var = getConfigString("root_path", "jvm");

        // Set the absolute database path
        db_path = parseFileString(config.currentPath(), root_path_var,
                                  database_path);

        store_system = new V1FileStoreSystem(this, db_path, read_only_access);
        is_file_store_mode = true;
      }

      else if (storage_system.equalsIgnoreCase("v1javaheap")) {
        Debug().write(Lvl.MESSAGE, this,
                      "Storage System: v1 Java heap storage mode.");
        store_system = new V1HeapStoreSystem();
        is_file_store_mode = false;
      }

      else {
        String error_msg = "Unknown storage_system property: " +
                           storage_system;
        Debug().write(Lvl.ERROR, this, error_msg);
        throw new RuntimeException(error_msg);
      }

      // Register the internal function factory,
      addFunctionFactory(new InternalFunctionFactory());

      String status;  // NOTE(review): unused local - left for byte-compat.

      // Set up the DataCellCache from the values in the configuration
      int max_cache_size = 0, max_cache_entry_size = 0;

      max_cache_size = getConfigInt("data_cache_size", 0);
      max_cache_entry_size = getConfigInt("max_cache_entry_size", 0);

      // The cache is only enabled for sane combinations of size limits.
      if (max_cache_size >= 4096 &&
          max_cache_entry_size >= 16 &&
          max_cache_entry_size < (max_cache_size / 2)) {

        Debug().write(Lvl.MESSAGE, this,
                      "Internal Data Cache size:          " + max_cache_size);
        Debug().write(Lvl.MESSAGE, this,
                      "Internal Data Cache max cell size: " +
                      max_cache_entry_size);

        // Find a prime hash size depending on the size of the cache.
        int hash_size = DataCellCache.closestPrime(max_cache_size / 55);

        // Set up the data_cell_cache
        data_cell_cache = new DataCellCache(this,
                                            max_cache_size,
                                            max_cache_entry_size, hash_size);
      }
      else {
        Debug().write(Lvl.MESSAGE, this, "Internal Data Cache disabled.");
      }

      // Are lookup comparison lists enabled?  (Hard-wired off; the config
      // lookup is intentionally commented out.)
//      lookup_comparison_list_enabled =
//                            getConfigBoolean("lookup_comparison_list", false);
      lookup_comparison_list_enabled = false;
      Debug().write(Lvl.MESSAGE, this,
                    "lookup_comparison_list = " +
                    lookup_comparison_list_enabled);

      // Should we open the database in read only mode?
      Debug().write(Lvl.MESSAGE, this,
                    "read_only = " + read_only_access);
      if (read_only_access) stats.set(1, "DatabaseSystem.read_only");

//      // Hard Sync file system whenever we update index files?
//      if (is_file_store_mode) {
//        dont_synch_filesystem = getConfigBoolean("dont_synch_filesystem", false);
//        Debug().write(Lvl.MESSAGE, this,
//                      "dont_synch_filesystem = " + dont_synch_filesystem);
//      }

      // Generate transaction error if dirty selects are detected?
      transaction_error_on_dirty_select =
                getConfigBoolean("transaction_error_on_dirty_select", true);
      Debug().write(Lvl.MESSAGE, this, "transaction_error_on_dirty_select = " +
                    transaction_error_on_dirty_select);

      // Case insensitive identifiers?
      ignore_case_for_identifiers =
                getConfigBoolean("ignore_case_for_identifiers", false);
      Debug().write(Lvl.MESSAGE, this,
                "ignore_case_for_identifiers = " + ignore_case_for_identifiers);

      // ---- Store system setup ----

      // See if this JVM supports the java.nio interface
      // (first introduced in 1.4)
      if (is_file_store_mode) {
        boolean nio_interface_available;
        try {
          Class.forName("java.nio.channels.FileChannel");
          nio_interface_available = true;
          Debug().write(Lvl.MESSAGE, this, "Java NIO API is available.");
        }
        catch (ClassNotFoundException e) {
          nio_interface_available = false;
          Debug().write(Lvl.MESSAGE, this, "Java NIO API is not available.");
        }

        // Bug workaround - there are problems with memory mapped NIO under
        // 95/98 which we workaround by disabling NIO support on 95/98.
        boolean nio_bugged_os;
        String os_name = System.getProperties().getProperty("os.name");
        nio_bugged_os = (os_name.equalsIgnoreCase("Windows 95") ||
                         os_name.equalsIgnoreCase("Windows 98"));

        // Get the safety level of the file system where 10 is the most safe
        // and 1 is the least safe.
        int io_safety_level = getConfigInt("io_safety_level", 10);
        if (io_safety_level < 1 || io_safety_level > 10) {
          Debug().write(Lvl.MESSAGE, this,
              "Invalid io_safety_level value.  Setting to the most safe level.");
          io_safety_level = 10;
        }
        Debug().write(Lvl.MESSAGE, this,
                      "io_safety_level = " + io_safety_level);

        // Logging is disabled when safety level is less or equal to 2
        boolean enable_logging = true;
        if (io_safety_level <= 2) {
          Debug().write(Lvl.MESSAGE, this,
                        "Disabling journaling and file sync.");
          enable_logging = false;
        }

        // If the configuration property 'use_nio_if_available' is enabled then
        // we setup a LoggingBufferManager that uses NIO (default to 'false')
        boolean use_nio_if_available =
                             getConfigBoolean("use_nio_if_available", false);
        boolean force_use_nio = getConfigBoolean("force_use_nio", false);

        String api_to_use;
        int page_size;
        int max_pages;

        // NIO is unconditionally disabled here regardless of configuration;
        // presumably due to reliability problems - see the OS workaround
        // above.
        final boolean disable_nio = true;

        // If NIO interface available and configuration tells us to use NIO
        // and we are not running on an OS where NIO is buggy, we set the NIO
        // options here.
        if ( !disable_nio &&
             ( force_use_nio ||
               ( nio_interface_available &&
                 use_nio_if_available &&
                 !nio_bugged_os ))) {
          Debug().write(Lvl.MESSAGE, this,
                        "Using NIO API for OS memory mapped file access.");
          page_size = getConfigInt("buffered_nio_page_size", 1024 * 1024);
          max_pages = getConfigInt("buffered_nio_max_pages", 64);
          api_to_use = "Java NIO";
        }
        else {
          Debug().write(Lvl.MESSAGE, this,
                        "Using stardard IO API for heap buffered file access.");
          page_size = getConfigInt("buffered_io_page_size", 8192);
          max_pages = getConfigInt("buffered_io_max_pages", 256);
          api_to_use = "Java IO";
        }

        // Output this information to the log
        Debug().write(Lvl.MESSAGE, this,
                      "[Buffer Manager] Using IO API: " + api_to_use);
        Debug().write(Lvl.MESSAGE, this,
                      "[Buffer Manager] Page Size: " + page_size);
        Debug().write(Lvl.MESSAGE, this,
                      "[Buffer Manager] Max pages: " + max_pages);

        // Journal path is currently always the same as database path.
        final File journal_path = db_path;
        // Max slice size is 1 GB for file scattering class
        final long max_slice_size = 16384 * 65536;
        // First file extention is 'koi'
        final String first_file_ext = "koi";

        // Set up the BufferManager
        buffer_manager = new LoggingBufferManager(
            db_path, journal_path, read_only_access, max_pages, page_size,
            first_file_ext, max_slice_size, Debug(), enable_logging);
        // ^ This is a big constructor.  It sets up the logging manager and
        //   sets a resource store data accessor converter to a scattering
        //   implementation with a max slice size of 1 GB

        // Start the buffer manager.
        try {
          buffer_manager.start();
        }
        catch (IOException e) {
          Debug().write(Lvl.ERROR, this, "Error starting buffer manager");
          Debug().writeException(Lvl.ERROR, e);
          throw new Error("IO Error: " + e.getMessage());
        }

      }

      // What regular expression library are we using?
      // If we want the engine to support other regular expression libraries
      // then include the additional entries here.

      // Test to see if the regex API exists
      boolean regex_api_exists;
      try {
        Class.forName("java.util.regex.Pattern");
        regex_api_exists = true;
      }
      catch (ClassNotFoundException e) {
        // Internal API doesn't exist
        regex_api_exists = false;
        Debug().write(Lvl.MESSAGE, this, "Java regex API not available.");
      }

      String regex_bridge;
      String lib_used;

      String force_lib = getConfigString("force_regex_library", null);

      // Are we forcing a particular regular expression library?
      if (force_lib != null) {
        lib_used = force_lib;
        // Convert the library string to a class name
        regex_bridge = regexStringToClass(force_lib);
      }
      else {
        String lib = getConfigString("regex_library", null);
        lib_used = lib;
        // Use the standard Java 1.4 regular expression library if it is
        // found.
        if (regex_api_exists) {
          regex_bridge = "com.mckoi.database.regexbridge.JavaRegex";
          lib_used = "java.util.regexp";
        }
//        else if (lib_used != null) {
//          regex_bridge = regexStringToClass(lib_used);
//        }
        else {
          regex_bridge = null;
          lib_used = null;
        }
      }

      if (regex_bridge != null) {
        try {
          Class c = Class.forName(regex_bridge);
          regex_library = (RegexLibrary) c.newInstance();
          Debug().write(Lvl.MESSAGE, this,
                        "Using regex bridge: " + lib_used);
        }
        catch (Throwable e) {
          Debug().write(Lvl.ERROR, this,
                        "Unable to load regex bridge: " + regex_bridge);
          Debug().writeException(Lvl.WARNING, e);
        }
      }
      else {
        if (lib_used != null) {
          Debug().write(Lvl.ERROR, this,
                        "Regex library not known: " + lib_used);
        }
        Debug().write(Lvl.MESSAGE, this, "Regex features disabled.");
      }

      // ---------- Plug ins ---------

      try {
        // The 'function_factories' property - a ';' separated list of
        // FunctionFactory class names to instantiate and register.
        String function_factories =
                                 getConfigString("function_factories", null);
        if (function_factories != null) {
          List factories = StringUtil.explode(function_factories, ";");
          for (int i = 0; i < factories.size(); ++i) {
            String factory_class = factories.get(i).toString();
            Class c = Class.forName(factory_class);
            FunctionFactory fun_factory = (FunctionFactory) c.newInstance();
            addFunctionFactory(fun_factory);
            Debug().write(Lvl.MESSAGE, this,
                          "Successfully added function factory: " +
                          factory_class);
          }
        }
        else {
          Debug().write(Lvl.MESSAGE, this,
                        "No 'function_factories' config property found.");
          // If resource missing, do nothing...
        }
      }
      catch (Throwable e) {
        Debug().write(Lvl.ERROR, this,
                "Error parsing 'function_factories' configuration property.");
        Debug().writeException(e);
      }

      // Flush the contents of the function lookup object.
      flushCachedFunctionLookup();

    }

  }

  /**
   * Hack - set up the DataCellCache in DatabaseSystem so we can use the
   * MasterTableDataSource object without having to boot a new DatabaseSystem.
   */
  public void setupRowCache(int max_cache_size, int max_cache_entry_size) {
    // Set up the data_cell_cache
    data_cell_cache =
             new DataCellCache(this, max_cache_size, max_cache_entry_size);
  }

  /**
   * Returns true if the database is in read only mode.  In read only mode,
   * any 'write' operations are not permitted.
   */
  public boolean readOnlyAccess() {
    return read_only_access;
  }

  /**
   * Returns the path of the database in the local file system if the database
   * exists within the local file system.  If the database is not within the
   * local file system then null is returned.  It is recommended this method
   * is not used unless for legacy or compatibility purposes.
   */
  public File getDatabasePath() {
    return db_path;
  }

  /**
   * Returns true if the database should perform checking of table locks.
   */
  public boolean tableLockingEnabled() {
    return table_lock_check;
  }

  /**
   * Returns true if we should generate lookup caches in InsertSearch
   * otherwise returns false.
*/
  public boolean lookupComparisonListEnabled() {
    return lookup_comparison_list_enabled;
  }

  /**
   * Returns true if all table indices are kept behind a soft reference that
   * can be garbage collected.
   */
  public boolean softIndexStorage() {
    return soft_index_storage;
  }

  /**
   * Returns the status of the 'always_reindex_dirty_tables' property.
   */
  public boolean alwaysReindexDirtyTables() {
    return always_reindex_dirty_tables;
  }

  /**
   * Returns true if we shouldn't synchronize with the file system when
   * important indexing information is flushed to the disk.
   */
  public boolean dontSynchFileSystem() {
    return dont_synch_filesystem;
  }

  /**
   * Returns true if during commit the engine should look for any selects
   * on a modified table and fail if they are detected.
   */
  public boolean transactionErrorOnDirtySelect() {
    return transaction_error_on_dirty_select;
  }

  /**
   * Returns true if the parser should ignore case when searching for
   * schema/table/column identifiers.
   */
  public boolean ignoreIdentifierCase() {
    return ignore_case_for_identifiers;
  }

  /**
   * Returns the LoggingBufferManager object enabling us to create new file
   * stores in the file system.  This provides access to the buffer scheme
   * that has been configured.
   */
  public LoggingBufferManager getBufferManager() {
    return buffer_manager;
  }

  /**
   * Returns the regular expression library from the configuration file.
   * Throws if no library was resolved during 'init'.
   */
  public RegexLibrary getRegexLibrary() {
    if (regex_library != null) {
      return regex_library;
    }
    throw new Error("No regular expression library found in classpath " +
                    "and/or in configuration file.");
  }

  // ---------- Store System encapsulation ----------

  /**
   * Returns the StoreSystem encapsulation being used in this database.
   */
  public final StoreSystem storeSystem() {
    return store_system;
  }

  // ---------- Debug logger methods ----------

  /**
   * Sets the Writer output for the debug logger.
*/
  public final void setDebugOutput(java.io.Writer writer) {
//    System.out.println("**** Setting debug log output ****" + writer);
//    System.out.println(logger);
    logger.setOutput(writer);
  }

  /**
   * Sets the debug minimum level that is output to the logger.
   */
  public final void setDebugLevel(int level) {
    logger.setDebugLevel(level);
  }

  /**
   * Returns the DebugLogger object that is used to log debug messages.  This
   * method must always return a debug logger that we can log to.
   */
  public final DebugLogger Debug() {
    return logger;
  }

  // ---------- Function factories ----------

  /**
   * Registers a new FunctionFactory with the database system.  The function
   * factories are used to resolve a function name into a Function object.
   * Function factories are checked in the order they are added to the
   * database system.
   */
  public void addFunctionFactory(FunctionFactory factory) {
    synchronized (function_factory_list) {
      function_factory_list.add(factory);
    }
    // Note: init is called outside the lock - it may take arbitrary time.
    factory.init();
  }

  /**
   * Flushes the 'FunctionLookup' object returned by the getFunctionLookup
   * method.  This should be called if the function factory list has been
   * modified in some way.
   */
  public void flushCachedFunctionLookup() {
    FunctionFactory[] factories;
    synchronized (function_factory_list) {
      factories = (FunctionFactory[]) function_factory_list.toArray(
                      new FunctionFactory[function_factory_list.size()]);
    }
    function_lookup.flushContents(factories);
  }

  /**
   * Returns a FunctionLookup object that will search through the function
   * factories in this database system and find and resolve a function.  The
   * returned object may throw an exception from the 'generateFunction' method
   * if the FunctionDef is invalid.  For example, if the number of parameters
   * is incorrect or the name can not be found.
   */
  public FunctionLookup getFunctionLookup() {
    return function_lookup;
  }

  // ---------- System preparers ----------

  /**
   * Given a Transaction.CheckExpression, this will prepare the expression and
   * return a new prepared CheckExpression.
 The default implementation
   * is to do nothing.  However, a sub-class of the system may choose to
   * prepare the expression, such as resolving the functions via the function
   * lookup, and resolving the sub-queries, etc.
   */
  public Transaction.CheckExpression prepareTransactionCheckConstraint(
                  DataTableDef table_def, Transaction.CheckExpression check) {

//    ExpressionPreparer expression_preparer = getFunctionExpressionPreparer();
    // Resolve the expression to this table and row and evaluate the
    // check constraint.
    Expression exp = check.expression;
    table_def.resolveColumns(ignoreIdentifierCase(), exp);
//    try {
//      // Prepare the functions
//      exp.prepare(expression_preparer);
//    }
//    catch (Exception e) {
//      Debug().writeException(e);
//      throw new RuntimeException(e.getMessage());
//    }

    return check;
  }

  // ---------- Database System Statistics Methods ----------

  /**
   * Returns a com.mckoi.util.Stats object that can be used to keep track
   * of database statistics for this VM.
   */
  public final Stats stats() {
    return stats;
  }

  // ---------- Log directory management ----------

  /**
   * Sets the log directory.  This should preferably be called during
   * initialization.  If the log directory is not set or is set to 'null'
   * then no logging to files occurs.
   */
  public final void setLogDirectory(File log_path) {
    this.log_directory = log_path;
  }

  /**
   * Returns the current log directory or null if no logging should occur.
   */
  public final File getLogDirectory() {
    return log_directory;
  }

  // ---------- Cache Methods ----------

  /**
   * Returns a DataCellCache object that is a shared resource between all
   * databases running on this VM.  If this returns 'null' then the internal
   * cache is disabled.
   */
  DataCellCache getDataCellCache() {
    return data_cell_cache;
  }

  // ---------- Dispatch methods ----------

  /**
   * The dispatcher, lazily created by getDispatcher().
   */
  private DatabaseDispatcher dispatcher;

  /**
   * Returns the DatabaseDispatcher object.
*/
  private DatabaseDispatcher getDispatcher() {
    synchronized (this) {
      if (dispatcher == null) {
        dispatcher = new DatabaseDispatcher(this);
      }
      return dispatcher;
    }
  }

  /**
   * Creates an event object that is passed into 'postEvent' method
   * to run the given Runnable method after the time has passed.
   * <p>
   * The event created here can be safely posted on the event queue as many
   * times as you like.  It's useful to create an event as a persistent object
   * to service some event.  Just post it on the dispatcher when you want
   * it run!
   */
  Object createEvent(Runnable runnable) {
    return getDispatcher().createEvent(runnable);
  }

  /**
   * Adds a new event to be dispatched on the queue after 'time_to_wait'
   * milliseconds has passed.
   * <p>
   * 'event' must be an event object returned via 'createEvent'.
   */
  void postEvent(int time_to_wait, Object event) {
    getDispatcher().postEvent(time_to_wait, event);
  }

  /**
   * Disposes this object.  Check-points the store system, stops the buffer
   * manager and the dispatcher, and nulls out references so the system can
   * be garbage collected.
   */
  public void dispose() {
    if (buffer_manager != null) {
      try {
        // Set a check point
        store_system.setCheckPoint();
        // Stop the buffer manager
        buffer_manager.stop();
      }
      catch (IOException e) {
        System.out.println("Error stopping buffer manager.");
        e.printStackTrace();
      }
    }
    buffer_manager = null;
    regex_library = null;
    data_cell_cache = null;
    config = null;
    log_directory = null;
    function_factory_list = null;
    store_system = null;
    if (dispatcher != null) {
      dispatcher.finish();
    }
//    trigger_manager = null;
    dispatcher = null;
  }

  // ---------- Inner classes ----------

  /**
   * A FunctionLookup implementation that will look up a function from a
   * list of FunctionFactory objects provided with.
   */
  private static class DSFunctionLookup implements FunctionLookup {

    // The snapshot of factories set by flushContents; searched in order.
    private FunctionFactory[] factories;

    public synchronized Function generateFunction(FunctionDef function_def) {
      for (int i = 0; i < factories.length; ++i) {
        Function f = factories[i].generateFunction(function_def);
        if (f != null) {
          return f;
        }
      }
      return null;
    }

    public synchronized boolean isAggregate(FunctionDef function_def) {
      for (int i = 0; i < factories.length; ++i) {
        FunctionInfo f_info =
                       factories[i].getFunctionInfo(function_def.getName());
        if (f_info != null) {
          return f_info.getType() == FunctionInfo.AGGREGATE;
        }
      }
      return false;
    }

    public synchronized void flushContents(FunctionFactory[] factories) {
      this.factories = factories;
    }

  }

}
mckoisqldb-1.0.6/src/main/java/com/mckoi/database/TriggerEvent.java000066400000000000000000000037611330501023400251730ustar00rootroot00000000000000/**
 * com.mckoi.database.TriggerEvent  16 Feb 2001
 *
 * Mckoi SQL Database ( http://www.mckoi.com/database )
 * Copyright (C) 2000-2018 Diehl and Associates, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package com.mckoi.database;

/**
 * A trigger event represents a high level action that occurred in the
 * database.  A trigger event is generated by the SQL interpreter on
 * evaluation of certain types of queries.
 *
 * @author Tobias Downer
 */

public class TriggerEvent {

  /**
   * Statics that represent the different types of high layer trigger events.
   */
  public static final int INSERT = 1;
  public static final int DELETE = 2;
  public static final int UPDATE = 3;

  /**
   * The type of this event (one of INSERT, DELETE, UPDATE).
   */
  private int type;

  /**
   * The source of the trigger (eg. the table name).
   */
  private String source;

  /**
   * The number of times this event was fired.
   */
  private int count;

  /**
   * Constructs the trigger event.
   */
  public TriggerEvent(int type, String source, int count) {
    this.type = type;
    this.source = source;
    this.count = count;
  }

  /**
   * Convenience constructor for an event fired exactly once.
   */
  public TriggerEvent(int type, String source) {
    this(type, source, 1);
  }

  /**
   * Returns the type of this event.
   */
  public int getType() {
    return type;
  }

  /**
   * Returns the source of this event.
   */
  public String getSource() {
    return source;
  }

  /**
   * Returns the number of times this event was fired.
   */
  public int getCount() {
    return count;
  }

}
mckoisqldb-1.0.6/src/main/java/com/mckoi/database/TriggerListener.java000066400000000000000000000023241330501023400256720ustar00rootroot00000000000000/**
 * com.mckoi.database.TriggerListener  02 Oct 2000
 *
 * Mckoi SQL Database ( http://www.mckoi.com/database )
 * Copyright (C) 2000-2018 Diehl and Associates, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package com.mckoi.database;

/**
 * A listener that can listen for high layer trigger events.
 *
 * @author Tobias Downer
 */

public interface TriggerListener {

  /**
   * Notifies that a trigger event fired.
   *
   * @param database the DatabaseConnection that this trigger is registered
   *   for.
   * @param trigger_name the name of the trigger that fired.
   * @param trigger_evt the trigger event that was fired.
   */
  void fireTrigger(DatabaseConnection database, String trigger_name,
                   TriggerEvent trigger_evt);

}
mckoisqldb-1.0.6/src/main/java/com/mckoi/database/TriggerManager.java000066400000000000000000000162731330501023400254660ustar00rootroot00000000000000/**
 * com.mckoi.database.TriggerManager  02 Oct 2000
 *
 * Mckoi SQL Database ( http://www.mckoi.com/database )
 * Copyright (C) 2000-2018 Diehl and Associates, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package com.mckoi.database;

import java.util.*;
import com.mckoi.util.HashMapList;

/**
 * An object that manages high level trigger events within a Database context.
 * This manager is designed to manage the map between session and triggers
 * being listened for.  It is the responsibility of the language parsing
 * layer to notify this manager of trigger events.
 * <p>
 * NOTE: It is intended that this object manages events from the highest
 * layer, so it is possible that trigger events may not get to be notified if
 * queries are not evaluated properly.
 * <p>
 * NOTE: This object serves a different purpose than DataTableListener.
 * DataTableListener is guaranteed to pick up all low level access to the
 * tables.  This object is only intended as a helper for implementing a
 * trigger event dispatcher by a higher level package (eg.
 * com.mckoi.database.sql)
 * <p>
 * CONCURRENCY: This class is thread safe.  It may safely be accessed by
 * multiple threads.  Any events that are fired are put on the
 * DatabaseDispatcher thread.
 *
 * @author Tobias Downer
 */

final class TriggerManager {

  /**
   * The parent TransactionSystem object.
   */
  private TransactionSystem system;

  /**
   * Maps from the user session (User) to the list of TriggerAction objects
   * for this user.
   */
  private HashMapList listener_map;

  /**
   * Maps from the trigger source string to the list of TriggerAction
   * objects that are listening for events from this source.
   */
  private HashMapList table_map;

  /**
   * Constructor.
   */
  TriggerManager(TransactionSystem system) {
    this.system = system;
    listener_map = new HashMapList();
    table_map = new HashMapList();
  }

  /**
   * Flushes the list of TriggerEvent objects and dispatches them to the
   * users that are listening.  This is called after the given connection
   * has successfully committed and closed.
   */
  void flushTriggerEvents(final ArrayList event_list) {
    for (int i = 0; i < event_list.size(); ++i) {
      TriggerEvent evt = (TriggerEvent) event_list.get(i);
      fireTrigger(evt);
    }
  }

  /**
   * Adds a listener for an event with the given 'id' for this user session.
   * <p>
   * For example,<br>
   *   addTriggerListener(user, "my_trigger",
   *                      TriggerEvent.UPDATE, "Part", my_listener);
   * <p>
   * This listener is notified of all update events on the 'Part' table.
   * Throws if 'trigger_name' is already registered for this connection.
   */
  synchronized void addTriggerListener(DatabaseConnection database,
                 String trigger_name, int event_id, String trigger_source,
                 TriggerListener listener) {
    // Has this trigger name already been defined for this user?
    List list = listener_map.get(database);
    for (int i = 0; i < list.size(); ++i) {
      TriggerAction action = (TriggerAction) list.get(i);
      if (action.getName().equals(trigger_name)) {
        throw new Error("Duplicate trigger name '" + trigger_name + "'");
      }
    }

    TriggerAction action = new TriggerAction(database, trigger_name, event_id,
                                             trigger_source, listener);

    listener_map.put(database, action);
    table_map.put(trigger_source, action);
  }

  /**
   * Removes a trigger for the given user session.  Throws if no trigger of
   * that name is registered for the connection.
   */
  synchronized void removeTriggerListener(DatabaseConnection database,
                                          String trigger_name) {
    List list = listener_map.get(database);
    for (int i = 0; i < list.size(); ++i) {
      TriggerAction action = (TriggerAction) list.get(i);
      if (action.getName().equals(trigger_name)) {
        listener_map.remove(database, action);
        table_map.remove(action.trigger_source, action);
        return;
      }
    }
    throw new Error("Trigger name '" + trigger_name + "' not found.");
  }

  /**
   * Clears all the user triggers that have been defined.
   */
  synchronized void clearAllDatabaseConnectionTriggers(
                                               DatabaseConnection database) {
    List list = listener_map.clear(database);
    for (int i = 0; i < list.size(); ++i) {
      TriggerAction action = (TriggerAction) list.get(i);
      table_map.remove(action.trigger_source, action);
    }
  }

  /**
   * Notifies all the listeners on the event's trigger source (ie. a table)
   * that the event's type of action has happened.  Matching listeners are
   * notified asynchronously on the DatabaseDispatcher thread.
   *
   * @param evt the event carrying the source, the type (INSERT, DELETE or
   *   UPDATE) and the number of times the trigger fired.
   */
  private void fireTrigger(final TriggerEvent evt) {

    final ArrayList trig_list;
    // Get all the triggers for this trigger source,
//    System.out.println(evt.getSource());
//    System.out.println(table_map);
    synchronized (this) {
      List list = table_map.get(evt.getSource());
      if (list.size() == 0) {
        return;
      }
      // Snapshot the list so the dispatcher thread doesn't race with
      // add/remove.
      trig_list = new ArrayList(list);
    }

    // Post an event that fires the triggers for each listener.
    Runnable runner = new Runnable() {
      public void run() {
        for (int i = 0; i < trig_list.size(); ++i) {
          TriggerAction action = (TriggerAction) trig_list.get(i);
          if (evt.getType() == action.trigger_event) {
            action.listener.fireTrigger(action.database, action.trigger_name,
                                        evt);
          }
        }
      }
    };

    // Post the event to go off approx 3ms from now.
    system.postEvent(3, system.createEvent(runner));

  }

  // ---------- Inner classes ----------

  /**
   * Encapsulates the information of a trigger listener for a specific event
   * for a user.
   */
  private static class TriggerAction {

    private DatabaseConnection database;
    private String trigger_name;    // The name of the trigger.
    private TriggerListener listener;       // The trigger listener.
    private String trigger_source;  // The source of the trigger.
    private int trigger_event;      // Event we are to listen for.

    /**
     * Constructor.
     */
    TriggerAction(DatabaseConnection database, String name, int type,
                  String trigger_source, TriggerListener listener) {
      this.database = database;
      this.trigger_name = name;
      this.trigger_event = type;
      this.listener = listener;
      this.trigger_source = trigger_source;
    }

    /**
     * Returns the name of the trigger.
     */
    public String getName() {
      return trigger_name;
    }

  }

}
mckoisqldb-1.0.6/src/main/java/com/mckoi/database/User.java000066400000000000000000000066401330501023400235030ustar00rootroot00000000000000/**
 * com.mckoi.database.User  22 Jul 2000
 *
 * Mckoi SQL Database ( http://www.mckoi.com/database )
 * Copyright (C) 2000-2018 Diehl and Associates, Inc.
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import com.mckoi.util.Cache; /** * Encapsulates the information about a single user logged into the system. * The class provides access to information in the user database. *

* This object also serves as a storage for session state information. For * example, this object stores the triggers that this session has created. *

* NOTE: This object is not immutable. The same user may log into the system * and it will result in a new User object being created. * * @author Tobias Downer */ public final class User { /** * The name of the user. */ private String user_name; /** * The database object that this user is currently logged into. */ private Database database; /** * The connection string that identifies how this user is connected to the * database. */ private String connection_string; /** * The time this user connected. */ private long time_connected; /** * The last time this user executed a command on the connection. */ private long last_command_time; /** * The Constructor. This takes a user name and gets the privs for them. *

* Note that this method should only be created from within a Database * object. */ User(String user_name, Database database, String connection_string, long time_connected) { this.user_name = user_name; this.database = database; this.connection_string = connection_string; this.time_connected = time_connected; this.last_command_time = time_connected; } /** * Returns the name of the user. */ public String getUserName() { return user_name; } /** * Returns the string that describes how this user is connected to the * engine. This is set by the protocol layer. */ public String getConnectionString() { return connection_string; } /** * Returns the time the user connected. */ public long getTimeConnected() { return time_connected; } /** * Returnst the last time a command was executed by this user. */ public long getLastCommandTime() { return last_command_time; } /** * Returns the Database object that this user belongs to. */ public Database getDatabase() { return database; } /** * Refreshes the last time a command was executed by this user. */ public final void refreshLastCommandTime() { last_command_time = System.currentTimeMillis(); } /** * Logs out this user object. This will log the user out of the user manager. */ public void logout() { // Clear all triggers for this user, UserManager user_manager = database.getUserManager(); if (user_manager != null) { user_manager.userLoggedOut(this); } } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/UserAccessException.java000066400000000000000000000020141330501023400264740ustar00rootroot00000000000000/** * com.mckoi.database.UserAccessException 29 Jul 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; /** * An exception that is thrown when the user is not permitted to perform a * certain action. * * @author Tobias Downer */ public class UserAccessException extends DatabaseException { public UserAccessException(String message) { super(message); } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/UserManager.java000066400000000000000000000041371330501023400247760ustar00rootroot00000000000000/** * com.mckoi.database.UserManager 11 Nov 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import java.util.ArrayList; /** * A class that manages the list of users connected to the engine. *

* This class is thread safe, however it is recommended that the callee should * synchronize over this object when inspecting a subset of the user list. * The reason being that a user can connect or disconnect at any time. * * @author Tobias Downer */ public final class UserManager { /** * The list of User objects that are currently connected to the database * engine. */ private ArrayList user_list; /** * Constructs the UserManager. */ UserManager() { user_list = new ArrayList(); } /** * Called when a new user connects to the engine. */ synchronized void userLoggedIn(User user) { if (!user_list.contains(user)) { user_list.add(user); } else { throw new Error("UserManager already has this User instance logged in."); } } /** * Called when the user logs out of the engine. */ synchronized void userLoggedOut(User user) { user_list.remove(user); } /** * Returns the number of users that are logged in. */ public synchronized int userCount() { return user_list.size(); } /** * Returns the User object at index 'n' in the manager where 0 is the first * user. */ public synchronized User userAt(int n) { return (User) user_list.get(n); } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/V1FileStoreSystem.java000066400000000000000000000166521330501023400261020ustar00rootroot00000000000000/** * com.mckoi.database.V1FileStoreSystem 04 Feb 2003 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package com.mckoi.database; import com.mckoi.store.*; import com.mckoi.debug.Lvl; import java.io.File; import java.io.FileOutputStream; import java.io.IOException; /** * An implementation of StoreSystem that manages persistant data through the * native file system. Each store is represented by a ScatteringFileStore * object against the current path. This implementation is compatible with * versions of the database from 0.94 onwards. * * @author Tobias Downer */ class V1FileStoreSystem implements StoreSystem { /** * The name of the file extention of the file lock on this conglomerate. */ private static final String FLOCK_EXT = ".lock"; /** * The TransactionSystem that contains the various configuration options for * the database. */ private TransactionSystem system; /** * The path in the filesystem where the data files are located. */ private File path; /** * True if the stores are read-only. */ private boolean read_only; /** * The lock file. */ private FileOutputStream lock_file; /** * Constructor. */ public V1FileStoreSystem(TransactionSystem system, File path, boolean read_only) { this.system = system; this.path = path; this.read_only = read_only; // If the database path doesn't exist, create it now, if (!read_only && !path.exists()) { path.mkdirs(); } } /** * Creates the JournalledFileStore object for this table. 
*/ private JournalledFileStore createFileStore(String file_name) throws IOException { LoggingBufferManager buffer_manager = system.getBufferManager(); return new JournalledFileStore(file_name, buffer_manager, read_only); } // ---------- Implemented from StoreSystem ---------- public boolean storeExists(String name) { try { JournalledFileStore store = createFileStore(name); return store.exists(); } catch (IOException e) { system.Debug().writeException(e); throw new RuntimeException("IO Error: " + e.getMessage()); } } public Store createStore(String name) { LoggingBufferManager buffer_manager = system.getBufferManager(); if (read_only) { throw new RuntimeException( "Can not create store because system is read-only."); } try { buffer_manager.lockForWrite(); JournalledFileStore store = createFileStore(name); if (!store.exists()) { store.open(); return store; } else { throw new RuntimeException("Can not create - store with name " + name + " already exists."); } } catch (IOException e) { system.Debug().writeException(e); throw new RuntimeException("IO Error: " + e.getMessage()); } catch (InterruptedException e) { throw new Error("Interrupted: " + e.getMessage()); } finally { buffer_manager.unlockForWrite(); } } public Store openStore(String name) { LoggingBufferManager buffer_manager = system.getBufferManager(); try { buffer_manager.lockForWrite(); JournalledFileStore store = createFileStore(name); if (store.exists()) { store.open(); return store; } else { throw new RuntimeException("Can not open - store with name " + name + " does not exist."); } } catch (IOException e) { system.Debug().writeException(e); throw new RuntimeException("IO Error: " + e.getMessage()); } catch (InterruptedException e) { throw new Error("Interrupted: " + e.getMessage()); } finally { buffer_manager.unlockForWrite(); } } public boolean closeStore(Store store) { LoggingBufferManager buffer_manager = system.getBufferManager(); try { buffer_manager.lockForWrite(); ((JournalledFileStore) 
store).close(); return true; } catch (IOException e) { system.Debug().writeException(e); throw new RuntimeException("IO Error: " + e.getMessage()); } catch (InterruptedException e) { throw new Error("Interrupted: " + e.getMessage()); } finally { buffer_manager.unlockForWrite(); } } public boolean deleteStore(Store store) { LoggingBufferManager buffer_manager = system.getBufferManager(); try { buffer_manager.lockForWrite(); return ((JournalledFileStore) store).delete(); } catch (IOException e) { system.Debug().writeException(e); throw new RuntimeException("IO Error: " + e.getMessage()); } catch (InterruptedException e) { throw new Error("Interrupted: " + e.getMessage()); } finally { buffer_manager.unlockForWrite(); } } public void setCheckPoint() { try { LoggingBufferManager buffer_manager = system.getBufferManager(); buffer_manager.setCheckPoint(false); } catch (IOException e) { system.Debug().writeException(e); throw new RuntimeException("IO Error: " + e.getMessage()); } catch (InterruptedException e) { system.Debug().writeException(e); throw new RuntimeException("Interrupted Error: " + e.getMessage()); } } public void lock(String name) throws IOException { File flock_fn = new File(path, name + FLOCK_EXT); if (flock_fn.exists()) { // Okay, the file lock exists. This means either an extremely bad // crash or there is another database locked on the files. If we can // delete the lock then we can go on. system.Debug().write(Lvl.WARNING, this, "File lock file exists: " + flock_fn); boolean deleted = false; deleted = flock_fn.delete(); if (!deleted) { // If we couldn't delete, then most likely database being used. 
System.err.println("\n" + "I couldn't delete the file lock for Database '" + name + "'.\n" + "This most likely means the database is open and being used by\n" + "another process.\n" + "The lock file is: " + flock_fn + "\n\n"); throw new IOException("Couldn't delete conglomerate file lock."); } } //#IFDEF(NO_1.1) // Atomically create the file, flock_fn.createNewFile(); // Set it to delete on normal exit of the JVM. flock_fn.deleteOnExit(); //#ENDIF // Open up a stream and lock it in the OS lock_file = new FileOutputStream(flock_fn); } public void unlock(String name) throws IOException { // Close and delete the lock file. if (lock_file != null) { lock_file.close(); } // Try and delete it File flock_fn = new File(path, name + FLOCK_EXT); flock_fn.delete(); } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/V1HeapStoreSystem.java000066400000000000000000000054021330501023400260670ustar00rootroot00000000000000/** * com.mckoi.database.V1HeapStoreSystem 20 Feb 2003 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import com.mckoi.store.*; import java.io.IOException; import java.util.HashMap; /** * An implementation of StoreSystem that stores all persistent data on the * heap using HeapStore objects. * * @author Tobias Downer */ class V1HeapStoreSystem implements StoreSystem { /** * A mapping from name to Store object for this heap store system. 
*/ private HashMap name_store_map; /** * A mapping from Store object to name. */ private HashMap store_name_map; /** * Constructor. */ V1HeapStoreSystem() { name_store_map = new HashMap(); store_name_map = new HashMap(); } public boolean storeExists(String name) { return (name_store_map.get(name) != null); } public Store createStore(String name) { if (!storeExists(name)) { HeapStore store = new HeapStore(); name_store_map.put(name, store); store_name_map.put(store, name); return store; } else { throw new RuntimeException("Store exists: " + name); } } public Store openStore(String name) { HeapStore store = (HeapStore) name_store_map.get(name); if (store == null) { throw new RuntimeException("Store does not exist: " + name); } return store; } public boolean closeStore(Store store) { if (store_name_map.get(store) == null) { throw new RuntimeException("Store does not exist."); } return true; } public boolean deleteStore(Store store) { String name = (String) store_name_map.remove(store); name_store_map.remove(name); return true; } public void setCheckPoint() { // Check point logging not necessary with heap store } // ---------- Locking ---------- public void lock(String lock_name) throws IOException { // Not required because heap memory is not a shared resource that can be // accessed by multiple JVMs } public void unlock(String lock_name) throws IOException { // Not required because heap memory is not a shared resource that can be // accessed by multiple JVMs } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/V1MasterTableDataSource.java000066400000000000000000000635111330501023400271530ustar00rootroot00000000000000/** * com.mckoi.database.V1MasterTableDataSource 01 Sep 2002 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import java.util.ArrayList; import java.io.*; import com.mckoi.util.IntegerListInterface; import com.mckoi.util.ByteArrayUtil; import com.mckoi.util.UserTerminal; import com.mckoi.debug.*; /** * A MasterTableDataSource that uses IndexStore and VariableSizeDataStore as * its backing mechanism for representing the table structure in a file on * disk. *

* The MasterTableDataSource is basically backed by a VariableSizeDataStore * for data and an IndexStore for storing indexing information. * * @author Tobias Downer */ public final class V1MasterTableDataSource extends MasterTableDataSource { // ---------- State information ---------- /** * The file name of this store in the conglomerate path. */ private String file_name; /** * A VariableSizeDataStore object that physically contains the information * stored in the file system in the contents of the data source. */ private VariableSizeDataStore data_store; /** * An IndexStore object that manages the indexes for this table. */ private IndexStore index_store; /** * The object we use to serialize TObject objects. */ private final DataCellSerialization data_cell_serializer = new DataCellSerialization(); /** * The persistent object we use to read information from a row stream. */ private CellInputStream cell_in; /** * The Constructor. */ public V1MasterTableDataSource(TransactionSystem system, StoreSystem store_system, OpenTransactionList open_transactions) { super(system, store_system, open_transactions, null); cell_in = new CellInputStream(null); } /** * Returns the name of the file in the conglomerate that represents this * store in the file system. */ String getFileName() { return file_name; } /** * Returns the path of where this table is located. */ File getPath() { return getSystem().getDatabasePath(); } /** * Physically create this master table in the file system at the given * path. This will initialise the various file objects and result in a * new empty master table to store data in. *

* The 'data_sector_size' and 'index_sector_size' are for fine grain * performance and size optimization of the data files. The default * 'index_sector_size' is 1024. * * @param data_sector_size used to configure the size of the sectors in the * data files. For tables with small records this number should be low. * @param index_sector_size used to configure the size of the sectors in the * index file. For small tables it is best to keep the index sector size * low. Recommend 1024 for normal use, 128 for minimalist use. */ synchronized void create(int table_id, DataTableDef table_def, int data_sector_size, int index_sector_size) throws IOException { // Setup the internal methods setupDataTableDef(table_def); // Generate the name of the store file name. this.file_name = makeTableFileName(getSystem(), table_id, getTableName()); // Create the store. data_store = new VariableSizeDataStore(new File(getPath(), file_name), data_sector_size, Debug()); // Open the store in read/write mode data_store.open(false); // Open table indices index_store = new IndexStore( new File(getPath(), file_name + ".iid"), Debug()); // Open the table index file. index_store.create(index_sector_size); index_store.init(); // Make room for columns+1 indices in the index store file index_store.addIndexLists(table_def.columnCount() + 1, (byte) 1); index_store.flush(); // Save the table definition to the new store. saveDataTableDef(table_def); // Write the 'table_id' of this table to the reserved area of the data // store. byte[] reserved_buffer = new byte[64]; ByteArrayUtil.setInt(table_id, reserved_buffer, 0); data_store.writeReservedBuffer(reserved_buffer, 0, 64); // Set up internal state of this object this.table_id = table_id; // Load internal state loadInternal(); } /** * Returns true if the master table data source with the given filename * exists. 
*/ synchronized boolean exists(String file_name) throws IOException { VariableSizeDataStore data_store = new VariableSizeDataStore(new File(getPath(), file_name), Debug()); return data_store.exists(); } /** * Opens an existing master table from the file system at the path of the * conglomerate this belongs to. This will set up the internal state of * this object with the data read in. */ synchronized void open(String file_name) throws IOException { // Open the store. data_store = new VariableSizeDataStore( new File(getPath(), file_name), Debug()); boolean need_check = data_store.open(isReadOnly()); // Set up the internal state of this object // Get the 'table_id' of this table from the reserved area of the data // store. byte[] reserved_buffer = new byte[64]; data_store.readReservedBuffer(reserved_buffer, 0, 64); table_id = ByteArrayUtil.getInt(reserved_buffer, 0); // Set the file name. this.file_name = file_name; // Load the table definition from the store. table_def = loadDataTableDef(); // Set the column count column_count = table_def.columnCount(); // Open table indices table_indices = new MultiVersionTableIndices(getSystem(), table_def.getTableName(), table_def.columnCount()); // The column rid list cache column_rid_list = new RIDList[table_def.columnCount()]; // Open table indices index_store = new IndexStore( new File(getPath(), file_name + ".iid"), Debug()); // If the index store doesn't exist then create it. if (!index_store.exists()) { if (!isReadOnly()) { // Does the original .ijf file exist? File original_ijf = new File(getPath(), file_name + ".ijf"); if (original_ijf.exists()) { // Message String str = "Converting index file for: " + file_name; System.out.println(str); Debug().write(Lvl.INFORMATION, this, str); // NOTE: The following method leaves the index store open. 
ArrayList transaction_journals = ConvertUtils.convertIndexFiles1(original_ijf, index_store, table_def, Debug()); if (transaction_journals.size() > 0) { // Notify the user that this may be a problem Debug().write(Lvl.ERROR, this, "There are uncommitted changes that were not " + "converted because the pre 0.92 database was not closed " + "cleanly."); } // Force a full table scan need_check = true; } else { throw new IOException("The index file for '" + file_name + "' does not exist."); } } else { throw new IOException( "Can not create .iid index file in read-only mode."); } } else { // Open the table index file. index_store.open(isReadOnly()); index_store.init(); } // Load internal state loadInternal(); // Setup a DataIndexSetDef from the information here setupDataIndexSetDef(); if (need_check) { // Do an opening scan of the table. Any records that are uncommited // must be marked as deleted. doOpeningScan(); } } /** * Opens this source in the most minimal way. This should only be used * for diagnostics of the data. This will not load the index. */ synchronized void dirtyOpen(String file_name) throws IOException { // We have to open this... // Open the store. data_store = new VariableSizeDataStore( new File(getPath(), file_name), Debug()); data_store.open(false); // Set up the internal state of this object // Get the 'table_id' of this table from the reserved area of the data // store. byte[] reserved_buffer = new byte[64]; data_store.readReservedBuffer(reserved_buffer, 0, 64); table_id = ByteArrayUtil.getInt(reserved_buffer, 0); // Set the file name. this.file_name = file_name; // Load the table definition from the store. table_def = loadDataTableDef(); } /** * Closes this master table in the file system. This frees up all the * resources associated with this master table. *

* This method is typically called when the database is shut down. */ synchronized void close() throws IOException { if (table_indices != null) { // Merge all journal changes when we close mergeJournalChanges(Integer.MAX_VALUE); if (!isReadOnly()) { // Synchronize the current state with the file system. index_store.flush(); //table_indices.synchronizeIndexFile(); } } // Close the index store. index_store.close(); data_store.close(); table_id = -1; // file_name = null; table_def = null; table_indices = null; column_rid_list = null; is_closed = true; } /** * Returns the number of bytes the row takes up in the data file. This is * the actual space used. If a cell is compressed then it includes the * compressed size, not the uncompressed. */ synchronized int rawRecordSize(int row_number) throws IOException { int size = 2; ++row_number; // Open a stream for this row. InputStream in = data_store.getRecordInputStream(row_number); cell_in.setParentStream(in); cell_in.skip(2); for (int i = 0; i < column_count; ++i) { int len = data_cell_serializer.skipSerialization(cell_in); if (len <= 0) { throw new Error("Corrupt data - cell size is <= 0"); } cell_in.skip(len); size += 4 + len; } cell_in.close(); return size; } /** * Returns the current sector size for this data source. */ synchronized int rawDataSectorSize() throws IOException { return data_store.sectorSize(); } /** * This may only be called from the 'fix' method. It performs a full scan of * the records and rebuilds all the index information from the information. *

* This should only be used as a recovery mechanism and may not accurately * rebuild in some cases (but should rebuild as best as possible non the * less). */ private synchronized void rebuildAllIndices(File path, String file_name) throws IOException { // Temporary name of the index store File temporary_name = new File(path, file_name + ".id2"); // Actual name of the index store File actual_name = new File(path, file_name + ".iid"); // Make a new blank index store IndexStore temp_store = new IndexStore(temporary_name, Debug()); // Copy the same block size as the original temp_store.create(index_store.getBlockSize()); temp_store.init(); temp_store.addIndexLists(column_count + 1, (byte) 1); // Get the index of rows in this table IndexSet index_set = temp_store.getSnapshotIndexSet(); // The master index, IntegerListInterface master_index = index_set.getIndex(0); // The selectable schemes for the table. TableDataSource table = minimalTableDataSource(master_index); // Create a set of index for this table. SelectableScheme[] cols = new SelectableScheme[column_count]; for (int i = 0; i < column_count; ++i) { cols[i] = createSelectableSchemeForColumn(index_set, table, i); } // For each row int row_count = rawRowCount(); for (int i = 0 ; i < row_count; ++i) { // Is this record marked as deleted? if (!recordDeleted(i)) { // Get the type flags for this record. int type = recordTypeInfo(i); // Check if this record is marked as committed removed, or is an // uncommitted record. 
if (type == RawDiagnosticTable.COMMITTED_ADDED) { // Insert into the master index master_index.uniqueInsertSort(i); // Insert into schemes for (int n = 0; n < column_count; ++n) { cols[n].insert(i); } } } // if not deleted } // for each row // Commit the index store // Write the modified index set to the index store // (Updates the index file) temp_store.commitIndexSet(index_set); index_set.dispose(); temp_store.flush(); // Close and delete the original index_store index_store.close(); index_store.delete(); // Close the temporary store temp_store.close(); // Rename temp file to the actual file boolean b = temporary_name.renameTo(actual_name); if (b == false) { throw new IOException("Unable to rename " + temporary_name + " to " + actual_name); } temp_store = null; // Copy and open the new reference index_store = new IndexStore(actual_name, Debug()); index_store.open(false); index_store.init(); } /** * Copies the persistant information in this table data source to the given * directory in the file system. This makes an exact copy of the table as * it currently is. It is recommended that when this is used, there is a * lock to prevent committed changes to the database. */ synchronized void copyTo(File path) throws IOException { data_store.copyTo(path); index_store.copyTo(path); } // ---------- Diagnostic and repair ---------- /** * Performs a complete check and repair of the table. The table must not * have been opened before this method is called. The given UserTerminal * parameter is an implementation of a user interface that is used to ask * any questions and output the results of the check. */ public synchronized void checkAndRepair(String file_name, UserTerminal terminal) throws IOException { // Open the store. 
data_store = new VariableSizeDataStore( new File(getPath(), file_name), Debug()); boolean need_check = data_store.open(isReadOnly()); // if (need_check) { data_store.fix(terminal); // } // Set up the internal state of this object // Get the 'table_id' of this table from the reserved area of the data // store. byte[] reserved_buffer = new byte[64]; data_store.readReservedBuffer(reserved_buffer, 0, 64); table_id = ByteArrayUtil.getInt(reserved_buffer, 0); // Set the file name. this.file_name = file_name; // Load the table definition from the store. table_def = loadDataTableDef(); // Table journal information table_indices = new MultiVersionTableIndices(getSystem(), table_def.getTableName(), table_def.columnCount()); // The column rid list cache column_rid_list = new RIDList[table_def.columnCount()]; // Open table indices index_store = new IndexStore( new File(getPath(), file_name + ".iid"), Debug()); // Open the table index file. need_check = index_store.open(isReadOnly()); // Attempt to fix the table index file. boolean index_store_stable = index_store.fix(terminal); // Load internal state loadInternal(); // Merge all journal changes when we open mergeJournalChanges(Integer.MAX_VALUE); // If the index store is not stable then clear it and rebuild the // indices. // if (!index_store_stable) { terminal.println("+ Rebuilding all index information for table!"); rebuildAllIndices(getPath(), file_name); // } // Do an opening scan of the table. Any records that are uncommited // must be marked as deleted. doOpeningScan(); } public synchronized void checkForCleanup() { // No-op } // ---------- Implemented from AbstractMasterTableDataSource ---------- String getSourceIdent() { return getFileName(); } synchronized void synchAll() throws IOException { // Flush the indices. index_store.flush(); // Synchronize the data store. if (!getSystem().dontSynchFileSystem()) { data_store.hardSynch(); } // Synchronize the file handle. 
When this returns, we are guaranteed that
    // the index store and the data store are now persistently stored in the
    // file system.
    if (!getSystem().dontSynchFileSystem()) {
      index_store.hardSynch();
    }
  }

  // NOTE: Record indexes handed to 'data_store' below are offset by +1
  //   because record 0 of the data store is reserved for the serialized
  //   DataTableDef (see 'loadDataTableDef' / 'saveDataTableDef').

  synchronized int writeRecordType(int row_index, int row_state)
                                                          throws IOException {
    return data_store.writeRecordType(row_index + 1, row_state);
  }

  synchronized int readRecordType(int row_index) throws IOException {
    return data_store.readRecordType(row_index + 1);
  }

  synchronized boolean recordDeleted(int row_index) throws IOException {
    return data_store.recordDeleted(row_index + 1);
  }

  synchronized int rawRowCount() throws IOException {
    // -1 because record 0 holds the table definition, not row data.
    return data_store.rawRecordCount() - 1;
  }

  synchronized void internalDeleteRow(int row_index) throws IOException {
    // Delete the row permanently from the data store.
    data_store.delete(row_index + 1);
  }

  IndexSet createIndexSet() {
    return index_store.getSnapshotIndexSet();
  }

  synchronized void commitIndexSet(IndexSet index_set) {
    index_store.commitIndexSet(index_set);
    index_set.dispose();
  }

  synchronized DataTableDef loadDataTableDef() throws IOException {

    // Read record 0 which contains all this info.
    byte[] d = new byte[65536];
    int read = data_store.read(0, d, 0, 65536);
    if (read == 65536) {
      throw new IOException(
                     "Buffer overflow when reading table definition, > 64k");
    }
    ByteArrayInputStream bin = new ByteArrayInputStream(d, 0, read);

    DataTableDef def;

    DataInputStream din = new DataInputStream(bin);
    // Magic number identifying the serialization format.
    int mn = din.readInt();
    // This is the latest format...
    if (mn == 0x0bebb) {
      // Read the DataTableDef object from the input stream,
      def = DataTableDef.read(din);
    }
    else {
      // Legacy no longer supported...
      throw new IOException(
                  "Couldn't find magic number for table definition data.");
    }

    return def;
  }

  synchronized void saveDataTableDef(DataTableDef def) throws IOException {

    // Serialize the definition (magic number then the def itself).
    ByteArrayOutputStream bout = new ByteArrayOutputStream();
    DataOutputStream dout = new DataOutputStream(bout);

    dout.writeInt(0x0bebb);
    def.write(dout);

    // Write the byte array to the data store,
    byte[] d = bout.toByteArray();
    int rindex = data_store.write(d, 0, d.length);

    // rindex MUST be 0 - the table definition always lives at record 0.
    if (rindex != 0) {
      throw new IOException("Couldn't write table fields to record 0.");
    }
  }

  synchronized int internalAddRow(RowData data) throws IOException {

    OutputStream out = data_store.getRecordOutputStream();
    DataOutputStream temp_out = new DataOutputStream(out);

    // Reserved for future use.
    temp_out.writeShort(0);

    int row_cells = data.getColumnCount();

    // Write out the data,
    for (int i = 0; i < row_cells; ++i) {
      TObject cell = data.getCellData(i);
      data_cell_serializer.setToSerialize(cell);
      data_cell_serializer.writeSerialization(temp_out);
    }

    // Close the stream and complete it.
    temp_out.close();
    int record_index = data_store.completeRecordStreamWrite();

    // Update the cell cache as appropriate
    if (DATA_CELL_CACHING) {
      for (int i = 0; i < row_cells; ++i) {
        // Put the row/column/TObject into the cache.
        cache.put(table_id, record_index, i, data.getCellData(i));
      }
    }

    // Record index is -1 because sector 0 is DataTableDef.
    int row_number = record_index - 1;

    // If we have a rid_list for any of the columns, then update the indexing
    // there,
    for (int i = 0; i < column_count; ++i) {
      RIDList rid_list = column_rid_list[i];
      if (rid_list != null) {
        rid_list.insertRID(data.getCellData(i), row_number);
      }
    }

    // Return the record index of the new data in the table
    return row_number;
  }

  // ---- getCellContents ----

  // Running hit counters, periodically flushed into the system stats
  // (see the >= 1600 check in 'internalGetCellContents').
  private short s_run_total_hits = 0;
  private short s_run_file_hits  = 0;

  // ---- Optimization that saves some cycles -----

  /**
   * Some variables that are used for optimization in the 'getCellContents'
   * method.
   */
  private int OPT_last_row = -1;
  private int OPT_last_col = -1;
  private int OPT_last_skip_offset = -1;

  synchronized TObject internalGetCellContents(int column, int row) {

    // NOTES:
    // This is called *A LOT*.  It's a key part of the 20% of the program
    // that's run 80% of the time.
    // This performs very nicely for rows that are completely contained within
    // 1 sector.  However, rows that contain large cells (eg. a large binary
    // or a large string) and spans many sectors will not be utilizing memory
    // as well as it could.
    // The reason is because all the data for a row is read from the store
    // even if only 1 cell of the column is requested.  This will have a big
    // impact on column scans and searches.  The cell cache takes some of this
    // performance bottleneck away.
    // However, a better implementation of this method is made difficult by
    // the fact that sector spans can be compressed.  We should perhaps
    // revise the low level data storage so only sectors can be compressed.

    // If the database stats need updating then do so now.
    if (s_run_total_hits >= 1600) {
      getSystem().stats().add(s_run_total_hits, total_hits_key);
      getSystem().stats().add(s_run_file_hits, file_hits_key);
      s_run_total_hits = 0;
      s_run_file_hits = 0;
    }

    // Increment the total hits counter
    ++s_run_total_hits;

    // Row 0 is reserved for DataTableDef
    ++row;

    // First check if this is within the cache before we continue.
    TObject cell;
    if (DATA_CELL_CACHING) {
      cell = cache.get(table_id, row, column);
      if (cell != null) {
        return cell;
      }
    }

    // Increment the file hits counter
    ++s_run_file_hits;

    // We maintain a cache of byte[] arrays that contain the rows read in
    // from the file.  If consecutive reads are made to the same row, then
    // this will cause lots of fast cache hits.

    try {

      // Open a stream for this row.
      InputStream in = data_store.getRecordInputStream(row);
      cell_in.setParentStream(in);

      // NOTE: This is an optimization for a common sequence of pulling cells
      //   from a row.  It remembers the index of the last column read in, and
      //   if the next column requested is > than the last column read, then
      //   it trivially skips the file pointer to the old point.
      //   Variables starting with 'OPT_' are member variables used for
      //   keeping the optimization state information.

      int start_col;
      if (OPT_last_row == row && column >= OPT_last_col) {
        cell_in.skip(OPT_last_skip_offset);
        start_col = OPT_last_col;
      }
      else {
        // Skip the 2 byte reserved header written by 'internalAddRow'.
        cell_in.skip(2);
        OPT_last_row = row;
        OPT_last_skip_offset = 2;
        OPT_last_col = 0;
        start_col = 0;
      }

      // Skip over the serialized cells before the requested column.
      for (int i = start_col; i < column; ++i) {
        int len = data_cell_serializer.skipSerialization(cell_in);
        if (len <= 0) {
          throw new Error("Corrupt data - cell size is <= 0");
        }
        cell_in.skip(len);
        ++OPT_last_col;
        OPT_last_skip_offset += len + 4;    // ( +4 for the header )
      }

      // Read the cell
      Object ob = data_cell_serializer.readSerialization(cell_in);

      // Get the TType for this column
      // NOTE: It's possible this call may need optimizing?
      TType ttype = getDataTableDef().columnAt(column).getTType();

      // Wrap it around a TObject
      cell = new TObject(ttype, ob);

      // And close the reader.
      cell_in.close();

      // And put in the cache and return it.
if (DATA_CELL_CACHING) { cache.put(table_id, row, column, cell); } return cell; } catch (IOException e) { Debug().writeException(e); throw new Error("IOError getting cell at (" + column + ", " + row + ")."); } } synchronized long currentUniqueID() { return index_store.currentUniqueID(); } synchronized long nextUniqueID() { return index_store.nextUniqueID(); } synchronized void setUniqueID(long value) { index_store.setUniqueID(value); } synchronized void dispose(boolean pending_close) throws IOException { close(); } synchronized boolean drop() throws IOException { if (!is_closed) { close(); } Debug().write(Lvl.MESSAGE, this, "Dropping: " + getFileName()); data_store.delete(); index_store.delete(); return true; } void shutdownHookCleanup() { // This does nothing... } /** * For diagnostic. */ public String toString() { return "[V1MasterTableDataSource: " + file_name + "]"; } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/V2MasterTableDataSource.java000066400000000000000000001343671330501023400271640ustar00rootroot00000000000000/** * com.mckoi.database.V2MasterTableDataSource 01 Sep 2002 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*
 */

package com.mckoi.database;

import com.mckoi.store.*;
import com.mckoi.debug.*;
import com.mckoi.util.ByteArrayUtil;
import com.mckoi.util.IntegerListInterface;
import com.mckoi.util.UserTerminal;
import com.mckoi.database.global.ObjectTransfer;
import com.mckoi.database.global.StringObject;
import com.mckoi.database.global.ClobRef;
import com.mckoi.database.global.Ref;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.ArrayList;
import java.util.List;
import java.io.*;

/**
 * A MasterTableDataSource that is backed by a non-shared
 * com.mckoi.store.Store object.  The store interface allows us a great deal
 * of flexibility because we can map a store around different underlying
 * devices.  For example, a store could map to a memory region, a memory
 * mapped file, or a standard random access file.
 * <p>
 * Unlike V1MasterTableDataSource, this manages data and index information in
 * a single store which can be backed by a single file in the file system.
 * <p>
 * The structure of the store comprises a header block that contains the
 * following information;
 * <p>
 * <pre>
 *       HEADER BLOCK
 *   +-------------------------------+
 *   | version                       |
 *   | table id                      |
 *   | table sequence id             |
 *   | pointer to DataTableDef       |
 *   | pointer to DataIndexSetDef    |
 *   | pointer to index block        |
 *   | LIST BLOCK HEADER pointer     |
 *   +-------------------------------+
 * </pre>
 * <p>
 * Each record is comprised of a header which contains offsets to the fields
 * in the record, and a serialization of the fields themselves.
 *
 * @author Tobias Downer
 */

public final class V2MasterTableDataSource extends MasterTableDataSource {

  /**
   * The file name of this store in the conglomerate path.
   */
  private String file_name;

  /**
   * The backing store object.
   */
  private Store store;

  /**
   * An IndexSetStore object that manages the indexes for this table.
   */
  private IndexSetStore index_store;

  /**
   * The current sequence id.
   */
  private long sequence_id;


  // ---------- Pointers into the store ----------

//  /**
//   * Points to the store header area.
//   */
//  private long header_p;

  /**
   * Points to the index header area.
   */
  private long index_header_p;

  /**
   * Points to the block list header area.
   */
  private long list_header_p;

  /**
   * The header area itself.
   */
  private MutableArea header_area;


  /**
   * The structure that manages the pointers to the records.
   */
  private FixedRecordList list_structure;

  /**
   * The first delete chain element.
   */
  private long first_delete_chain_record;

  /**
   * Set to true when the VM has shutdown and writes should no longer be
   * possible on the object.
   */
  private boolean has_shutdown;


  /**
   * The Constructor.
   */
  public V2MasterTableDataSource(TransactionSystem system,
                                 StoreSystem store_system,
                                 OpenTransactionList open_transactions,
                                 BlobStoreInterface blob_store_interface) {
    super(system, store_system, open_transactions, blob_store_interface);
    // -1 indicates an empty delete chain.
    first_delete_chain_record = -1;
    has_shutdown = false;
  }

  /**
   * Convenience - wraps the given output stream around a buffered data output
   * stream.
   */
  private static DataOutputStream getDOut(OutputStream out) {
//    return new DataOutputStream(out);
    return new DataOutputStream(new BufferedOutputStream(out, 512));
  }

  /**
   * Convenience - wraps the given input stream around a buffered data input
   * stream.
*/
  private static DataInputStream getDIn(InputStream in) {
//    return new DataInputStream(in);
    return new DataInputStream(new BufferedInputStream(in, 512));
  }

  /**
   * Sets up an initial store (should only be called from the 'create' method).
   */
  private void setupInitialStore() throws IOException {
    // Serialize the DataTableDef object
    ByteArrayOutputStream bout = new ByteArrayOutputStream();
    DataOutputStream dout = new DataOutputStream(bout);
    dout.writeInt(1);                   // serialization version
    getDataTableDef().write(dout);
    // Convert to a byte array
    byte[] data_table_def_buf = bout.toByteArray();

    // Serialize the DataIndexSetDef object
    bout = new ByteArrayOutputStream();
    dout = new DataOutputStream(bout);
    dout.writeInt(1);                   // serialization version
    getDataIndexSetDef().write(dout);
    // Convert to byte array
    byte[] index_set_def_buf = bout.toByteArray();

    bout = null;
    dout = null;

    try {
      store.lockForWrite();

      // Allocate an 80 byte header
      AreaWriter header_writer = store.createArea(80);
      long header_p = header_writer.getID();
      // Allocate space to store the DataTableDef serialization
      AreaWriter data_table_def_writer =
                                 store.createArea(data_table_def_buf.length);
      long data_table_def_p = data_table_def_writer.getID();
      // Allocate space to store the DataIndexSetDef serialization
      AreaWriter data_index_set_writer =
                                  store.createArea(index_set_def_buf.length);
      long data_index_set_def_p = data_index_set_writer.getID();

      // Allocate space for the list header
      list_header_p = list_structure.create();
      list_structure.setReservedLong(-1);
      first_delete_chain_record = -1;

      // Create the index store
      index_store = new IndexSetStore(store, getSystem());
      index_header_p = index_store.create();

      // Write the main header (layout documented in the class javadoc).
      header_writer.putInt(1);                      // Version
      header_writer.putInt(table_id);               // table_id
      header_writer.putLong(sequence_id);           // initial sequence id
      header_writer.putLong(data_table_def_p);      // pointer to DataTableDef
      header_writer.putLong(data_index_set_def_p);  // pointer to DataIndexSetDef
      header_writer.putLong(index_header_p);        // index header pointer
      header_writer.putLong(list_header_p);         // list header pointer
      header_writer.finish();

      // Write the data_table_def
      data_table_def_writer.put(data_table_def_buf);
      data_table_def_writer.finish();

      // Write the data_index_set_def
      data_index_set_writer.put(index_set_def_buf);
      data_index_set_writer.finish();

      // Set the pointer to the header in the reserved area.
      MutableArea fixed_area = store.getMutableArea(-1);
      fixed_area.putLong(header_p);
      fixed_area.checkOut();

      // Set the header area
      header_area = store.getMutableArea(header_p);

    }
    finally {
      store.unlockForWrite();
    }

  }

  /**
   * Read the store headers and initialize any internal object state.  This is
   * called by the 'open' method.
   */
  private void readStoreHeaders() throws IOException {
    // Read the fixed header
    Area fixed_area = store.getArea(-1);
    // Set the header area
    header_area = store.getMutableArea(fixed_area.getLong());

    // Open a stream to the header
    int version = header_area.getInt();            // version
    if (version != 1) {
      throw new IOException("Incorrect version identifier.");
    }
    this.table_id = header_area.getInt();          // table_id
    this.sequence_id = header_area.getLong();      // sequence id
    long def_p = header_area.getLong();            // pointer to DataTableDef
    long index_def_p = header_area.getLong();      // pointer to DataIndexSetDef
    this.index_header_p = header_area.getLong();   // pointer to index header
    this.list_header_p = header_area.getLong();    // pointer to list header

    // Read the data table def
    DataInputStream din = getDIn(store.getAreaInputStream(def_p));
    version = din.readInt();
    if (version != 1) {
      throw new IOException("Incorrect DataTableDef version identifier.");
    }
    table_def = DataTableDef.read(din);
    din.close();

    // Read the data index set def
    din = getDIn(store.getAreaInputStream(index_def_p));
    version = din.readInt();
    if (version != 1) {
      throw new IOException("Incorrect DataIndexSetDef version identifier.");
    }
    index_def = DataIndexSetDef.read(din);
    din.close();

    // Read the list header
    list_structure.init(list_header_p);
    first_delete_chain_record = list_structure.getReservedLong();

    // Init the index store
    index_store = new IndexSetStore(store, getSystem());
    try {
      index_store.init(index_header_p);
    }
    catch (IOException e) {
      // If this failed try writing out a new empty index set.
      // ISSUE: Should this occur here?  This is really an attempt at
      //   repairing the index store.
      index_store = new IndexSetStore(store, getSystem());
      index_header_p = index_store.create();
      index_store.addIndexLists(table_def.columnCount() + 1, (byte) 1, 1024);
      // Overwrite the index header pointer in the main header
      // (position 32 = after version, table_id, sequence id and the two
      //  def pointers).
      header_area.position(32);
      header_area.putLong(index_header_p);
      header_area.position(0);
      header_area.checkOut();
    }

  }

  /**
   * Create this master table in the file system at the given path.  This will
   * initialise the various file objects and result in a new empty master
   * table to store data in.
   */
  void create(int table_id, DataTableDef table_def) throws IOException {

    // Set the data table def object
    setupDataTableDef(table_def);

    // Initially set the table sequence_id to 1
    this.sequence_id = 1;

    // Generate the name of the store file name.
    this.file_name = makeTableFileName(getSystem(), table_id, getTableName());

    // Create and open the store.
    store = storeSystem().createStore(file_name);

    try {
      store.lockForWrite();

      // Setup the list structure
      list_structure = new FixedRecordList(store, 12);
    }
    finally {
      store.unlockForWrite();
    }

    // Set up internal state of this object
    this.table_id = table_id;

    // Initialize the store to an empty state,
    setupInitialStore();
    index_store.addIndexLists(table_def.columnCount() + 1, (byte) 1, 1024);

    // Load internal state
    loadInternal();

//    synchAll();

  }

  /**
   * Returns true if the master table data source with the given source
   * identity exists.
   */
  boolean exists(String identity) throws IOException {
    return storeSystem().storeExists(identity);
  }

  /**
   * Opens an existing master table from the file system at the path of the
   * conglomerate this belongs to.  This will set up the internal state of
   * this object with the data read in.
*/
  public void open(String file_name) throws IOException {

    // Record the file name of the backing store.
    this.file_name = file_name;

    // Open the store.
    store = storeSystem().openStore(file_name);
    // If the store wasn't closed cleanly we must scan and repair below.
    boolean need_check = !store.lastCloseClean();

    // Setup the list structure
    list_structure = new FixedRecordList(store, 12);

    // Read and setup the pointers
    readStoreHeaders();

    // Set the column count
    column_count = table_def.columnCount();

    // Open table indices
    table_indices = new MultiVersionTableIndices(getSystem(),
                         table_def.getTableName(), table_def.columnCount());
    // The column rid list cache
    column_rid_list = new RIDList[table_def.columnCount()];

    // Load internal state
    loadInternal();

    if (need_check) {
      // Do an opening scan of the table.  Any records that are uncommitted
      // must be marked as deleted.
      doOpeningScan();

      // Scan for any leaks in the file,
      Debug().write(Lvl.INFORMATION, this,
                    "Scanning File: " + file_name + " for leaks.");
      scanForLeaks();
    }

//    HashMap properties = new HashMap();
//    file_store.statsScan(properties);
//    System.out.println("File: " + file_name);
//    Iterator key_i = properties.keySet().iterator();
//    while (key_i.hasNext()) {
//      String key = (String) key_i.next();
//      System.out.print(key);
//      System.out.print(" = ");
//      System.out.println(properties.get(key));
//    }

  }

  /**
   * Closes this master table in the file system.  This frees up all the
   * resources associated with this master table.
   * <p>
   * This method is typically called when the database is shut down.
   */
  synchronized void close(boolean pending_drop) throws IOException {
    // NOTE: This method MUST be synchronized over the table to prevent
    //   establishing a root lock on this table.  If a root lock is
    //   established then the collection event could fail.

    synchronized (list_structure) {

      // If we are root locked, we must become un root locked.
      clearAllRootLocks();

      try {
        try {
          store.lockForWrite();

          // Force a garbage collection event.
          if (!isReadOnly()) {
            garbage_collector.performCollectionEvent(true);
          }

          // If we are closing pending a drop, we need to remove all blob
          // references in the table.
          // NOTE: This must only happen after the above collection event.
          if (pending_drop) {
            // Scan and remove all blob references for this dropped table.
            dropAllBlobReferences();
          }
        }
        finally {
          store.unlockForWrite();
        }
      }
      catch (Throwable e) {
        // Best-effort close: log the failure and continue shutting down.
        Debug().write(Lvl.ERROR, this,
                      "Exception during table (" + toString() + ") close: " +
                      e.getMessage());
        Debug().writeException(e);
      }

      // Synchronize the store
      index_store.close();
//      store.flush();

      // Close the store in the store system.
      storeSystem().closeStore(store);

      table_def = null;
      table_indices = null;
      column_rid_list = null;
      is_closed = true;
    }
  }

  /**
   * Creates a new v2 master table data source that is a copy of the given
   * MasterTableDataSource object.
   *
   * @param table_id the table id to given the new table.
   * @param src_master_table the table to copy.
   * @param index_set the view of the table to be copied.
   */
  void copy(int table_id, MasterTableDataSource src_master_table,
            IndexSet index_set) throws IOException {

    // Basically we need to copy all the data and then set the new index view.
    create(table_id, src_master_table.getDataTableDef());

    // The record list.
    IntegerListInterface master_index = index_set.getIndex(0);

    // For each row in the master table
    int sz = src_master_table.rawRowCount();
    for (int i = 0; i < sz; ++i) {
      // Is this row in the set we are copying from?
if (master_index.contains(i)) {
        // Yes so copy the record into this table.
        copyRecordFrom(src_master_table, i);
      }
    }

    // Copy the index set
    if (src_master_table instanceof V2MasterTableDataSource) {
      index_store.copyAllFrom(index_set);
    }
    else if (src_master_table instanceof V1MasterTableDataSource) {
      // HACK: This is a bit of a hack.  We should copy the index_set into
      //   this newly created object but instead we rebuild the indexes from
      //   scratch.
      //   This is only used when converting a 0.93 database to the
      //   V2MasterTableDataSource format.
      buildIndexes();
    }

    // Finally set the unique id
    long un_id = src_master_table.nextUniqueID();
    setUniqueID(un_id);

  }

  // ---------- Low level operations ----------

  /**
   * Writes a record to the store and returns a pointer to the area that
   * represents the new record.  This does not manipulate the fixed structure
   * in any way.  This method only allocates an area to store the record and
   * serializes the record.  It is the responsibility of the callee to add the
   * record into the general file structure.
   * <p>
   * Note that if the RowData contains any references to Blob objects then a
   * reference count to the blob is generated at this point.
   */
  private long writeRecordToStore(RowData data) throws IOException {

    // Calculate how much space this record will use
    int row_cells = data.getColumnCount();

    int[] cell_sizes = new int[row_cells];
    // Cell type: 1 = regular serialized object, 2 = blob reference.
    int[] cell_type = new int[row_cells];

    try {
      store.lockForWrite();

      // Establish a reference to any blobs in the record
      int all_records_size = 0;
      for (int i = 0; i < row_cells; ++i) {
        TObject cell = data.getCellData(i);
        int sz;
        int ctype;
        if (cell.getObject() instanceof Ref) {
          Ref large_object_ref = (Ref) cell.getObject();
          // TBinaryType that are BlobRef objects have to be handled
          // separately.
          sz = 16;
          ctype = 2;
          if (large_object_ref != null) {
            // Tell the blob store interface that we've made a static
            // reference to this blob.
            blob_store_interface.establishReference(large_object_ref.getID());
          }
        }
        else {
          sz = ObjectTransfer.exactSize(cell.getObject());
          ctype = 1;
        }
        cell_sizes[i] = sz;
        cell_type[i] = ctype;
        all_records_size += sz;
      }

      long record_p;

      // Allocate space for the record,
      // (+8 per cell for the type/offset header entries, +4 for the
      //  reserved int.)
      AreaWriter writer =
                   store.createArea(all_records_size + (row_cells * 8) + 4);
      record_p = writer.getID();

      // The record output stream
      DataOutputStream dout = getDOut(writer.getOutputStream());

      // Write the record header first,
      dout.writeInt(0);          // reserved for future use
      int cell_skip = 0;
      for (int i = 0; i < row_cells; ++i) {
        dout.writeInt((int) cell_type[i]);
        dout.writeInt(cell_skip);
        cell_skip += cell_sizes[i];
      }

      // Now write a serialization of the cells themselves,
      for (int i = 0; i < row_cells; ++i) {
        TObject t_object = data.getCellData(i);
        int ctype = cell_type[i];
        if (ctype == 1) {
          // Regular object
          ObjectTransfer.writeTo(dout, t_object.getObject());
        }
        else if (ctype == 2) {
          // This is a binary large object and must be represented as a ref
          // to a blob in the BlobStore.
          Ref large_object_ref = (Ref) t_object.getObject();
          if (large_object_ref == null) {
            // null value
            dout.writeInt(1);
            dout.writeInt(0);               // Reserved for future use
            dout.writeLong(-1);
          }
          else {
            dout.writeInt(0);
            dout.writeInt(0);               // Reserved for future use
            dout.writeLong(large_object_ref.getID());
          }
        }
        else {
          throw new IOException("Unrecognised cell type.");
        }
      }

      // Flush the output
      dout.flush();

      // Finish the record
      writer.finish();

      // Return the record
      return record_p;

    }
    finally {
      store.unlockForWrite();
    }

  }

  /**
   * Copies the record at the given index in the source table to the same
   * record index in this table.  Note that this may need to expand the
   * fixed list record heap as necessary to copy the record into the given
   * position.  The record is NOT copied into the first free record position.
   */
  private void copyRecordFrom(MasterTableDataSource src_master_table,
                              int record_id) throws IOException {

    // Copy the record from the source table in a RowData object,
    int sz = src_master_table.getDataTableDef().columnCount();
    RowData row_data = new RowData(getSystem(), sz);
    for (int i = 0; i < sz; ++i) {
      TObject tob = src_master_table.getCellContents(i, record_id);
      row_data.setColumnDataFromTObject(i, tob);
    }

    try {
      store.lockForWrite();

      // Write record to this table but don't update any structures for the
      // new record.
      long record_p = writeRecordToStore(row_data);

      // Add this record into the table structure at the given index
      addToRecordList(record_id, record_p);

      // Set the record type for this record (committed added).
      writeRecordType(record_id, 0x010);

    }
    finally {
      store.unlockForWrite();
    }

  }

  /**
   * Removes all blob references in the record area pointed to by 'record_p'.
   * This should only be used when the record is to be reclaimed.
   */
  private void removeAllBlobReferencesForRecord(long record_p)
                                                        throws IOException {
    // NOTE: Does this need to be optimized?
Area record_area = store.getArea(record_p);
    int reserved = record_area.getInt();    // reserved
    // Look for any blob references in the row
    for (int i = 0; i < column_count; ++i) {
      int ctype = record_area.getInt();
      int cell_offset = record_area.getInt();
      if (ctype == 1) {
        // Type 1 is not a large object
      }
      else if (ctype == 2) {
        int cur_p = record_area.position();
        // Seek to the cell body (+4 for the reserved int, +8 per cell for
        // the header entries).
        record_area.position(cell_offset + 4 + (column_count * 8));
        int btype = record_area.getInt();
        record_area.getInt();      // (reserved)
        if (btype == 0) {
          long blob_ref_id = record_area.getLong();
          // Release this reference
          blob_store_interface.releaseReference(blob_ref_id);
        }
        // Revert the area pointer
        record_area.position(cur_p);
      }
      else {
        throw new RuntimeException("Unrecognised type.");
      }
    }
  }

  /**
   * Scans the table and drops ALL blob references in this table.  This is
   * used when a table is dropped while it still contains elements referenced
   * in the BlobStore.  This will decrease the reference count in the
   * BlobStore for all blobs.  In effect, this is like calling 'delete' on
   * all the data in the table.
   * <p>
   * This method should only be called when the table is about to be deleted
   * from the file system.
   */
  private void dropAllBlobReferences() throws IOException {

    synchronized (list_structure) {
      long elements = list_structure.addressableNodeCount();
      for (long i = 0; i < elements; ++i) {
        Area a = list_structure.positionOnNode(i);
        int status = a.getInt();
        // Is the record not deleted?
        if ((status & 0x020000) == 0) {
          // Get the record pointer
          long record_p = a.getLong();
          removeAllBlobReferencesForRecord(record_p);
        }
      }
    }

  }

  // ---------- Diagnostic and repair ----------

  /**
   * Looks for any leaks in the file.  This works by walking through the
   * file and index area graph and 'remembering' all areas that were read.
   * The store is then checked that all other areas except these are deleted.
   * <p>
   * Assumes the master table is open.
   */
  public void scanForLeaks() throws IOException {

    synchronized (list_structure) {

      // The list of pointers to areas (as Long).
      ArrayList used_areas = new ArrayList();

      // Add the header_p pointer
      used_areas.add(new Long(header_area.getID()));

      header_area.position(16);
      // Add the DataTableDef and DataIndexSetDef objects
      used_areas.add(new Long(header_area.getLong()));
      used_areas.add(new Long(header_area.getLong()));

      // Add all the used areas in the list_structure itself.
      list_structure.addAllAreasUsed(used_areas);

      // Adds all the user areas in the index store.
      index_store.addAllAreasUsed(used_areas);

      // Search the list structure for all areas
      long elements = list_structure.addressableNodeCount();
      for (long i = 0; i < elements; ++i) {
        Area a = list_structure.positionOnNode(i);
        int status = a.getInt();
        if ((status & 0x020000) == 0) {
          long pointer = a.getLong();
//          System.out.println("Not deleted = " + pointer);
          // Record is not deleted,
          used_areas.add(new Long(pointer));
        }
      }

      // Following depends on store implementation
      if (store instanceof AbstractStore) {
        AbstractStore a_store = (AbstractStore) store;
        // Any allocated area not in 'used_areas' is a leak - free it.
        ArrayList leaked_areas = a_store.findAllocatedAreasNotIn(used_areas);
        if (leaked_areas.size() == 0) {
          Debug().write(Lvl.INFORMATION, this, "No leaked areas.");
        }
        else {
          Debug().write(Lvl.INFORMATION, this, "There were " +
                        leaked_areas.size() + " leaked areas found.");
          for (int n = 0; n < leaked_areas.size(); ++n) {
            Long area_pointer = (Long) leaked_areas.get(n);
            store.deleteArea(area_pointer.longValue());
          }
          Debug().write(Lvl.INFORMATION, this,
                        "Leaked areas successfully freed.");
        }
      }

    }

  }

  /**
   * Performs a complete check and repair of the table.  The table must not
   * have been opened before this method is called.  The given UserTerminal
   * parameter is an implementation of a user interface that is used to ask
   * any questions and output the results of the check.
*/
  public void checkAndRepair(String file_name,
                             UserTerminal terminal) throws IOException {

    this.file_name = file_name;

    terminal.println("+ Repairing V2MasterTableDataSource " + file_name);

    store = storeSystem().openStore(file_name);
    // If AbstractStore then fix
    if (store instanceof AbstractStore) {
      ((AbstractStore) store).openScanAndFix(terminal);
    }

    // Setup the list structure
    list_structure = new FixedRecordList(store, 12);

    try {
      // Read and setup the pointers
      readStoreHeaders();
      // Set the column count
      column_count = table_def.columnCount();
    }
    catch (IOException e) {
      // If this fails, the table is not recoverable.
      terminal.println(
           "! Table is not repairable because the file headers are corrupt.");
      terminal.println(" Error reported: " + e.getMessage());
      e.printStackTrace();
      return;
    }

    // From here, we at least have intact headers.
    terminal.println("- Checking record integrity.");

    // Get the sorted list of all areas in the file.
    List all_areas = store.getAllAreas();
    // The list of all records generated when we check each record
    ArrayList all_records = new ArrayList();

    // Look up each record and check it's intact,  Any records that are
    // deleted are added to the delete chain.
    first_delete_chain_record = -1;
    int record_count = 0;
    int free_count = 0;
    int sz = rawRowCount();
    // Iterate backwards so the rebuilt delete chain ends up in
    // ascending order.
    for (int i = sz - 1; i >= 0; --i) {
      boolean record_valid = checkAndRepairRecord(i, all_areas, terminal);
      if (record_valid) {
        all_records.add(new Long(i));
        ++record_count;
      }
      else {
        ++free_count;
      }
    }
    // Set the reserved area
    list_structure.setReservedLong(first_delete_chain_record);

    terminal.print("* Record count = " + record_count);
    terminal.println(" Free count = " + free_count);

    // Check indexes
    terminal.println("- Rebuilding all table index information.");
    int index_count = table_def.columnCount() + 1;
    for (int i = 0; i < index_count; ++i) {
      index_store.commitDropIndex(i);
    }
//    store.flush();
    buildIndexes();

    terminal.println("- Table check complete.");
//    // Flush any changes
//    store.flush();

  }

  /**
   * Checks and repairs a record if it requires repairing.  Returns true if
   * the record is valid, or false otherwise (record is/was deleted).
   */
  private boolean checkAndRepairRecord(
              int row_index, List all_areas, UserTerminal terminal)
                                                        throws IOException {
    synchronized (list_structure) {

      // Position in the list structure
      MutableArea block_area = list_structure.positionOnNode(row_index);
      int p = block_area.position();
      int status = block_area.getInt();
      // If it is not deleted,
      if ((status & 0x020000) == 0) {
        long record_p = block_area.getLong();
//        System.out.println("row_index = " + row_index +
//                           " record_p = " + record_p);
        // Is this pointer valid?
        int i = Collections.binarySearch(all_areas, new Long(record_p));
        if (i >= 0) {
          // Pointer is valid in the store,
          // Try reading from column 0
          try {
            internalGetCellContents(0, row_index);
            // Return because the record is valid.
            return true;
          }
          catch (Throwable e) {
            // If an exception is generated when accessing the data, delete
            // the record.
            terminal.println("+ Error accessing record: " + e.getMessage());
          }
        }

        // If we get here, the record needs to be deleted and added to the
        // delete chain
        terminal.println("+ Record area not valid: row = " + row_index +
                         " pointer = " + record_p);
        terminal.println("+ Deleting record.");
      }

      // Put this record in the delete chain
      block_area.position(p);
      block_area.putInt(0x020000);
      block_area.putLong(first_delete_chain_record);
      block_area.checkOut();
      first_delete_chain_record = row_index;

      return false;

    }
  }

  /**
   * Grows the list structure to accommodate more entries.  The new entries
   * are added to the free chain pool.  Assumes we are synchronized over
   * list_structure.
   */
  private void growListStructure() throws IOException {
    try {
      store.lockForWrite();

      // Increase the size of the list structure.
      list_structure.increaseSize();
      // The start record of the new size
      int new_block_number = list_structure.listBlockCount() - 1;
      long start_index =
                   list_structure.listBlockFirstPosition(new_block_number);
      long size_of_block =
                   list_structure.listBlockNodeCount(new_block_number);
      // The Area object for the new position
      MutableArea a = list_structure.positionOnNode(start_index);

      // Set the rest of the block as deleted records
      for (long n = 0; n < size_of_block - 1; ++n) {
        a.putInt(0x020000);
        a.putLong(start_index + n + 1);
      }
      // The last block is end of delete chain.
      a.putInt(0x020000);
      a.putLong(first_delete_chain_record);
      a.checkOut();
      // And set the new delete chain
      first_delete_chain_record = start_index;
      // Set the reserved area
      list_structure.setReservedLong(first_delete_chain_record);

    }
    finally {
      store.unlockForWrite();
    }
  }

  /**
   * Adds a record to the given position in the fixed structure.  If the place
   * is already used by a record then an exception is thrown, otherwise the
   * record is set.
*/
  private long addToRecordList(long index, long record_p) throws IOException {

    synchronized (list_structure) {
      if (has_shutdown) {
        throw new IOException("IO operation while VM shutting down.");
      }

      long addr_count = list_structure.addressableNodeCount();
      // First make sure there are enough nodes to accommodate this entry,
      while (index >= addr_count) {
        growListStructure();
        addr_count = list_structure.addressableNodeCount();
      }

      // Remove this from the delete chain by searching for the index in the
      // delete chain.
      long prev = -1;
      long chain = first_delete_chain_record;
      while (chain != -1 && chain != index) {
        Area a = list_structure.positionOnNode(chain);
        if (a.getInt() == 0x020000) {
          prev = chain;
          chain = a.getLong();
        }
        else {
          throw new IOException("Not deleted record is in delete chain!");
        }
      }

      // Wasn't found
      if (chain == -1) {
        throw new IOException(
                    "Unable to add record because index is not available.");
      }
      // Read the next entry in the delete chain.
      Area a = list_structure.positionOnNode(chain);
      if (a.getInt() != 0x020000) {
        throw new IOException("Not deleted record is in delete chain!");
      }
      long next_p = a.getLong();

      try {
        store.lockForWrite();

        // If prev == -1 then first_delete_chain_record points to this record
        if (prev == -1) {
          first_delete_chain_record = next_p;
          list_structure.setReservedLong(first_delete_chain_record);
        }
        else {
          // Otherwise we need to set the previous node to point to the next
          // node
          MutableArea ma = list_structure.positionOnNode(prev);
          ma.putInt(0x020000);
          ma.putLong(next_p);
          ma.checkOut();
        }

        // Finally set the record_p
        MutableArea ma = list_structure.positionOnNode(index);
        ma.putInt(0);
        ma.putLong(record_p);
        ma.checkOut();

      }
      finally {
        store.unlockForWrite();
      }

    }

    return index;
  }

  /**
   * Finds a free place to add a record and returns an index to the record
   * here.  This may expand the record space as necessary if there are no
   * free record slots to use.
   */
  private long addToRecordList(long record_p) throws IOException {

    synchronized (list_structure) {
      if (has_shutdown) {
        throw new IOException("IO operation while VM shutting down.");
      }

      // If there are no free deleted records in the delete chain,
      if (first_delete_chain_record == -1) {
        // Grow the fixed structure to allow more nodes,
        growListStructure();
      }

      // Pull free block from the delete chain and recycle it.
      long recycled_record = first_delete_chain_record;
      MutableArea block = list_structure.positionOnNode(recycled_record);
      int rec_pos = block.position();
      // Status of the recycled block
      int status = block.getInt();
      if ((status & 0x020000) == 0) {
        throw new Error("Assertion failed: record is not deleted. " +
                        "status = " + status + ", rec_pos = " + rec_pos);
      }
      // The pointer to the next in the chain.
      long next_chain = block.getLong();
      first_delete_chain_record = next_chain;

      try {
        store.lockForWrite();

        // Update the first_delete_chain_record field in the header
        list_structure.setReservedLong(first_delete_chain_record);
        // Update the block
        block.position(rec_pos);
        block.putInt(0);
        block.putLong(record_p);
        block.checkOut();

      }
      finally {
        store.unlockForWrite();
      }

      return recycled_record;
    }

  }

  // ---------- Implemented from AbstractMasterTableDataSource ----------

  String getSourceIdent() {
    return file_name;
  }

  int writeRecordType(int row_index, int row_state) throws IOException {
    synchronized (list_structure) {
      if (has_shutdown) {
        throw new IOException("IO operation while VM shutting down.");
      }

      // Find the record entry in the block list.
      MutableArea block_area = list_structure.positionOnNode(row_index);
      int pos = block_area.position();
      // Get the status.
int old_status = block_area.getInt(); int mod_status = (old_status & 0x0FFFF0000) | (row_state & 0x0FFFF); // Write the new status try { store.lockForWrite(); block_area.position(pos); block_area.putInt(mod_status); block_area.checkOut(); } finally { store.unlockForWrite(); } return old_status & 0x0FFFF; } } int readRecordType(int row_index) throws IOException { synchronized (list_structure) { // Find the record entry in the block list. Area block_area = list_structure.positionOnNode(row_index); // Get the status. return block_area.getInt() & 0x0FFFF; } } boolean recordDeleted(int row_index) throws IOException { synchronized (list_structure) { // Find the record entry in the block list. Area block_area = list_structure.positionOnNode(row_index); // If the deleted bit set for the record return (block_area.getInt() & 0x020000) != 0; } } int rawRowCount() throws IOException { synchronized (list_structure) { long total = list_structure.addressableNodeCount(); // 32-bit row limitation here - we should return a long. return (int) total; } } void internalDeleteRow(int row_index) throws IOException { long record_p; synchronized (list_structure) { if (has_shutdown) { throw new IOException("IO operation while VM shutting down."); } // Find the record entry in the block list. MutableArea block_area = list_structure.positionOnNode(row_index); int p = block_area.position(); int status = block_area.getInt(); // Check it is not already deleted if ((status & 0x020000) != 0) { throw new IOException("Record is already marked as deleted."); } record_p = block_area.getLong(); // Update the status record. 
    try {
      store.lockForWrite();

      // Overwrite the status word with the 'deleted' flag (0x020000) and link
      // this entry into the head of the delete chain - the previous chain
      // head becomes this record's 'next' pointer.
      block_area.position(p);
      block_area.putInt(0x020000);
      block_area.putLong(first_delete_chain_record);
      block_area.checkOut();
      first_delete_chain_record = row_index;
      // Update the first_delete_chain_record field in the header
      list_structure.setReservedLong(first_delete_chain_record);

      // If the record contains any references to blobs, remove the reference
      // here.
      removeAllBlobReferencesForRecord(record_p);

      // Free the record from the store
      store.deleteArea(record_p);

    }
    finally {
      store.unlockForWrite();
    }

    }
  }

  /**
   * Returns a snapshot index set from the backing index store.
   */
  IndexSet createIndexSet() {
    return index_store.getSnapshotIndexSet();
  }

  /**
   * Commits the given index set to the index store and then disposes it.
   * After this call the index set must not be used again.
   */
  void commitIndexSet(IndexSet index_set) {
    index_store.commitIndexSet(index_set);
    index_set.dispose();
  }

  /**
   * Adds the given row data to this table and returns the record index of
   * the new row.  The record is written to the store, a slot is claimed in
   * the record block list, and (if DATA_CELL_CACHING is enabled) each cell
   * of the new row is primed into the cell cache.
   */
  int internalAddRow(RowData data) throws IOException {

    long row_number;
    int int_row_number;

    // Write the record to the store.
    synchronized (list_structure) {
      long record_p = writeRecordToStore(data);
      // Now add this record into the record block list,
      row_number = addToRecordList(record_p);
      int_row_number = (int) row_number;
    }

    // Update the cell cache as appropriate
    if (DATA_CELL_CACHING) {
      int row_cells = data.getColumnCount();
      for (int i = 0; i < row_cells; ++i) {
        // Put the row/column/TObject into the cache.
        cache.put(table_id, int_row_number, i, data.getCellData(i));
      }
    }

    // Return the record index of the new data in the table
    // NOTE: We are casting this from a long to int which means we are limited
    //   to ~2 billion record references.
return (int) row_number; } synchronized void checkForCleanup() { // index_store.cleanUpEvent(); garbage_collector.performCollectionEvent(false); } // ---- getCellContents ---- private void skipStream(InputStream in, final long amount) throws IOException { long count = amount; long skipped = 0; while (skipped < amount) { long last_skipped = in.skip(count); skipped += last_skipped; count -= last_skipped; } } // private short s_run_total_hits = 0; private short s_run_file_hits = Short.MAX_VALUE; // ---- Optimization that saves some cycles ----- TObject internalGetCellContents(int column, int row) { // NOTES: // This is called *A LOT*. It's a key part of the 20% of the program // that's run 80% of the time. // This performs very nicely for rows that are completely contained within // 1 sector. However, rows that contain large cells (eg. a large binary // or a large string) and spans many sectors will not be utilizing memory // as well as it could. // The reason is because all the data for a row is read from the store even // if only 1 cell of the column is requested. This will have a big // impact on column scans and searches. The cell cache takes some of this // performance bottleneck away. // However, a better implementation of this method is made difficult by // the fact that sector spans can be compressed. We should perhaps // revise the low level data storage so only sectors can be compressed. // // If the database stats need updating then do so now. // if (s_run_total_hits >= 1600) { // getSystem().stats().add(s_run_total_hits, total_hits_key); // getSystem().stats().add(s_run_file_hits, file_hits_key); // s_run_total_hits = 0; // s_run_file_hits = 0; // } // // Increment the total hits counter // ++s_run_total_hits; // First check if this is within the cache before we continue. 
TObject cell; if (DATA_CELL_CACHING) { cell = cache.get(table_id, row, column); if (cell != null) { return cell; } } // We maintain a cache of byte[] arrays that contain the rows read in // from the file. If consequtive reads are made to the same row, then // this will cause lots of fast cache hits. long record_p = -1; try { synchronized (list_structure) { // Increment the file hits counter ++s_run_file_hits; if (s_run_file_hits >= 100) { getSystem().stats().add(s_run_file_hits, file_hits_key); s_run_file_hits = 0; } // Get the node for the record Area list_block = list_structure.positionOnNode(row); int status = list_block.getInt(); // Check it's not deleted if ((status & 0x020000) != 0) { throw new Error("Unable to read deleted record."); } // Get the pointer to the record we are reading record_p = list_block.getLong(); } // Open a stream to the record DataInputStream din = getDIn(store.getAreaInputStream(record_p)); skipStream(din, 4 + (column * 8)); int cell_type = din.readInt(); int cell_offset = din.readInt(); int cur_at = 8 + 4 + (column * 8); int be_at = 4 + (column_count * 8); int skip_amount = (be_at - cur_at) + cell_offset; skipStream(din, skip_amount); Object ob; if (cell_type == 1) { // If standard object type ob = ObjectTransfer.readFrom(din); } else if (cell_type == 2) { // If reference to a blob in the BlobStore int f_type = din.readInt(); int f_reserved = din.readInt(); long ref_id = din.readLong(); if (f_type == 0) { // Resolve the reference ob = blob_store_interface.getLargeObject(ref_id); } else if (f_type == 1) { ob = null; } else { throw new RuntimeException("Unknown blob type."); } } else { throw new RuntimeException("Unrecognised cell type in data."); } // Get the TType for this column // NOTE: It's possible this call may need optimizing? TType ttype = getDataTableDef().columnAt(column).getTType(); // Wrap it around a TObject cell = new TObject(ttype, ob); // And close the reader. 
din.close(); } catch (IOException e) { Debug().writeException(e); // System.out.println("Pointer = " + row_pointer); throw new RuntimeException("IOError getting cell at (" + column + ", " + row + ") pointer = " + record_p + "."); } // And put in the cache and return it. if (DATA_CELL_CACHING) { cache.put(table_id, row, column, cell); } return cell; } long currentUniqueID() { synchronized (list_structure) { return sequence_id - 1; } } long nextUniqueID() { synchronized (list_structure) { long v = sequence_id; ++sequence_id; if (has_shutdown) { throw new RuntimeException("IO operation while VM shutting down."); } try { try { store.lockForWrite(); header_area.position(4 + 4); header_area.putLong(sequence_id); header_area.checkOut(); } finally { store.unlockForWrite(); } } catch (IOException e) { Debug().writeException(e); throw new Error("IO Error: " + e.getMessage()); } return v; } } void setUniqueID(long value) { synchronized (list_structure) { sequence_id = value; if (has_shutdown) { throw new RuntimeException("IO operation while VM shutting down."); } try { try { store.lockForWrite(); header_area.position(4 + 4); header_area.putLong(sequence_id); header_area.checkOut(); } finally { store.unlockForWrite(); } } catch (IOException e) { Debug().writeException(e); throw new Error("IO Error: " + e.getMessage()); } } } synchronized void dispose(boolean pending_drop) throws IOException { synchronized (list_structure) { if (!is_closed) { close(pending_drop); } } } synchronized boolean drop() throws IOException { synchronized (list_structure) { if (!is_closed) { close(true); } boolean b = storeSystem().deleteStore(store); if (b) { Debug().write(Lvl.MESSAGE, this, "Dropped: " + getSourceIdent()); } return b; } } void shutdownHookCleanup() { // try { synchronized (list_structure) { index_store.close(); // store.synch(); has_shutdown = true; } // } // catch (IOException e) { // Debug().write(Lvl.ERROR, this, "IO Error during shutdown hook."); // Debug().writeException(e); // } 
} boolean isWorthCompacting() { // PENDING: We should perform some analysis on the data to decide if a // compact is necessary or not. return true; } /** * For diagnostic. */ public String toString() { return "[V2MasterTableDataSource: " + file_name + "]"; } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/Variable.java000066400000000000000000000153431330501023400243130ustar00rootroot00000000000000/** * com.mckoi.database.Variable 11 Jul 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; /** * This represents a column name that may be qualified. This object * encapsulated a column name that can be fully qualified in the system. Such * uses of this object would not typically be used against any context. For * example, it would not be desirable to use ColumnName in DataTableDef * because the column names contained in DataTableDef are within a known * context. This object is intended for use within parser processes where * free standing column names with potentially no context are required. *

* NOTE: This object is NOT immutable. * * @author Tobias Downer */ public final class Variable implements java.io.Serializable, Cloneable { static final long serialVersionUID = -8772800465139383297L; /** * Static that represents an unknown table name. */ private static final TableName UNKNOWN_TABLE_NAME = new TableName("##UNKNOWN_TABLE_NAME##"); /** * The TableName that is the context of this column. This may be * UNKNOWN_TABLE_NAME if the table name is not known. */ private TableName table_name; /** * The column name itself. */ private String column_name; /** * Constructs the ColumnName. */ public Variable(TableName table_name, String column_name) { if (table_name == null || column_name == null) { throw new NullPointerException(); } this.table_name = table_name; this.column_name = column_name; } public Variable(String column_name) { this(UNKNOWN_TABLE_NAME, column_name); } public Variable(Variable v) { this.table_name = v.table_name; this.column_name = v.column_name; } /** * Returns the TableName context. */ public TableName getTableName() { if (!(table_name.equals(UNKNOWN_TABLE_NAME))) { return table_name; } return null; } /** * Returns the column name context. */ public String getName() { return column_name; } /** * Attempts to resolve a string '[table_name].[column]' to a Variable * instance. */ public static Variable resolve(String name) { int div = name.lastIndexOf("."); if (div != -1) { // Column represents '[something].[name]' String column_name = name.substring(div + 1); // Make the '[something]' into a TableName TableName table_name = TableName.resolve(name.substring(0, div)); // Set the variable name return new Variable(table_name, column_name); } else { // Column represents '[something]' return new Variable(name); } } /** * Attempts to resolve a string '[table_name].[column]' to a Variable * instance. If the table name does not exist, or the table name schema is * not specified, then the schema/table name is copied from the given object. 
*/ public static Variable resolve(TableName tname, String name) { Variable v = resolve(name); if (v.getTableName() == null) { return new Variable(tname, v.getName()); } else if (v.getTableName().getSchema() == null) { return new Variable( new TableName(tname.getSchema(), v.getTableName().getName()), v.getName()); } return v; } /** * Returns a ColumnName that is resolved against a table name context only * if the ColumnName is unknown in this object. */ public Variable resolveTableName(TableName tablen) { if (table_name.equals(UNKNOWN_TABLE_NAME)) { return new Variable(tablen, getName()); } else { return new Variable(table_name.resolveSchema(tablen.getSchema()), getName()); } } /** * Sets this Variable object with information from the given Variable. */ public Variable set(Variable from) { this.table_name = from.table_name; this.column_name = from.column_name; return this; } /** * Sets the column name of this variable. This should be used if the * variable is resolved from one form to another. */ public void setColumnName(String column_name) { if (column_name == null) { throw new NullPointerException(); } this.column_name = column_name; } /** * Sets the TableName of this variable. */ public void setTableName(TableName tname) { if (table_name == null) { throw new NullPointerException(); } this.table_name = tname; } // ---- /** * Performs a deep clone of this object. */ public Object clone() throws CloneNotSupportedException { return super.clone(); } /** * To string. */ public String toString() { if (getTableName() != null) { return getTableName() + "." + getName(); } return getName(); } /** * To a differently formatted string. */ public String toTechString() { TableName tn = getTableName(); if (tn != null) { return tn.getSchema() + "^" + tn.getName() + "^" + getName(); } return getName(); } /** * Equality. 
*/ public boolean equals(Object ob) { Variable cn = (Variable) ob; return cn.table_name.equals(table_name) && cn.column_name.equals(column_name); } /** * Comparable. */ public int compareTo(Object ob) { Variable cn = (Variable) ob; int v = table_name.compareTo(cn.table_name); if (v == 0) { return column_name.compareTo(cn.column_name); } return v; } /** * Hash code. */ public int hashCode() { return table_name.hashCode() + column_name.hashCode(); } // /** // * The name of the variable. // */ // private String name; // // /** // * Constructs the variable. // */ // public Variable(String name) { // this.name = name; // } // // /** // * Renames the variable to a new name. This should only be used as part // * of resolving an variable alias or lookup. // */ // public void rename(String name) { // this.name = name; // } // // /** // * Returns the name of the variable. // */ // public String getName() { // return name; // } // // // // public String toString() { // return name; // } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/VariableResolver.java000066400000000000000000000026151330501023400260330ustar00rootroot00000000000000/** * com.mckoi.database.VariableResolver 11 Jul 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; /** * An interface to resolve a variable name to a constant object. 
This is used * as a way to resolve a variable into a value to use in an expression. * * @author Tobias Downer */ public interface VariableResolver { /** * A number that uniquely identifies the current state of the variable * resolver. This typically returns the row_index of the table we are * resolving variables on. */ public int setID(); /** * Returns the value of a given variable. */ public TObject resolve(Variable variable); /** * Returns the TType of object the given variable is. */ public TType returnTType(Variable variable); } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/VariableSizeDataStore.java000066400000000000000000000546541330501023400267650ustar00rootroot00000000000000/** * com.mckoi.database.VariableSizeDataStore 25 Jun 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import com.mckoi.debug.*; import com.mckoi.util.ByteArrayUtil; import com.mckoi.util.UserTerminal; import java.io.*; import java.util.zip.*; /** * Provides a mechanism for storing variable length data in a file which can * quickly be indexed via a reference number. The store maintains a master * index file that contains a reference to all individual data records stored * in the system. The data file contains all the data that has been stored. *

* This file format is not intended to be a fully fledged file system. For * example, we can not easily change the size of a data entry. To change the * record, we must delete it then add a new. *

* This system uses two files. One file is the index, and the second file * stores the data. The index file contains sectors as follows: *

 *   4 (int) : index  - The sector index of the data in the data file.
 *   4 (int) : length - Length of the data that was stored.
 *   4 (int) : status - 32 status bits.
 * 
*

* This employs a simple compression scheme when writing out data that would * span more than one sector. It tries compressing the field. If the field * can be compressed into less sectors than if left uncompressed, then the * compressed field is put into the data store. * * @author Tobias Downer */ public class VariableSizeDataStore { /** * Set to true to enable compressed writes. */ private final static boolean COMPRESSED_WRITE_ENABLED = true; /** * The size of elements of each allocation sector. */ private final static int INDEX_SECTOR_SIZE = (4 * 3); /** * A DebugLogger object used to log error messages to. */ private final DebugLogger debug; /** * The index data allocation file. */ private FixedSizeDataStore allocation_store; /** * The actual data file. */ private FixedSizeDataStore data_store; /** * A buffer to store the index key. */ private byte[] index_key; /** * A Deflater and Inflater used to compress and uncompress the size of data * fields put into the store. */ private Deflater deflater; private Inflater inflater; private byte[] compress_buffer; /** * Constructs the variable size store. */ public VariableSizeDataStore(File name, int sector_size, DebugLogger logger) { this.debug = logger; index_key = new byte[INDEX_SECTOR_SIZE]; // We create two files, name + ".axi" and name + ".dss" // The .axi file is the allocation index. The .dss is the data sector // storage. String path = name.getPath(); allocation_store = new FixedSizeDataStore(new File(path + ".axi"), INDEX_SECTOR_SIZE, debug); data_store = new FixedSizeDataStore(new File(path + ".dss"), sector_size, debug); } public VariableSizeDataStore(File name, DebugLogger logger) { this(name, -1, logger); } // ---------- Private methods ---------- // ---------- Public methods ---------- /** * Synchronizes all the data in memory with the hard copy on disk. 
*/ public void synch() throws IOException { allocation_store.synch(); data_store.synch(); } /** * Hard synchronizes the data from memory into the hard copy on disk. This * is guarenteed to ensure the image on the disk will match the image in * memory. */ public void hardSynch() throws IOException { allocation_store.hardSynch(); data_store.hardSynch(); } /** * Returns true if we are locked. */ public boolean locked() { return allocation_store.locked(); } /** * Locks the store so that not deleted elements may be overwritten. */ public void lock() { allocation_store.lock(); data_store.lock(); } /** * Unlocks the store so that deleted elements can be reclaimed again. */ public void unlock() { data_store.unlock(); allocation_store.unlock(); } /** * Returns true if the given record is marked as deleted or not. */ public boolean recordDeleted(int record_index) throws IOException { return allocation_store.isSectorDeleted(record_index); } /** * Returns the number of records that are being used. */ public int usedRecordCount() { return allocation_store.getSectorUseCount(); } /** * Returns the total number of records that are in the store (including * deleted records. */ public int rawRecordCount() throws IOException { return allocation_store.rawSectorCount(); } /** * Returns true if the data store exists. */ public boolean exists() throws IOException { return allocation_store.exists() && data_store.exists(); } /** * Returns true if the store was openned in read only mode. */ public boolean isReadOnly() { return data_store.isReadOnly(); } /** * Opens the data store. The data store can be opened in 'read only' mode. * Returns 'true' if the open procedure should repair itself (dirty open) * or false if the file was cleanly closed down. *

* It is not possible to open a damaged store in read only mode. * * @param read_only if true, then the database is opened in read only mode, * otherwise it is opened in read/write mode. */ public boolean open(boolean read_only) throws IOException { boolean r1 = allocation_store.open(read_only); boolean r2 = data_store.open(read_only); return r1 | r2; } /** * Closes the data store. */ public void close() throws IOException { allocation_store.close(); data_store.close(); } /** * Deletes the store from the file system. Must be called after a 'close'. */ public void delete() { allocation_store.delete(); data_store.delete(); } /** * Attempts to fix a corrupt VariableSizeDataStore object. *

* The store should be open before this method is called. */ public void fix(UserTerminal terminal) throws IOException { terminal.println("+ Fixing variable data store."); // First fix the allocation store and data store. allocation_store.fix(terminal); data_store.fix(terminal); terminal.println("- Repairing references."); // Now look for bad references on this layer. What we do here is we check // that each allocation record references a valid chain in the data // store. If it doesn't then we delete it. int sector_count = allocation_store.rawSectorCount(); int data_sector_count = data_store.rawSectorCount(); terminal.println("- Sector count: " + sector_count); // For each allocation entry trace its chain and mark each sector as // taken. int bad_record_count = 0; int deleted_record_count = 0; for (int i = 0; i < sector_count; ++i) { if (!allocation_store.isSectorDeleted(i)) { allocation_store.getSector(i, index_key); int sector_head = ByteArrayUtil.getInt(index_key, 0); int length = ByteArrayUtil.getInt(index_key, 4); // Is the sector head pointing to a valid record? if (sector_head < 0 || sector_head >= data_sector_count || length <= 0) { ++bad_record_count; // Mark this allocation entry as deleted. allocation_store.deleteAcross(i); ++deleted_record_count; } else { int[] chain_span = data_store.getSectorChain(sector_head, length); } } else { ++deleted_record_count; } } // Print statistics, terminal.println("- Fixed " + bad_record_count + " bad chains."); } /** * Copies this data store to the given path. The store must be open when * this is called. */ public void copyTo(File path) throws IOException { allocation_store.copyTo(path); data_store.copyTo(path); } /** * Updates the 32-bit type_key int of a record. Bit 1-8 are reserved for * this data store, and are used to indicate such things as whether the * record chain is compressed or not. The rest of the bits can be used * for any purpose. 
It is recommended bits 8 through 16 are used for * user-definable information. */ public int writeRecordType(int record_index, int type_key) throws IOException { // Read it in first, // The index of the record to retrieve. allocation_store.getSector(record_index, index_key); // Any special keys regarding how the info was stored int cur_type_key = ByteArrayUtil.getInt(index_key, 8); // Record this. final int old_type_key = cur_type_key; // Merge type key type_key = (type_key & 0x0FFFFFFF0) | (cur_type_key & 0x0F); ByteArrayUtil.setInt(type_key, index_key, 8); // And overwrite the sector allocation_store.overwriteSector(record_index, index_key); // Return the type key as it was before the change. return old_type_key; } /** * Reads the 32-bit type_key int for the given record. The 'type_key' * contains various bit flags set for the record. */ public int readRecordType(int record_index) throws IOException { // Read it in first, // The index of the record to retrieve. allocation_store.getSector(record_index, index_key); // Any special keys regarding how the info was stored int cur_type_key = ByteArrayUtil.getInt(index_key, 8); // Return the type key for this record. return cur_type_key; } /** * Writes a variable length byte[] array to the first available index. * Returns the index reference for this element. */ public int write(byte[] buf, int offset, int length) throws IOException { // If the length of the record to add is bigger than a sector then try // and compress it. 
int sector_size = data_store.getSectorSize(); boolean use_compressed_form = false; int compress_size = -1; if (COMPRESSED_WRITE_ENABLED) { if (length > sector_size) { int orig_span = data_store.calculateSectorSpan(length); if (deflater == null) { deflater = new Deflater(); } deflater.setInput(buf, offset, length); deflater.finish(); if (compress_buffer == null || compress_buffer.length < length + 4) { compress_buffer = new byte[length + 4]; } compress_size = deflater.deflate(compress_buffer) + 4; deflater.reset(); int new_span = data_store.calculateSectorSpan(compress_size); if (new_span < orig_span) { // Put the length of the original buffer on the end of the compressed // data. ByteArrayUtil.setInt(length, compress_buffer, compress_size - 4); use_compressed_form = true; } } } // Write the data to the data file, int v; int real_length; int type_key = 0; if (use_compressed_form) { v = data_store.writeAcross(compress_buffer, 0, compress_size); real_length = compress_size; // Indicate this run is compressed. type_key = type_key | 0x0001; } else { v = data_store.writeAcross(buf, offset, length); real_length = length; } // Create a new index key, // The first index ByteArrayUtil.setInt(v, index_key, 0); ByteArrayUtil.setInt(real_length, index_key, 4); ByteArrayUtil.setInt(type_key, index_key, 8); // Add to the allocation store last. return allocation_store.addSector(index_key); } /** * Reads a variable length byte[] array from the given index position. * This will read the first n bytes from the element, upto the maximum that * was stored. It returns the number of bytes that were read. */ public int read(int record, byte[] buf, int offset, int length) throws IOException { // The index of the record to retrieve. allocation_store.getSector(record, index_key); // Get the head of the chain, int chain_head = ByteArrayUtil.getInt(index_key, 0); // The length of data that was stored. 
int data_length = ByteArrayUtil.getInt(index_key, 4); // Any special keys regarding how the info was stored int type_key = ByteArrayUtil.getInt(index_key, 8); // If it's compressed, read in the compressed data to the buffer. if ((type_key & 0x0001) != 0) { if (compress_buffer == null || compress_buffer.length < data_length) { compress_buffer = new byte[data_length]; } data_store.readAcross(chain_head, compress_buffer, 0, data_length); // Then extract as much as we can into the input buffer. if (inflater == null) { inflater = new Inflater(); } inflater.reset(); inflater.setInput(compress_buffer, 0, data_length); int inflate_count; try { inflate_count = inflater.inflate(buf, offset, length); } catch (DataFormatException e) { e.printStackTrace(); debug.writeException(e); throw new Error(e.getMessage()); } return inflate_count; } else { // Not compressed... // The amount we are reading, int read_amount = Math.min(length, data_length); // Read it in, data_store.readAcross(chain_head, buf, offset, read_amount); return read_amount; } } /** * Reads in a complete record and puts it into the returned byte[] array. */ public byte[] readRecord(int record) throws IOException { // The index of the record to retrieve. allocation_store.getSector(record, index_key); // Get the head of the chain, int chain_head = ByteArrayUtil.getInt(index_key, 0); // The length of data that was stored. int data_length = ByteArrayUtil.getInt(index_key, 4); // Any special keys regarding how the info was stored int type_key = ByteArrayUtil.getInt(index_key, 8); // If it's compressed, read in the compressed data to the buffer. if ((type_key & 0x0001) != 0) { if (compress_buffer == null || compress_buffer.length < data_length) { compress_buffer = new byte[data_length]; } data_store.readAcross(chain_head, compress_buffer, 0, data_length); // Then extract as much as we can into the input buffer. if (inflater == null) { inflater = new Inflater(); } // Get the size of the uncompressed form... 
int uncompressed_size = ByteArrayUtil.getInt(compress_buffer, data_length - 4); byte[] buf = new byte[uncompressed_size]; inflater.reset(); inflater.setInput(compress_buffer, 0, data_length - 4); int inflate_count; try { inflate_count = inflater.inflate(buf); } catch (DataFormatException e) { e.printStackTrace(); debug.writeException(e); throw new Error(e.getMessage()); } if (inflate_count != buf.length) { throw new Error("Inflate size != buf.length (" + inflate_count + " != " + buf.length + ")"); } return buf; } else { // Not compressed... // Allocate the buffer store. byte[] buf = new byte[data_length]; // Read it in, data_store.readAcross(chain_head, buf, 0, data_length); return buf; } } /** * Deletes the data at the given index position. */ public int delete(int record) throws IOException { // The index of the record to delete, allocation_store.getSector(record, index_key); // Get the head of the chain to delete. int chain_head = ByteArrayUtil.getInt(index_key, 0); // Delete the allocation index, allocation_store.deleteSector(record); // Delete the data chain, data_store.deleteAcross(chain_head); return record; } private OutputStream sector_output_stream = null; /** * Returns an OutputStream object that can be used to write data into the * store. When the 'completeWriteStream' method is called, the records in * this store are updated appropriately for the data written in, and a * record index is returned. *

* NOTE: Only one open stream may be active at a time. While this stream * is open this VariableSizeDataStore object may not be used in any other * way. */ public OutputStream getRecordOutputStream() throws IOException { if (sector_output_stream == null) { sector_output_stream = data_store.getSectorOutputStream(); return sector_output_stream; } else { throw new Error("More than one record output stream opened."); } } /** * Updates the record allocation table with the data in the output stream * returned by 'getRecordOutputStream'. Returns an index for how to * reference this data later. *

* After this method is called it is safe again to use this * VariableSizeDataStore object. */ public int completeRecordStreamWrite() throws IOException { if (sector_output_stream != null) { int v = data_store.getSectorOfLastOutputStream(); int real_length = data_store.getLengthOfLastOutputStream(); int type_key = 0; // Create a new index key, // The first index ByteArrayUtil.setInt(v, index_key, 0); ByteArrayUtil.setInt(real_length, index_key, 4); ByteArrayUtil.setInt(type_key, index_key, 8); sector_output_stream = null; data_store.wipeLastOutputStream(); // Add to the allocation store last. return allocation_store.addSector(index_key); } else { throw new Error("Output stream not available."); } } /** * Returns an InputStream that is used to read a record in this store with * the given index. *

* NOTE: This can not handle compressed records. *

* NOTE: This does not detect the end of stream (reading past the end of the * record will return undefined data). */ public InputStream getRecordInputStream(int record) throws IOException { // The index of the record to read, allocation_store.getSector(record, index_key); // Get the head of the chain to read. int chain_head = ByteArrayUtil.getInt(index_key, 0); // Open the input stream. return data_store.getSectorInputStream(chain_head); } /** * Returns the size (in bytes) of the sectors used to store information in * the data file. */ public int sectorSize() throws IOException { return data_store.getSectorSize(); } /** * Returns the size of the given record number (compressed size if * applicable). */ public int recordSize(int record) throws IOException { // The index of the record to retrieve. allocation_store.getSector(record, index_key); // Return the size of the record return ByteArrayUtil.getInt(index_key, 4); } /** * Returns true if the given record is compressed. */ public boolean isCompressed(int record) throws IOException { // The index of the record. allocation_store.getSector(record, index_key); // Return true if the compressed bit is set. return (ByteArrayUtil.getInt(index_key, 8) & 0x0001) != 0; } /** * Returns the number of sectors the given record takes up in the data * store. */ public int recordSectorCount(int record) throws IOException { // Returns the number of sectors a record of this size will span using // the current sector size. return data_store.calculateSectorSpan(recordSize(record)); } /** * Returns the size of the data file that keeps all the data in this * store. This is the file size of the data store. */ public long totalStoreSize() { return data_store.totalSize(); } /** * Writes reserved information to the variable data store. You may only * write upto 128 bytes to the reserved data buffer. 
*/ public void writeReservedBuffer(byte[] info, int offset, int length, int res_offset) throws IOException { allocation_store.writeReservedBuffer(info, offset, length, res_offset); } public void writeReservedBuffer(byte[] info, int offset, int length) throws IOException { allocation_store.writeReservedBuffer(info, offset, length); } /** * Reads reserved information from the variable data store. You may only * read upto 128 bytes from the reserved data buffer. */ public void readReservedBuffer(byte[] info, int offset, int length) throws IOException { allocation_store.readReservedBuffer(info, offset, length); } // ---------- Static methods ---------- /** * Convenience for checking if a given data store exists or not. Returns * true if it exists. */ public static boolean exists(File path, String name) throws IOException { File af = new File(path, name + ".axi"); File df = new File(path, name + ".dss"); return (af.exists() & df.exists()); } /** * Convenience for deleting a VariableSizeDataStore store. */ public static boolean delete(File path, String name) throws IOException { File af = new File(path, name + ".axi"); File df = new File(path, name + ".dss"); return (af.delete() & df.delete()); } /** * Convenience for renaming a VariableSizeDataStore store to another name. 
*/ public static boolean rename(File path_source, String name_source, File path_dest, String name_dest) throws IOException { File afs = new File(path_source, name_source + ".axi"); File dfs = new File(path_source, name_source + ".dss"); File afd = new File(path_dest, name_dest + ".axi"); File dfd = new File(path_dest, name_dest + ".dss"); return (afs.renameTo(afd) & dfs.renameTo(dfd)); } // ---------- Testing methods ---------- public int writeString(String str) throws IOException { byte[] bts = str.getBytes(); return write(bts, 0, bts.length); } public String readString(int record) throws IOException { byte[] buffer = new byte[65536]; int read_in = read(record, buffer, 0, 65536); return new String(buffer, 0, read_in); } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/ViewDef.java000066400000000000000000000072301330501023400241130ustar00rootroot00000000000000/** * com.mckoi.database.ViewDef 23 Aug 2002 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import java.io.*; import com.mckoi.database.global.BlobAccessor; import com.mckoi.database.global.ByteLongObject; /** * A ViewDef object is a definition of a view stored in the database. It is * an object that can be easily serialized and deserialized to/from the system * view table. 
It contains the DataTableDef that describes the characteristics * of the view result, and a QueryPlanNode that describes how the view can be * constructed. * * @author Tobias Downer */ public class ViewDef { /** * The DataTableDef object that describes the view column def. */ private DataTableDef view_def; /** * The QueryPlanNode that is used to evaluate the view. */ private QueryPlanNode view_query_node; /** * Constructs the ViewDef object. */ public ViewDef(DataTableDef view_def, QueryPlanNode query_node) { this.view_def = view_def; this.view_query_node = query_node; } /** * Returns the DataTableDef for this view. */ public DataTableDef getDataTableDef() { return view_def; } /** * Returns the QueryPlanNode for this view. */ public QueryPlanNode getQueryPlanNode() { try { return (QueryPlanNode) view_query_node.clone(); } catch (CloneNotSupportedException e) { throw new Error("Clone error: " + e.getMessage()); } } /** * Forms this ViewDef object into a serialized ByteLongObject object that can * be stored in a table. */ ByteLongObject serializeToBlob() { try { ByteArrayOutputStream byte_out = new ByteArrayOutputStream(); ObjectOutputStream out = new ObjectOutputStream(byte_out); // Write the version number out.writeInt(1); // Write the DataTableDef getDataTableDef().write(out); // Serialize the QueryPlanNode out.writeObject(getQueryPlanNode()); out.flush(); return new ByteLongObject(byte_out.toByteArray()); } catch (IOException e) { throw new Error("IO Error: " + e.getMessage()); } } /** * Creates an instance of ViewDef from the serialized information stored in * the blob. 
*/ static final ViewDef deserializeFromBlob(BlobAccessor blob) { InputStream blob_in = blob.getInputStream(); try { ObjectInputStream in = new ObjectInputStream(blob_in); // Read the version int version = in.readInt(); if (version == 1) { DataTableDef view_def = DataTableDef.read(in); view_def.setImmutable(); QueryPlanNode view_plan = (QueryPlanNode) in.readObject(); return new ViewDef(view_def, view_plan); } else { throw new IOException( "Newer ViewDef version serialization: " + version); } } catch (IOException e) { throw new Error("IO Error: " + e.getMessage()); } catch (ClassNotFoundException e) { throw new Error("Class not found: " + e.getMessage()); } } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/ViewManager.java000066400000000000000000000261201330501023400247660ustar00rootroot00000000000000/** * com.mckoi.database.ViewManager 20 Mar 2003 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import java.util.HashMap; import com.mckoi.database.global.BlobAccessor; import com.mckoi.database.jdbc.SQLQuery; import com.mckoi.util.IntegerVector; /** * A DatabaseConnection view manager. This controls adding, updating, deleting, * and processing views inside the system view table. * * @author Tobias Downer */ public class ViewManager { /** * The DatabaseConnection. */ private DatabaseConnection connection; /** * The context. 
*/ private DatabaseQueryContext context; /** * Set to true when the connection makes changes to the view table through * this manager. */ private boolean view_table_changed; /** * A local cache of ViewDef objects mapped by row id in the system view * table. This cache is invalidated when changes are committed to the system * view table. */ private HashMap local_cache; /** * Constructs the ViewManager for a DatabaseConnection. */ ViewManager(DatabaseConnection connection) { this.connection = connection; this.context = new DatabaseQueryContext(connection); this.local_cache = new HashMap(); this.view_table_changed = false; // Attach a cache backed on the VIEW table which will invalidate the // connection cache whenever the view table is modified. connection.attachTableBackedCache(new TableBackedCache(Database.SYS_VIEW) { public void purgeCacheOfInvalidatedEntries( IntegerVector added_rows, IntegerVector removed_rows) { // If there were changed then invalidate the cache if (view_table_changed) { invalidateViewCache(); view_table_changed = false; } // Otherwise, if there were committed added or removed changes also // invalidate the cache, else if ((added_rows != null && added_rows.size() > 0) || (removed_rows != null && removed_rows.size() > 0)) { invalidateViewCache(); } } }); } /** * Returns the local cache of ViewDef objects. This cache is mapped from * row_id to view object. The cache is invalidated when changes are * committed to the system view table. */ private HashMap getViewCache() { return local_cache; } /** * Invalidates the view cache. */ private void invalidateViewCache() { local_cache.clear(); } /** * Given the SYS_VIEW table, this returns a new table that contains the * entry with the given view name, or an empty result if the view is not * found. * Generates an error if more than 1 entry found. 
*/ private Table findViewEntry(DataTable table, TableName view_name) { Operator EQUALS = Operator.get("="); Variable schemav = table.getResolvedVariable(0); Variable namev = table.getResolvedVariable(1); Table t = table.simpleSelect(context, namev, EQUALS, new Expression(TObject.stringVal(view_name.getName()))); t = t.exhaustiveSelect(context, Expression.simple( schemav, EQUALS, TObject.stringVal(view_name.getSchema()))); // This should be at most 1 row in size if (t.getRowCount() > 1) { throw new RuntimeException( "Assert failed: multiple view entries for " + view_name); } // Return the entries found. return t; } /** * Returns true if the view with the given name exists. */ public boolean viewExists(TableName view_name) { DataTable table = connection.getTable(Database.SYS_VIEW); return findViewEntry(table, view_name).getRowCount() == 1; } /** * Defines a view. If the view with the name has not been defined it is * defined. If the view has been defined then it is overwritten with this * information. * * @param view information that defines the view. * @param query the query that forms the view. * @param user the user that owns this view being defined. */ public void defineView(ViewDef view, SQLQuery query, User user) throws DatabaseException { DataTableDef data_table_def = view.getDataTableDef(); DataTable view_table = connection.getTable(Database.SYS_VIEW); TableName view_name = data_table_def.getTableName(); // Create the view record RowData rdat = new RowData(view_table); rdat.setColumnDataFromObject(0, data_table_def.getSchema()); rdat.setColumnDataFromObject(1, data_table_def.getName()); rdat.setColumnDataFromObject(2, query.serializeToBlob()); rdat.setColumnDataFromObject(3, view.serializeToBlob()); rdat.setColumnDataFromObject(4, user.getUserName()); // Find the entry from the view that equals this name Table t = findViewEntry(view_table, view_name); // Delete the entry if it already exists. 
if (t.getRowCount() == 1) { view_table.delete(t); } // Insert the new view entry in the system view table view_table.add(rdat); // Notify that this database object has been successfully created. connection.databaseObjectCreated(view_name); // Change to the view table view_table_changed = true; } /** * Deletes the view with the given name, or returns false if no entries were * deleted from the view table. */ public boolean deleteView(TableName view_name) throws DatabaseException { DataTable table = connection.getTable(Database.SYS_VIEW); // Find the entry from the view table that equal this name Table t = findViewEntry(table, view_name); // No entries so return false if (t.getRowCount() == 0) { return false; } table.delete(t); // Notify that this database object has been successfully dropped. connection.databaseObjectDropped(view_name); // Change to the view table view_table_changed = true; // Return that 1 or more entries were dropped. return true; } /** * Creates a ViewDef object for the given view name in the table. The * access is cached through the given HashMap object. *

* We assume the access to the cache is limited to the current thread * calling this method. We don't synchronize over the cache at any time. */ private static ViewDef getViewDef(HashMap cache, TableDataSource view_table, TableName view_name) { RowEnumeration e = view_table.rowEnumeration(); while (e.hasMoreRows()) { int row = e.nextRowIndex(); String c_schema = view_table.getCellContents(0, row).getObject().toString(); String c_name = view_table.getCellContents(1, row).getObject().toString(); if (view_name.getSchema().equals(c_schema) && view_name.getName().equals(c_name)) { Object cache_key = new Long(row); ViewDef view_def = (ViewDef) cache.get(cache_key); if (view_def == null) { // Not in the cache, so deserialize it and put it in the cache. BlobAccessor blob = (BlobAccessor) view_table.getCellContents(3, row).getObject(); // Derserialize the blob view_def = ViewDef.deserializeFromBlob(blob); // Put this in the cache.... cache.put(cache_key, view_def); } return view_def; } } throw new StatementException("View '" + view_name + "' not found."); } /** * Creates a ViewDef object for the given index value in the table. The * access is cached through the given HashMap object. *

* We assume the access to the cache is limited to the current thread * calling this method. We don't synchronize over the cache at any time. */ private static ViewDef getViewDef(HashMap cache, TableDataSource view_table, int index) { RowEnumeration e = view_table.rowEnumeration(); int i = 0; while (e.hasMoreRows()) { int row = e.nextRowIndex(); if (i == index) { Object cache_key = new Long(row); ViewDef view_def = (ViewDef) cache.get(cache_key); if (view_def == null) { // Not in the cache, so deserialize it and put it in the cache. BlobAccessor blob = (BlobAccessor) view_table.getCellContents(3, row).getObject(); // Derserialize the blob view_def = ViewDef.deserializeFromBlob(blob); // Put this in the cache.... cache.put(cache_key, view_def); } return view_def; } ++i; } throw new Error("Index out of range."); } /** * Returns a freshly deserialized QueryPlanNode object for the given view * object. */ public QueryPlanNode createViewQueryPlanNode(TableName view_name) { DataTable table = connection.getTable(Database.SYS_VIEW); return getViewDef(local_cache, table, view_name).getQueryPlanNode(); } /** * Returns an InternalTableInfo object used to model the list of views * that are accessible within the given Transaction object. This is used to * model all views as regular tables accessible within a transaction. *

* Note that the 'ViewManager' parameter can be null if there is no backing * view manager. The view manager is intended as a cache to improve the * access speed of the manager. */ static InternalTableInfo createInternalTableInfo(ViewManager manager, Transaction transaction) { return new ViewInternalTableInfo(manager, transaction); } // ---------- Inner classes ---------- /** * An object that models the list of views as table objects in a * transaction. */ private static class ViewInternalTableInfo extends AbstractInternalTableInfo2 { ViewManager view_manager; HashMap view_cache; ViewInternalTableInfo(ViewManager manager, Transaction transaction) { super(transaction, Database.SYS_VIEW); this.view_manager = manager; if (view_manager == null) { view_cache = new HashMap(); } else { view_cache = view_manager.getViewCache(); } } public String getTableType(int i) { return "VIEW"; } public DataTableDef getDataTableDef(int i) { return getViewDef(view_cache, transaction.getTable(Database.SYS_VIEW), i).getDataTableDef(); } public MutableTableDataSource createInternalTable(int i) { throw new RuntimeException("Not supported for views."); } } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/VirtualTable.java000066400000000000000000000110021330501023400251500ustar00rootroot00000000000000/** * com.mckoi.database.VirtualTable 08 Mar 1998 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package com.mckoi.database; import com.mckoi.util.IntegerVector; import com.mckoi.util.BlockIntegerList; /** * A VirtualTable is a representation of a table whose rows are actually * physically stored in another table. In other words, this table just * stores pointers to rows in other tables. *

* We use the VirtualTable to represent temporary tables created from select, * join, etc operations. *

* An important note about VirtualTables: When we perform a 'select' operation * on a virtual table, unlike a DataTable that permanently stores information * about column cell relations, we must resolve column relations between the * sub-set at select time. This involves asking the tables parent(s) for a * scheme to describe relations in a sub-set. * * @author Tobias Downer */ public class VirtualTable extends JoinedTable { /** * Array of IntegerVectors that represent the rows taken from the given * parents. */ protected IntegerVector[] row_list; /** * The number of rows in the table. */ private int row_count; /** * Helper function for the constructor. */ protected void init(Table[] tables) { super.init(tables); int table_count = tables.length; row_list = new IntegerVector[table_count]; for (int i = 0; i < table_count; ++i) { row_list[i] = new IntegerVector(); } } /** * The Constructor. It is constructed with a list of tables that this * virtual table is a sub-set or join of. */ VirtualTable(Table[] tables) { super(tables); } VirtualTable(Table table) { super(table); } protected VirtualTable() { super(); } /** * Returns the list of IntegerVector that represents the rows that this * VirtualTable references. */ protected IntegerVector[] getReferenceRows() { return row_list; } /** * Returns the number of rows stored in the table. */ public int getRowCount() { return row_count; } /** * Sets the rows in this table. We should search for the * 'table' in the 'reference_list' however we don't for efficiency. */ void set(Table table, IntegerVector rows) { row_list[0] = new IntegerVector(rows); row_count = rows.size(); } /** * This is used in a join to set a list or joined rows and tables. The * 'tables' array should be an exact mirror of the 'reference_list'. The * IntegerVector[] array contains the rows to add for each respective table. * The given IntegerVector objects should have identical lengths. 
*/ void set(Table[] tables, IntegerVector[] rows) { for (int i = 0; i < tables.length; ++i) { row_list[i] = new IntegerVector(rows[i]); } if (rows.length > 0) { row_count = rows[0].size(); } } /** * Sets the rows in this table as above, but uses a BlockIntegerList as an * argument instead. */ void set(Table table, BlockIntegerList rows) { row_list[0] = new IntegerVector(rows); row_count = rows.size(); } /** * Sets the rows in this table as above, but uses a BlockIntegerList array * as an argument instead. */ void set(Table[] tables, BlockIntegerList[] rows) { for (int i = 0; i < tables.length; ++i) { row_list[i] = new IntegerVector(rows[i]); } if (rows.length > 0) { row_count = rows[0].size(); } } // ---------- Implemented from JoinedTable ---------- protected int resolveRowForTableAt(int row_number, int table_num) { return row_list[table_num].intAt(row_number); } protected void resolveAllRowsForTableAt( IntegerVector row_set, int table_num) { IntegerVector cur_row_list = row_list[table_num]; for (int n = row_set.size() - 1; n >= 0; --n) { int aa = row_set.intAt(n); int bb = cur_row_list.intAt(aa); row_set.setIntAt(bb, n); } } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/WorkerPool.java000066400000000000000000000174171330501023400246750ustar00rootroot00000000000000/** * com.mckoi.database.WorkerPool 12 Aug 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package com.mckoi.database; import com.mckoi.debug.DebugLogger; import java.util.LinkedList; /** * Maintains a pool of worker threads that are used to dispatch commands to * a Database sub-system. * * @author Tobias Downer */ final class WorkerPool { /** * The TransactionSystem that this pool is part of. */ private TransactionSystem system; /** * This is the maximum number of worker threads that will be created. */ private int MAXIMUM_WORKER_THREADS = 4; /** * This is a queue of 'WorkerThread' objects that are currently available * to process commands from the service providers. */ private LinkedList available_worker_threads; /** * The number of worker threads that have been created in total. */ private int worker_thread_count; /** * A list of pending Runnable objects that are due to be executed. This is * a queue of events to be run. */ private LinkedList run_queue; /** * If this is set to false, then no commands will be executed by the * 'execute' method. */ private boolean is_executing_commands; /** * Constructs the worker thread pool. */ WorkerPool(TransactionSystem system, int max_worker_threads) { this.system = system; MAXIMUM_WORKER_THREADS = max_worker_threads; is_executing_commands = false; // Set up the run queue run_queue = new LinkedList(); // Set up the worker threads available_worker_threads = new LinkedList(); worker_thread_count = 0; // // Create a single worker thread and start it. // ++worker_thread_count; // WorkerThread wt = new WorkerThread(this); // wt.start(); } /** * Returns a DebugLogger object that we can use to log debug messages. */ public final DebugLogger Debug() { return system.Debug(); } // ---------- Thread Pooling methods ---------- /** * This is called by a WorkerThread when it is decided that it is ready * to service a new command. */ void notifyWorkerReady(WorkerThread worker_thread) { synchronized (available_worker_threads) { // Add it to the queue of worker threads that are available. 
available_worker_threads.add(worker_thread); // Are there any commands pending? int q_len = run_queue.size(); if (q_len > 0) { // Execute the bottom element on the queue RunCommand command = (RunCommand) run_queue.remove(0); execute(command.user, command.database, command.runnable); } } } /** * This returns the first available WorkerThread object from the thread * pool. If there are no available worker threads available then it returns * null. This method must execute fast and must not block. */ private WorkerThread getFirstWaitingThread() { synchronized (available_worker_threads) { // Is there a worker thread available? int size = available_worker_threads.size(); if (size > 0) { // Yes so remove the first element and return it. WorkerThread wt = (WorkerThread) available_worker_threads.remove(0); return wt; } else { // Otherwise create a new worker thread if we can. if (worker_thread_count < MAXIMUM_WORKER_THREADS) { ++worker_thread_count; WorkerThread wt = new WorkerThread(this); wt.start(); // NOTE: We must _not_ return the worker thread we have just created. // We must wait until the worker thread has made it self known by // it calling the 'notifyWorkerReady' method. } return null; } } } /** * Executes database functions from the 'run' method of the given runnable * instance on a worker thread. All database functions should go through * a worker thread. If we ensure this, we can easily stop all database * functions from executing. Also, we only need to have a certain number * of threads active at any one time rather than a unique thread for each * connection. 
*/ void execute(User user, DatabaseConnection database, Runnable runner) { synchronized (available_worker_threads) { if (is_executing_commands) { WorkerThread worker = getFirstWaitingThread(); if (worker != null) { // System.out.println("[Database] executing runner"); worker.execute(user, database, runner); return; } } // System.out.println("[Database] adding to run queue"); RunCommand command = new RunCommand(user, database, runner); run_queue.add(command); } } /** * Controls whether the database is allowed to execute commands or not. If * this is set to true, then calls to 'execute' will make commands execute. */ void setIsExecutingCommands(boolean status) { synchronized (available_worker_threads) { if (status == true) { is_executing_commands = true; // Execute everything on the queue for (int i = run_queue.size() - 1; i >= 0; --i) { RunCommand command = (RunCommand) run_queue.remove(i); execute(command.user, command.database, command.runnable); } } else { is_executing_commands = false; } } } /** * Waits until all executing commands have stopped. This is best called * right after a call to 'setIsExecutingCommands(false)'. If these two * commands are run, the database is in a known state where no commands * can be executed. *

* NOTE: This can't be called from the WorkerThread. Deadlock will * result if we were allowed to do this. */ void waitUntilAllWorkersQuiet() { if (Thread.currentThread() instanceof WorkerThread) { throw new Error("Can't call this method from a WorkerThread!"); } synchronized (available_worker_threads) { // loop until available works = total worker thread count. while (worker_thread_count != available_worker_threads.size()) { // Wait half a second try { available_worker_threads.wait(500); } catch (InterruptedException e) {} // ISSUE: If this lasts for more than 10 minutes, one of the worker // threads is likely in a state of deadlock. If this happens, we // should probably find all the open worker threads and clean them // up nicely. } } } /** * Shuts down the WorkerPool object stopping all worker threads. */ void shutdown() { synchronized (available_worker_threads) { while (available_worker_threads.size() > 0) { WorkerThread wt = (WorkerThread) available_worker_threads.remove(0); --worker_thread_count; wt.shutdown(); } } } // ---------- Inner classes ---------- /** * Structures within the run_queue list. This stores the Runnable to * run and the User that's executing the command. */ private static final class RunCommand { User user; DatabaseConnection database; Runnable runnable; public RunCommand(User user, DatabaseConnection database, Runnable runnable) { this.user = user; this.database = database; this.runnable = runnable; } } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/WorkerThread.java000066400000000000000000000100671330501023400251650ustar00rootroot00000000000000/** * com.mckoi.database.WorkerThread 09 Sep 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database; import com.mckoi.debug.*; /** * This is a worker thread. This is given commands to execute by the * WorkerPool. * * @author Tobias Downer */ final class WorkerThread extends Thread { /** * If this is set to true, the server displays the time each executed * command took. */ private static final boolean DISPLAY_COMMAND_TIME = false; /** * Set to true to turn off this worker thread. */ private boolean shutdown; /** * The Runnable command we are currently processing. */ private Runnable command; /** * The time the command was started. */ private long start_time; /** * The WorkerPool object that this worker thread is for. */ private WorkerPool worker_pool; /** * Constructs the thread. */ public WorkerThread(WorkerPool worker_pool) { super(); // setDaemon(true); setName("Mckoi - Worker"); this.worker_pool = worker_pool; command = null; shutdown = false; } /** * Returns a DebugLogger object we can use to log debug messages. */ public final DebugLogger Debug() { return worker_pool.Debug(); } // ---------- Other methods ---------- /** * Shuts down this worker thread. */ synchronized void shutdown() { shutdown = true; notifyAll(); } /** * Tells the worker thread that the user is executing the given command. 
*/ void execute(User user, DatabaseConnection database_connection, Runnable runner) { // This should help to prevent deadlock synchronized (this) { if (command == null) { this.command = runner; notifyAll(); } else { throw new RuntimeException( "Deadlock Error, tried to execute command on running worker."); } } } /** * Starts executing this worker thread. */ public synchronized void run() { while (true) { try { // Is there any command waiting to be executed? if (command != null) { try { // Record the time this command was started. start_time = System.currentTimeMillis(); // Run the command command.run(); } finally { command = null; // Record the time the command ended. long elapsed_time = System.currentTimeMillis() - start_time; if (DISPLAY_COMMAND_TIME) { System.err.print("[Worker] Completed command in "); System.err.print(elapsed_time); System.err.print(" ms. "); System.err.println(this); } } } // Notifies the thread pool manager that this worker is ready // to go. worker_pool.notifyWorkerReady(this); // NOTE: The above command may cause a command to be posted on this // worker. while (command == null) { try { // Wait until there is a new command to process. wait(); } catch (InterruptedException e) { /* ignore */ } // Shut down if we need to... if (shutdown) { return; } } } catch (Throwable e) { Debug().write(Lvl.ERROR, this, "Worker thread interrupted because of exception:\n" + e.getMessage()); Debug().writeException(e); } } } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/control/000077500000000000000000000000001330501023400233755ustar00rootroot00000000000000mckoisqldb-1.0.6/src/main/java/com/mckoi/database/control/AbstractDBConfig.java000066400000000000000000000044321330501023400273420ustar00rootroot00000000000000/** * com.mckoi.database.control.AbstractDBConfig 29 Mar 2002 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.control; import java.io.File; import java.util.Hashtable; /** * An abstract implementation of DBConfig. * * @author Tobias Downer */ public class AbstractDBConfig implements DBConfig { /** * The current base path of the database configuration. */ private File current_path; /** * The Hashtable mapping from configuration key to value for the key. */ private Hashtable key_map; /** * Constructs the DBConfig. */ public AbstractDBConfig(File current_path) { this.current_path = current_path; this.key_map = new Hashtable(); } /** * Returns the default value for the configuration property with the given * key. */ protected String getDefaultValue(String property_key) { // This abstract implementation returns null for all default keys. return null; } /** * Sets the configuration value for the key property key. 
*/ protected void setValue(String property_key, String val) { key_map.put(property_key, val); } // ---------- Implemented from DBConfig ---------- public File currentPath() { return current_path; } public String getValue(String property_key) { // If the key is in the map, return it here String val = (String) key_map.get(property_key); if (val == null) { return getDefaultValue(property_key); } return val; } public DBConfig immutableCopy() { AbstractDBConfig immutable_copy = new AbstractDBConfig(current_path); immutable_copy.key_map = (Hashtable) key_map.clone(); return immutable_copy; } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/control/DBConfig.java000066400000000000000000000034351330501023400256600ustar00rootroot00000000000000/** * com.mckoi.dbcontrol.DBConfig 27 Mar 2002 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.control; import java.io.File; /** * A container object of configuration details of a database system. This * object can be used to programmatically setup configuration properies * in a database system. * * @author Tobias Downer */ public interface DBConfig { /** * Returns the current path set for this configuration. This is * useful if the configuration is based on a configuration file that has * path references relative to the configuration file. 
In this case, * the path returned here would be the path to the configuration * file. */ File currentPath(); /** * Returns the value that was set for the configuration property with the * given name. *

* This method must always returns a value that the database engine can use * provided the 'property_key' is a supported key. If the property key * is not supported and the key was not set, null is returned. */ String getValue(String property_key); /** * Makes an immutable copy of this configuration. */ DBConfig immutableCopy(); } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/control/DBController.java000066400000000000000000000130361330501023400265740ustar00rootroot00000000000000/** * com.mckoi.dbcontrol.DBController 26 Mar 2002 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.control; import com.mckoi.database.Database; import com.mckoi.database.DatabaseSystem; import com.mckoi.database.DatabaseException; import com.mckoi.debug.*; import com.mckoi.util.LogWriter; import java.io.File; import java.io.Writer; import java.io.PrintWriter; import java.io.IOException; import java.util.Date; /** * An object that provides methods for creating and controlling database * systems in the current JVM. * * @author Tobias Downer */ public final class DBController { /** * This object can not be constructed outside of this package. */ DBController() { } /** * Returns true if a Mckoi database exists in the given directory of the * file system, otherwise returns false if the path doesn't contain a * database. *

* The path string must be formatted using Unix '/' deliminators as * directory separators. * * @param config the configuration of the database to check the existence * of. * @return true if a database exists at the given path, false otherwise. */ public boolean databaseExists(DBConfig config) { Database database = createDatabase(config); boolean b = database.exists(); database.getSystem().dispose(); return b; } /** * Creates a database in the local JVM (and filesystem) given the * configuration in DBConfig and returns a DBSystem object. When this * method returns, the database created will be up and running providing * there was no failure during the database creation process. *

* A failure might happen because the database path does not exist. * * @param admin_user the username of the administrator for the new database. * @param admin_pass the password of the administrator for the new database. * @param config the configuration of the database to create and start in the * local JVM. * @return the DBSystem object used to access the database created. */ public DBSystem createDatabase(DBConfig config, String admin_user, String admin_pass) { // Create the Database object with this configuration. Database database = createDatabase(config); DatabaseSystem system = database.getSystem(); // Create the database. try { database.create(admin_user, admin_pass); database.init(); } catch (DatabaseException e) { system.Debug().write(Lvl.ERROR, this, "Database create failed"); system.Debug().writeException(e); throw new RuntimeException(e.getMessage()); } // Return the DBSystem object for the newly created database. return new DBSystem(this, config, database); } /** * Starts a database in the local JVM given the configuration in DBConfig * and returns a DBSystem object. When this method returns, the database * will be up and running providing there was no failure to initialize the * database. *

* A failure might happen if the database does not exist in the path given * in the configuration. * * @param config the configuration of the database to start in the local * JVM. * @return the DBSystem object used to access the database started. */ public DBSystem startDatabase(DBConfig config) { // Create the Database object with this configuration. Database database = createDatabase(config); DatabaseSystem system = database.getSystem(); // First initialise the database try { database.init(); } catch (DatabaseException e) { system.Debug().write(Lvl.ERROR, this, "Database init failed"); system.Debug().writeException(e); throw new RuntimeException(e.getMessage()); } // Return the DBSystem object for the newly created database. return new DBSystem(this, config, database); } // ---------- Static methods ---------- /** * Creates a Database object for the given DBConfig configuration. */ private static Database createDatabase(DBConfig config) { DatabaseSystem system = new DatabaseSystem(); // Initialize the DatabaseSystem first, // ------------------------------------ // This will throw an Error exception if the database system has already // been initialized. system.init(config); // Start the database class // ------------------------ // Note, currently we only register one database, and it is named // 'DefaultDatabase'. Database database = new Database(system, "DefaultDatabase"); // Start up message system.Debug().write(Lvl.MESSAGE, DBController.class, "Starting Database Server"); return database; } /** * Returns the static controller for this JVM. */ public static DBController getDefault() { return VM_DB_CONTROLLER; } /** * The static DBController object. 
*/ private final static DBController VM_DB_CONTROLLER = new DBController(); } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/control/DBSystem.java000066400000000000000000000213431330501023400257350ustar00rootroot00000000000000/** * com.mckoi.database.control.DBSystem 27 Mar 2002 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.control; import com.mckoi.database.Database; import com.mckoi.database.DatabaseException; import com.mckoi.database.jdbc.MConnection; import com.mckoi.database.jdbc.DatabaseInterface; import com.mckoi.database.jdbcserver.JDBCDatabaseInterface; import com.mckoi.debug.*; import java.sql.Connection; import java.sql.SQLException; /** * An object used to access and control a single database system running in * the current JVM. This object provides various access methods to * safely manipulate the database, as well as allowing server plug-ins. For * example, a TCP/IP JDBC server component might be plugged into this object * to open the database to remote access. * * @author Tobias Downer */ public final class DBSystem { /** * The DBController object. */ private DBController controller; /** * The DBConfig object that describes the startup configuration of the * database. */ private DBConfig config; /** * The underlying Database object of this system. This object gives low * level access to the system. 
*/ private Database database; /** * An internal counter for internal connections created on this system. */ private int internal_counter; /** * Package-protected constructor. */ DBSystem(DBController controller, DBConfig config, Database database) { this.controller = controller; this.config = config; this.database = database; this.internal_counter = 0; // Register the shut down delegate, database.registerShutDownDelegate(new Runnable() { public void run() { internalDispose(); } }); // Enable commands to the database system... database.setIsExecutingCommands(true); } /** * Returns an immutable version of the database system configuration. */ public DBConfig getConfig() { return config; } // ---------- Internal access methods ---------- /** * Returns the com.mckoi.database.Database object for this control. This * methods only works correctly if the database engine has successfully been * initialized. *

* This object is generally not very useful unless you intend to perform * some sort of low level function on the database. This object can be * used to bypass the SQL layer and talk directly with the internals of * the database. * * @return a Database object that can be used to access the database system * at a low level. */ public Database getDatabase() { return database; } /** * Makes a connection to the database and returns a java.sql.Connection * object that can be used to execute queries on the database. This is a * standard connection that talks directly with the database without having * to go through any communication protocol layers. *

* For example, if this control is for a Mckoi database server, the * java.sql.Connection returned here does not go through the TCP/IP * connection. For this reason certain database configuration constraints * (such as number of concurrent connection on the database) may not apply * to this connection. *

* The java.sql.Connection returned here acts exactly as an object returned * by a java.sql.MDriver object. *

* An SQLException is thrown if the login fails. * * @param schema the initial database schema to start the connection in. * @param username the user to login to the database under. * @param password the password of the user. * @throws SQLException if authentication of the user fails. * @return a JDBC java.sql.Connection used to access the database. */ public Connection getConnection(String schema, String username, String password) throws SQLException { // Create the host string, formatted as 'Internal/[hash number]/[counter]' StringBuffer buf = new StringBuffer(); buf.append("Internal/"); buf.append(hashCode()); buf.append('/'); synchronized (this) { buf.append(internal_counter); ++internal_counter; } String host_string = new String(buf); // Create the database interface for an internal database connection. DatabaseInterface db_interface = new JDBCDatabaseInterface(getDatabase(), host_string); // Create the MConnection object (very minimal cache settings for an // internal connection). MConnection connection = new MConnection("", db_interface, 8, 4092000); // Attempt to log in with the given username and password (default schema) connection.login(schema, username, password); // And return the new connection return connection; } /** * Makes a connection to the database and returns a java.sql.Connection * object that can be used to execute queries on the database. This is a * standard connection that talks directly with the database without having * to go through any communication protocol layers. *

* For example, if this control is for a Mckoi database server, the * java.sql.Connection returned here does not go through the TCP/IP * connection. For this reason certain database configuration constraints * (such as number of concurrent connection on the database) may not apply * to this connection. *

* The java.sql.Connection returned here acts exactly as an object returned * by a java.sql.MDriver object. *

* An SQLException is thrown if the login fails. * * @param username the user to login to the database under. * @param password the password of the user. * @throws SQLException if authentication of the user fails. * @return a JDBC java.sql.Connection used to access the database. */ public Connection getConnection(String username, String password) throws SQLException { return getConnection(null, username, password); } // ---------- Global methods ---------- /** * Sets a flag that causes the database to delete itself from the file system * when it is shut down. This is useful if an application needs a * temporary database to work with that is released from the file system * when the application ends. *

* By default, a database is not deleted from the file system when it is * closed. *

* NOTE: Use with care - setting this flag will cause all data stored * in the database to be lost when the database is shut down. */ public final void setDeleteOnClose(boolean status) { database.setDeleteOnShutdown(status); } /** * Closes this database system so it is no longer able to process queries. * A database may be shut down either through this method or by executing a * query that shuts the system down (for example, 'SHUTDOWN'). *

* When a database system is closed, it is not able to be restarted again * unless a new DBSystem object is obtained from the DBController. *

* This method also disposes all resources associated with the * database system (such as threads, etc) so that it may be reclaimed by * the garbage collector. *

* When this method returns this object is no longer usable. */ public void close() { if (database != null) { database.startShutDownThread(); database.waitUntilShutdown(); } } // ---------- Private methods ---------- /** * Disposes of all the resources associated with this system. Note that * this is private method. It may only be called from the shutdown * delegate registered in the constructor. */ private void internalDispose() { if (database != null && database.isInitialized()) { // Disable commands (on worker threads) to the database system... database.setIsExecutingCommands(false); try { database.shutdown(); } catch (DatabaseException e) { database.Debug().write(Lvl.ERROR, this, "Unable to shutdown database because of exception"); database.Debug().writeException(Lvl.ERROR, e); } } controller = null; config = null; database = null; } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/control/DefaultDBConfig.java000066400000000000000000000166151330501023400271710ustar00rootroot00000000000000/** * com.mckoi.database.control.DefaultDBConfig 29 Mar 2002 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package com.mckoi.database.control; import java.io.File; import java.io.InputStream; import java.io.BufferedInputStream; import java.io.IOException; import java.io.FileInputStream; import java.net.URL; import java.net.URLConnection; import java.util.Hashtable; import java.util.Properties; import java.util.Enumeration; /** * Implements a default database configuration that is useful for setting up * a database. This configuration object is mutable. Configuration properties * can be set by calling the 'setxxx' methods. * * @author Tobias Downer */ public class DefaultDBConfig extends AbstractDBConfig { /** * Constructs the configuration. * * @param the current path of the configuration in the file system. This is * useful if the configuration is based on a file with relative paths set * in it. */ public DefaultDBConfig(File current_path) { super(current_path); } /** * Constructs the configuration with the current system path as the * configuration path. */ public DefaultDBConfig() { this(new File(".")); } /** * Gets the default value for the given property value. */ protected String getDefaultValue(String property_key) { ConfigProperty property = (ConfigProperty) CONFIG_DEFAULTS.get(property_key); if (property == null) { return null; } else { return property.getDefaultValue(); } } /** * Overwrites the configuration key with the given value. */ public void setValue(String property_key, String value) { super.setValue(property_key, value); } /** * Loads all the configuration values from the given InputStream. The * input stream must be formatted in a standard properties format. */ public void loadFromStream(InputStream input) throws IOException { Properties config = new Properties(); config.load(new BufferedInputStream(input)); // For each property in the file Enumeration en = config.propertyNames(); while (en.hasMoreElements()) { // Set the property value in this configuration. 
String property_key = (String) en.nextElement(); setValue(property_key, config.getProperty(property_key)); } } /** * Loads all the configuration settings from a configuration file. Useful if * you want to load a default configuration from a 'db.conf' file. The * file must be formatted in a standard properties format. */ public void loadFromFile(File configuration_file) throws IOException { FileInputStream file_in = new FileInputStream(configuration_file); loadFromStream(file_in); file_in.close(); } /** * Loads all the configuration values from the given URL. The file must be * formatted in a standard properties format. */ public void loadFromURL(URL configuration_url) throws IOException { InputStream url_in = configuration_url.openConnection().getInputStream(); loadFromStream(url_in); url_in.close(); } // ---------- Variable helper setters ---------- /** * Sets the path of the database. */ public void setDatabasePath(String path) { setValue("database_path", path); } /** * Sets the path of the log. */ public void setLogPath(String path) { setValue("log_path", path); } /** * Sets that the engine ignores case for identifiers. */ public void setIgnoreIdentifierCase(boolean status) { setValue("ignore_case_for_identifiers", status ? "enabled" : "disabled"); } /** * Sets that the database is read only. */ public void setReadOnly(boolean status) { setValue("read_only", status ? "enabled" : "disabled"); } /** * Sets the minimum debug level for output to the debug log file. */ public void setMinimumDebugLevel(int debug_level) { setValue("debug_level", "" + debug_level); } // ---------- Statics ---------- /** * A Hashtable of default configuration values. This maps from property_key * to ConfigProperty object that describes the property. */ private static Hashtable CONFIG_DEFAULTS = new Hashtable(); /** * Adds a default property to the CONFIG_DEFAULTS map. 
*/ private static void addDefProperty(ConfigProperty property) { CONFIG_DEFAULTS.put(property.getKey(), property); } /** * Sets up the CONFIG_DEFAULTS map with default configuration values. */ static { addDefProperty(new ConfigProperty("database_path", "./data", "PATH")); // addDefProperty(new ConfigProperty("log_path", "./log", "PATH")); addDefProperty(new ConfigProperty("root_path", "jvm", "STRING")); addDefProperty(new ConfigProperty("jdbc_server_port", "9157", "STRING")); addDefProperty(new ConfigProperty( "ignore_case_for_identifiers", "disabled", "BOOLEAN")); addDefProperty(new ConfigProperty( "regex_library", "gnu.regexp", "STRING")); addDefProperty(new ConfigProperty("data_cache_size", "4194304", "INT")); addDefProperty(new ConfigProperty( "max_cache_entry_size", "8192", "INT")); addDefProperty(new ConfigProperty( "lookup_comparison_list", "enabled", "BOOLEAN")); addDefProperty(new ConfigProperty("maximum_worker_threads", "4", "INT")); addDefProperty(new ConfigProperty( "dont_synch_filesystem", "disabled", "BOOLEAN")); addDefProperty(new ConfigProperty( "transaction_error_on_dirty_select", "enabled", "BOOLEAN")); addDefProperty(new ConfigProperty("read_only", "disabled", "BOOLEAN")); addDefProperty(new ConfigProperty( "debug_log_file", "debug.log", "FILE")); addDefProperty(new ConfigProperty("debug_level", "20", "INT")); addDefProperty(new ConfigProperty( "table_lock_check", "enabled", "BOOLEAN")); } // ---------- Inner classes ---------- /** * An object the describes a single configuration property and the default * value for it. 
*/ private static class ConfigProperty { private String key; private String default_value; private String type; private String comment; ConfigProperty(String key, String default_value, String type, String comment) { this.key = key; this.default_value = default_value; this.type = type; this.comment = comment; } ConfigProperty(String key, String default_value, String type) { this(key, default_value, type, null); } String getKey() { return key; } String getDefaultValue() { return default_value; } String getType() { return type; } String getComment() { return comment; } } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/control/TCPJDBCServer.java000066400000000000000000000133621330501023400265050ustar00rootroot00000000000000/** * com.mckoi.database.control.TCPJDBCServer 27 Mar 2002 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.control; import java.net.InetAddress; import java.net.UnknownHostException; import com.mckoi.database.jdbcserver.TCPServer; /** * Attaches to a DBSystem, and binds a TCP port and serves queries for JDBC * connections. This object is used to programmatically create a TCP JDBC * server on the local machine. *

* Note that multiple servers can be constructed to serve the same DBSystem. * You can not use this object to connect a single TCP server to multiple * DBSystem objects. *

* If the underlying database is shut down then this server is also shut down. * * @author Tobias Downer */ public class TCPJDBCServer { /** * The default TCP port for Mckoi SQL Database. */ private final static int DEFAULT_TCP_PORT = 9157; /** * The DBSystem object that we are serving. */ private DBSystem system; /** * An InetAddress representing the interface that server is bound to - useful * for multi-homed machines. null means we bind to all interfaces. */ private InetAddress bind_address; /** * The TCP port that this server is bound to. */ private int tcp_port; /** * The TCPServer object that is managing the connections to this database. */ private TCPServer server; /** * Constructs the TCP JDBC with the given DBSystem object, and sets the * inet address and TCP port that we serve the database from. *

* Constructing this server does not open the port to receive connections * from outside. To start the JDBC server you need to call the 'start' * method. */ public TCPJDBCServer(DBSystem system, InetAddress bind_address, int tcp_port) { this.system = system; this.bind_address = bind_address; this.tcp_port = tcp_port; registerShutdownDelegate(); } /** * Constructs the TCP JDBC with the given DBSystem object, and sets the * TCP port that we serve the database from. This binds the server to all * interfaces on the local machine. *

* Constructing this server does not open the port to receive connections * from outside. To start the JDBC server you need to call the 'start' * method. */ public TCPJDBCServer(DBSystem system, int tcp_port) { this(system, null, tcp_port); } /** * Constructs the TCP JDBC with the given DBSystem object, and sets the * TCP port and address (for multi-homed computers) to the setting of the * configuration in 'system'. *

* Constructing this server does not open the port to receive connections * from outside. To start the JDBC server you need to call the 'start' * method. */ public TCPJDBCServer(DBSystem system) { this.system = system; DBConfig config = system.getConfig(); int jdbc_port = DEFAULT_TCP_PORT; InetAddress interface_address = null; // Read the JDBC config properties. String jdbc_port_str = config.getValue("jdbc_server_port"); String interface_addr_str = config.getValue("jdbc_server_address"); if (jdbc_port_str != null) { try { jdbc_port = Integer.parseInt(jdbc_port_str); } catch (Exception e) { throw new RuntimeException("Unable to parse 'jdbc_server_port'"); } } if (interface_addr_str != null) { try { interface_address = InetAddress.getByName(interface_addr_str); } catch (UnknownHostException e) { throw new RuntimeException("Unknown host: " + e.getMessage()); } } // Set up this port and bind address this.tcp_port = jdbc_port; this.bind_address = interface_address; registerShutdownDelegate(); } /** * Registers the delegate that closes this server when the database * shuts down. */ private void registerShutdownDelegate() { system.getDatabase().registerShutDownDelegate(new Runnable() { public void run() { if (server != null) { stop(); } } }); } /** * Starts the server and binds it to the given port. This method will start * a new thread that listens for incoming connections. */ public synchronized void start() { if (server == null) { server = new TCPServer(system.getDatabase()); server.start(bind_address, tcp_port, "multi_threaded"); } else { throw new RuntimeException( "'start' method called when a server was already started."); } } /** * Stops the server running on the given port. This method will stop any * threads that are listening for incoming connections. *

* Note that this does NOT close the underlying DBSystem object. The * DBSystem object must be closed separately. */ public synchronized void stop() { if (server != null) { server.close(); server = null; } else { throw new RuntimeException( "'stop' method called when no server was started."); } } /** * Returns a string that contains some information about the server that * is running. */ public String toString() { return server.toString(); } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/global/000077500000000000000000000000001330501023400231555ustar00rootroot00000000000000mckoisqldb-1.0.6/src/main/java/com/mckoi/database/global/BlobAccessor.java000066400000000000000000000024331330501023400263630ustar00rootroot00000000000000/** * com.mckoi.database.global.BlobAccessor 20 Jan 2003 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.global; /** * An interface that provides access to basic information about a BLOB so that * we may compare BLOBs implemented in different ways. * * @author Tobias Downer */ public interface BlobAccessor { /** * Returns the size of the BLOB. */ int length(); /** * Returns an InputStream that allows us to read the contents of the blob * from start to finish. This object should be wrapped in a * BufferedInputStream if 'read()' type efficiency is required. 
*/ java.io.InputStream getInputStream(); } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/global/BlobRef.java000066400000000000000000000021401330501023400253300ustar00rootroot00000000000000/** * com.mckoi.database.global.BlobRef 20 Jan 2003 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.global; import java.io.IOException; /** * A lightweight interface that is a reference to a blob in a BlobStore. This * interface allows for data to be read and written to a blob. Writing to a * blob may be restricted depending on the state setting of the blob. * * @author Tobias Downer */ public interface BlobRef extends BlobAccessor, Ref { } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/global/ByteLongObject.java000066400000000000000000000070251330501023400266760ustar00rootroot00000000000000/** * com.mckoi.database.global.ByteLongObject 24 Sep 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.global; import java.io.IOException; import java.io.InputStream; /** * A byte array that can be transferred between the client and server. This * is used for transferring BLOB data to/from the database engine. * * @author Tobias Downer */ public class ByteLongObject implements java.io.Serializable, BlobAccessor { static final long serialVersionUID = -6843780673892019530L; /** * The binary data. */ private byte[] data; /** * Constructor. */ public ByteLongObject(byte[] from, int offset, int length) { data = new byte[(int) length]; System.arraycopy(from, (int) offset, data, 0, (int) length); } public ByteLongObject(byte[] from) { this(from, 0, from.length); } public ByteLongObject(InputStream in, int length) throws IOException { data = new byte[length]; int i = 0; while (i < length) { int read = in.read(data, i, length - i); if (read == -1) { throw new IOException("Premature end of stream."); } i += read; } } /** * Returns the size of the data in this object. */ public int length() { return data.length; } /** * Returns the byte at offset 'n' into the binary object. */ public byte getByte(int n) { return data[n]; } /** * Returns the internal byte[] of this binary object. Care needs to be * taken when handling this object because altering the contents will * change this object. */ public byte[] getByteArray() { return data; } /** * Returns an InputStream that allows us to read the entire byte long object. 
*/ public InputStream getInputStream() { return new BLOBInputStream(); } public String toString() { StringBuffer buf = new StringBuffer(); if (data == null) { buf.append("[ BLOB (NULL) ]"); } else { buf.append("[ BLOB size="); buf.append(data.length); buf.append(" ]"); } return new String(buf); } /** * Inner class that encapsulates the byte long object in an input stream. */ private class BLOBInputStream extends InputStream { private int index; public BLOBInputStream() { index = 0; } public int read() throws IOException { if (index >= length()) { return -1; } int b = ((int) getByte(index)) & 0x0FF; ++index; return b; } public int read(byte[] buf, int off, int len) throws IOException { // As per the InputStream specification. if (len == 0) { return 0; } int size = length(); int to_read = Math.min(len, size - index); if (to_read <= 0) { // Nothing can be read return -1; } System.arraycopy(data, index, buf, off, to_read); index += to_read; return to_read; } } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/global/CastHelper.java000066400000000000000000000430361330501023400260600ustar00rootroot00000000000000/** * com.mckoi.database.global.CastHelper 11 Oct 2001 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package com.mckoi.database.global; import java.text.SimpleDateFormat; import java.text.ParseException; import java.text.DateFormat; import com.mckoi.util.BigNumber; import java.util.Date; /** * Various utility methods for helping to cast a Java object to a type that * is conformant to an SQL type. * * @author Tobias Downer */ public class CastHelper { /** * A couple of standard BigNumber statics. */ private static BigNumber BD_ZERO = BigNumber.fromLong(0); private static BigNumber BD_ONE = BigNumber.fromLong(1); /** * Date, Time and Timestamp parser/formatters */ private static DateFormat[] date_format_sql; private static DateFormat[] time_format_sql; private static DateFormat[] ts_format_sql; static { // The SQL time/date formatters date_format_sql = new DateFormat[1]; date_format_sql[0] = new SimpleDateFormat("yyyy-MM-dd"); time_format_sql = new DateFormat[4]; time_format_sql[0] = new SimpleDateFormat("HH:mm:ss.S z"); time_format_sql[1] = new SimpleDateFormat("HH:mm:ss.S"); time_format_sql[2] = new SimpleDateFormat("HH:mm:ss z"); time_format_sql[3] = new SimpleDateFormat("HH:mm:ss"); ts_format_sql = new DateFormat[4]; ts_format_sql[0] = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.S z"); ts_format_sql[1] = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.S"); ts_format_sql[2] = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss z"); ts_format_sql[3] = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); } /** * Converts the given object to an SQL JAVA_OBJECT type by serializing * the object. */ private static Object toJavaObject(Object ob) { try { return ObjectTranslator.serialize(ob); } catch (Throwable e) { throw new Error("Can't serialize object " + ob.getClass()); } } /** * Formats the date object as a standard SQL string. */ private static String formatDateAsString(Date d) { synchronized (ts_format_sql) { // ISSUE: We have to assume the date is a time stamp because we don't // know if the date object represents an SQL DATE, TIMESTAMP or TIME. 
return ts_format_sql[1].format(d); } } /** * Returns the given string padded or truncated to the given size. If size * is -1 then the size doesn't matter. */ private static String paddedString(String str, int size) { if (size == -1) { return str; } int dif = size - str.length(); if (dif > 0) { StringBuffer buf = new StringBuffer(str); for (int n = 0; n < dif; ++n) { buf.append(' '); } return new String(buf); } else if (dif < 0) { return str.substring(0, size); } return str; } /** * Returns the given long value as a date object. */ private static Date toDate(long time) { return new Date(time); } /** * Converts the given string to a BigNumber. Returns 0 if the cast fails. */ private static BigNumber toBigNumber(String str) { try { return BigNumber.fromString(str); } catch (Throwable e) { return BD_ZERO; } } /** * Helper that generates an appropriate error message for a date format error. */ private static String dateErrorString(String msg, DateFormat[] df) { String pattern = ""; if (df[0] instanceof SimpleDateFormat) { SimpleDateFormat sdf = (SimpleDateFormat) df[0]; pattern = "(" + sdf.toPattern() + ")"; } return msg + pattern; } /** * Parses a String as an SQL date. */ public static Date toDate(String str) { synchronized(date_format_sql) { for (int i = 0; i < date_format_sql.length; ++i) { try { return date_format_sql[i].parse(str); } catch (ParseException e) {} } throw new RuntimeException( dateErrorString("Unable to parse string as a date ", date_format_sql)); } } /** * Parses a String as an SQL time. */ public static Date toTime(String str) { synchronized(time_format_sql) { for (int i = 0; i < time_format_sql.length; ++i) { try { return time_format_sql[i].parse(str); } catch (ParseException e) {} } throw new RuntimeException( dateErrorString("Unable to parse string as a time ", time_format_sql)); } } /** * Parses a String as an SQL timestamp. 
*/ public static Date toTimeStamp(String str) { synchronized(ts_format_sql) { for (int i = 0; i < ts_format_sql.length; ++i) { try { return ts_format_sql[i].parse(str); } catch (ParseException e) {} } throw new RuntimeException( dateErrorString("Unable to parse string as a timestamp ", ts_format_sql)); } } /** * Casts a Java object to the SQL type specified by the given * DataTableColumnDef object. This is used for the following engine * functions; *

    *
  1. To prepare a value for insertion into the data store. For example, * the table column may be STRING but the value here is a BigNumber. *
  2. To cast an object to a specific type in an SQL function such as * CAST. *
* Given any supported object, this will return the internal database * representation of the object as either NullObject, BigNumber, String, * Date, Boolean or ByteLongObject. * * @param ob the Object to cast to the given type * @param sql_type the enumerated sql type, eg. SQLTypes.LONGVARCHAR * @param sql_size the size of the type. For example, CHAR(20) * @param sql_scale the scale of the numerical type. * @param sql_type_string 'sql_type' as a human understandable string, * eg. "LONGVARCHAR" */ public static Object castObjectToSQLType(Object ob, int sql_type, int sql_size, int sql_scale, String sql_type_string) { // if (ob == null) { // ob = NullObject.NULL_OBJ; // } // int sql_type = col_def.getSQLType(); // int sql_size = col_def.getSize(); // int sql_scale = col_def.getScale(); // String sql_type_string = col_def.getSQLTypeString(); // If the input object is a ByteLongObject and the output type is not a // binary SQL type then we need to attempt to deserialize the object. if (ob instanceof ByteLongObject) { if ( sql_type != SQLTypes.JAVA_OBJECT && sql_type != SQLTypes.BLOB && sql_type != SQLTypes.BINARY && sql_type != SQLTypes.VARBINARY && sql_type != SQLTypes.LONGVARBINARY ) { // Attempt to deserialize it try { ob = ObjectTranslator.deserialize((ByteLongObject) ob); } catch (Throwable e) { // Couldn't deserialize so it must be a standard blob which means // we are in error. throw new Error("Can't cast a BLOB to " + sql_type_string); } } else { // This is a ByteLongObject that is being cast to a binary type so // no further processing is necessary. 
return ob; } } // BlobRef can be BINARY, JAVA_OBJECT, VARBINARY or LONGVARBINARY if (ob instanceof BlobRef) { if (sql_type == SQLTypes.BINARY || sql_type == SQLTypes.BLOB || sql_type == SQLTypes.JAVA_OBJECT || sql_type == SQLTypes.VARBINARY || sql_type == SQLTypes.LONGVARBINARY) { return ob; } } // ClobRef can be VARCHAR, LONGVARCHAR, or CLOB if (ob instanceof ClobRef) { if (sql_type == SQLTypes.VARCHAR || sql_type == SQLTypes.LONGVARCHAR || sql_type == SQLTypes.CLOB) { return ob; } } // Cast from NULL if (ob == null) { switch (sql_type) { case(SQLTypes.BIT): // fall through case(SQLTypes.TINYINT): // fall through case(SQLTypes.SMALLINT): // fall through case(SQLTypes.INTEGER): // fall through case(SQLTypes.BIGINT): // fall through case(SQLTypes.FLOAT): // fall through case(SQLTypes.REAL): // fall through case(SQLTypes.DOUBLE): // fall through case(SQLTypes.NUMERIC): // fall through case(SQLTypes.DECIMAL): // fall through case(SQLTypes.CHAR): // fall through case(SQLTypes.VARCHAR): // fall through case(SQLTypes.LONGVARCHAR): // fall through case(SQLTypes.CLOB): // fall through case(SQLTypes.DATE): // fall through case(SQLTypes.TIME): // fall through case(SQLTypes.TIMESTAMP): // fall through case(SQLTypes.NULL): // fall through case(SQLTypes.BINARY): // fall through case(SQLTypes.VARBINARY): // fall through case(SQLTypes.LONGVARBINARY): // fall through case(SQLTypes.BLOB): // fall through case(SQLTypes.JAVA_OBJECT): // fall through case(SQLTypes.BOOLEAN): return null; default: throw new Error("Can't cast NULL to " + sql_type_string); } } // Cast from a number if (ob instanceof Number) { Number n = (Number) ob; switch (sql_type) { case(SQLTypes.BIT): return n.intValue() == 0 ? 
Boolean.FALSE : Boolean.TRUE; case(SQLTypes.TINYINT): // fall through case(SQLTypes.SMALLINT): // fall through case(SQLTypes.INTEGER): // return new BigDecimal(n.intValue()); return BigNumber.fromLong(n.intValue()); case(SQLTypes.BIGINT): // return new BigDecimal(n.longValue()); return BigNumber.fromLong(n.longValue()); case(SQLTypes.FLOAT): return BigNumber.fromString(Double.toString(n.doubleValue())); case(SQLTypes.REAL): return BigNumber.fromString(n.toString()); case(SQLTypes.DOUBLE): return BigNumber.fromString(Double.toString(n.doubleValue())); case(SQLTypes.NUMERIC): // fall through case(SQLTypes.DECIMAL): return BigNumber.fromString(n.toString()); case(SQLTypes.CHAR): return StringObject.fromString(paddedString(n.toString(), sql_size)); case(SQLTypes.VARCHAR): return StringObject.fromString(n.toString()); case(SQLTypes.LONGVARCHAR): return StringObject.fromString(n.toString()); case(SQLTypes.DATE): return toDate(n.longValue()); case(SQLTypes.TIME): return toDate(n.longValue()); case(SQLTypes.TIMESTAMP): return toDate(n.longValue()); case(SQLTypes.BLOB): // fall through case(SQLTypes.BINARY): // fall through case(SQLTypes.VARBINARY): // fall through case(SQLTypes.LONGVARBINARY): return new ByteLongObject(n.toString().getBytes()); case(SQLTypes.NULL): return null; case(SQLTypes.JAVA_OBJECT): return toJavaObject(ob); case(SQLTypes.BOOLEAN): return n.intValue() == 0 ? Boolean.FALSE : Boolean.TRUE; default: throw new Error("Can't cast number to " + sql_type_string); } } // if (ob instanceof Number) // Cast from a string if (ob instanceof StringObject || ob instanceof String) { String str = ob.toString(); switch (sql_type) { case(SQLTypes.BIT): return str.equalsIgnoreCase("true") ? 
Boolean.TRUE : Boolean.FALSE; case(SQLTypes.TINYINT): // fall through case(SQLTypes.SMALLINT): // fall through case(SQLTypes.INTEGER): // return new BigDecimal(toBigDecimal(str).intValue()); return BigNumber.fromLong(toBigNumber(str).intValue()); case(SQLTypes.BIGINT): // return new BigDecimal(toBigDecimal(str).longValue()); return BigNumber.fromLong(toBigNumber(str).longValue()); case(SQLTypes.FLOAT): return BigNumber.fromString( Double.toString(toBigNumber(str).doubleValue())); case(SQLTypes.REAL): return toBigNumber(str); case(SQLTypes.DOUBLE): return BigNumber.fromString( Double.toString(toBigNumber(str).doubleValue())); case(SQLTypes.NUMERIC): // fall through case(SQLTypes.DECIMAL): return toBigNumber(str); case(SQLTypes.CHAR): return StringObject.fromString(paddedString(str, sql_size)); case(SQLTypes.VARCHAR): return StringObject.fromString(str); case(SQLTypes.LONGVARCHAR): return StringObject.fromString(str); case(SQLTypes.DATE): return toDate(str); case(SQLTypes.TIME): return toTime(str); case(SQLTypes.TIMESTAMP): return toTimeStamp(str); case(SQLTypes.BLOB): // fall through case(SQLTypes.BINARY): // fall through case(SQLTypes.VARBINARY): // fall through case(SQLTypes.LONGVARBINARY): return new ByteLongObject(str.getBytes()); case(SQLTypes.NULL): return null; case(SQLTypes.JAVA_OBJECT): return toJavaObject(str); case(SQLTypes.BOOLEAN): return str.equalsIgnoreCase("true") ? 
Boolean.TRUE : Boolean.FALSE; case(SQLTypes.CLOB): return StringObject.fromString(str); default: throw new Error("Can't cast string to " + sql_type_string); } } // if (ob instanceof String) // Cast from a boolean if (ob instanceof Boolean) { Boolean b = (Boolean) ob; switch (sql_type) { case(SQLTypes.BIT): return b; case(SQLTypes.TINYINT): // fall through case(SQLTypes.SMALLINT): // fall through case(SQLTypes.INTEGER): // fall through case(SQLTypes.BIGINT): // fall through case(SQLTypes.FLOAT): // fall through case(SQLTypes.REAL): // fall through case(SQLTypes.DOUBLE): // fall through case(SQLTypes.NUMERIC): // fall through case(SQLTypes.DECIMAL): return b.equals(Boolean.TRUE) ? BD_ONE : BD_ZERO; case(SQLTypes.CHAR): return StringObject.fromString(paddedString(b.toString(), sql_size)); case(SQLTypes.VARCHAR): return StringObject.fromString(b.toString()); case(SQLTypes.LONGVARCHAR): return StringObject.fromString(b.toString()); case(SQLTypes.NULL): return null; case(SQLTypes.JAVA_OBJECT): return toJavaObject(ob); case(SQLTypes.BOOLEAN): return b; default: throw new Error("Can't cast boolean to " + sql_type_string); } } // if (ob instanceof Boolean) // Cast from a date if (ob instanceof Date) { Date d = (Date) ob; switch (sql_type) { case(SQLTypes.TINYINT): // fall through case(SQLTypes.SMALLINT): // fall through case(SQLTypes.INTEGER): // fall through case(SQLTypes.BIGINT): // fall through case(SQLTypes.FLOAT): // fall through case(SQLTypes.REAL): // fall through case(SQLTypes.DOUBLE): // fall through case(SQLTypes.NUMERIC): // fall through case(SQLTypes.DECIMAL): return BigNumber.fromLong(d.getTime()); case(SQLTypes.CHAR): return StringObject.fromString(paddedString(formatDateAsString(d), sql_size)); case(SQLTypes.VARCHAR): return StringObject.fromString(formatDateAsString(d)); case(SQLTypes.LONGVARCHAR): return StringObject.fromString(formatDateAsString(d)); case(SQLTypes.DATE): return d; case(SQLTypes.TIME): return d; case(SQLTypes.TIMESTAMP): return d; 
case(SQLTypes.NULL): return null; case(SQLTypes.JAVA_OBJECT): return toJavaObject(ob); default: throw new Error("Can't cast date to " + sql_type_string); } } // if (ob instanceof Date) // Some obscure types if (ob instanceof byte[]) { switch (sql_type) { case(SQLTypes.BLOB): // fall through case(SQLTypes.BINARY): // fall through case(SQLTypes.VARBINARY): // fall through case(SQLTypes.LONGVARBINARY): return new ByteLongObject((byte[]) ob); default: throw new Error("Can't cast byte[] to " + sql_type_string); } } // Finally, the object can only be something that we can cast to a // JAVA_OBJECT. if (sql_type == SQLTypes.JAVA_OBJECT) { return toJavaObject(ob); } throw new RuntimeException("Can't cast object " + ob.getClass() + " to " + sql_type_string); } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/global/ClobRef.java000066400000000000000000000016411330501023400253360ustar00rootroot00000000000000/** * com.mckoi.database.global.ClobRef 30 Jan 2003 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.global; /** * A reference to a large character object in the database. 
* * @author Tobias Downer */ public interface ClobRef extends StringAccessor, Ref { } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/global/ColumnDescription.java000066400000000000000000000273551330501023400274750ustar00rootroot00000000000000/** * com.mckoi.database.global.ColumnDescription 19 May 1998 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.global; import java.io.*; import java.util.Date; import java.math.BigDecimal; /** * This is a description of a column and the data it stores. Specifically it * stores the 'type' as defined in the Types class, the 'size' if the column * cells may be different lengths (eg, string), the name of the column, whether * the column set must contain unique elements, and whether a cell may be added * that is null. * * @author Tobias Downer */ public class ColumnDescription { // static final long serialVersionUID = 8210197301596138014L; /** * The name of the field. */ private String name; /** * The type of the field, from the Types object. */ private int type; /** * The size of the type. The meaning of this field changes depending on the * type. For example, the size of an SQL NUMERIC represents the number of * digits in the value (precision). */ private int size; /** * The scale of a numerical value. This represents the number of digits to * the right of the decimal point. 
The number is rounded to this scale * in arithmatic operations. By default, the scale is '10' */ private int scale = -1; /** * The SQL type as defined in java.sql.Types. This is required to emulate * the various SQL types. The value is initialised to -9332 to indicate * the sql type has not be defined. */ private int sql_type = -9332; /** * If true, the field may not be null. If false, the column may contain * no information. This is enforced at the parse stage when adding or * altering a table. */ private boolean not_null; /** * If true, the field may only contain unique values. This is enforced at * the parse stage when adding or altering a table. */ private boolean unique; /** * This represents the 'unique_group' that this column is in. If two * columns in a table belong to the same unique_group, then the specific * combination of the groups columns can not exist more than once in the * table. * A value of -1 means the column does not belong to any unique group. */ private int unique_group; /** * The Constructors if the type does require a size. */ public ColumnDescription(String name, int type, int size, boolean not_null) { this.name = name; this.type = type; this.size = size; this.not_null = not_null; this.unique = false; this.unique_group = -1; } public ColumnDescription(String name, int type, boolean not_null) { this(name, type, -1, not_null); } public ColumnDescription(ColumnDescription cd) { this(cd.getName(), cd.getType(), cd.getSize(), cd.isNotNull()); if (cd.isUnique()) { setUnique(); } setUniqueGroup(cd.getUniqueGroup()); setScale(cd.getScale()); setSQLType(cd.getSQLType()); } public ColumnDescription(String name, ColumnDescription cd) { this(name, cd.getType(), cd.getSize(), cd.isNotNull()); if (cd.isUnique()) { setUnique(); } setUniqueGroup(cd.getUniqueGroup()); setScale(cd.getScale()); setSQLType(cd.getSQLType()); } /** * Sets this column to unique. * NOTE: This can only happen during the setup of the object. 
Unpredictable * results will occur otherwise. */ public void setUnique() { this.unique = true; } /** * Sets the column to belong to the specified unique group in the table. * Setting to -1 sets the column to no unique group. * NOTE: This can only happen during the setup of the object. Unpredictable * results will occur otherwise. */ public void setUniqueGroup(int group) { this.unique_group = group; } /** * Sets the SQL type for this ColumnDescription object. This is only used * to emulate SQL types in the database. They are mapped to the simpler * internal types as follows:

*

   *    DB_STRING := CHAR, VARCHAR, LONGVARCHAR
   *   DB_NUMERIC := TINYINT, SMALLINT, INTEGER, BIGINT, FLOAT, REAL,
   *                 DOUBLE, NUMERIC, DECIMAL
   *      DB_DATE := DATE, TIME, TIMESTAMP
   *   DB_BOOLEAN := BIT
   *      DB_BLOB := BINARY, VARBINARY, LONGVARBINARY
   *    DB_OBJECT := JAVA_OBJECT
   * 
*/ public void setSQLType(int sql_type) { this.sql_type = sql_type; } /** * Sets the scale of the numerical values stored. */ public void setScale(int scale) { this.scale = scale; } /** * Returns the name of the field. The field type returned should be * 'ZIP' or 'Address1'. To resolve to the tables type, we must append * an additional 'Company.' or 'Customer.' string to the front. */ public String getName() { return name; } /** * Returns an integer representing the type of the field. The types are * outlined in com.mckoi.database.global.Types. */ public int getType() { return type; } /** * Returns true if this column is a numeric type. */ public boolean isNumericType() { return (type == Types.DB_NUMERIC); } /** * Returns a value from java.sql.Type that is the SQL type defined for this * column. It's possible that the column may not have had the SQL type * set in which case we map from the internal db type ( DB_??? ) to the * most logical sql type. */ public int getSQLType() { if (sql_type == -9332) { // If sql type is unknown find from internal type if (type == Types.DB_NUMERIC) { return SQLTypes.NUMERIC; } else if (type == Types.DB_STRING) { return SQLTypes.LONGVARCHAR; } else if (type == Types.DB_BOOLEAN) { return SQLTypes.BIT; } else if (type == Types.DB_TIME) { return SQLTypes.TIMESTAMP; } else if (type == Types.DB_BLOB) { return SQLTypes.LONGVARBINARY; } else if (type == Types.DB_OBJECT) { return SQLTypes.JAVA_OBJECT; } else { throw new Error("Unrecognised internal type."); } } return sql_type; } /** * Returns the name (as a string) of the SQL type or null if the type is * not understood. 
*/ public String getSQLTypeName() { int type = getSQLType(); switch (type) { case(SQLTypes.BIT): return "BIT"; case(SQLTypes.TINYINT): return "TINYINT"; case(SQLTypes.SMALLINT): return "SMALLINT"; case(SQLTypes.INTEGER): return "INTEGER"; case(SQLTypes.BIGINT): return "BIGINT"; case(SQLTypes.FLOAT): return "FLOAT"; case(SQLTypes.REAL): return "REAL"; case(SQLTypes.DOUBLE): return "DOUBLE"; case(SQLTypes.NUMERIC): return "NUMERIC"; case(SQLTypes.DECIMAL): return "DECIMAL"; case(SQLTypes.CHAR): return "CHAR"; case(SQLTypes.VARCHAR): return "VARCHAR"; case(SQLTypes.LONGVARCHAR): return "LONGVARCHAR"; case(SQLTypes.DATE): return "DATE"; case(SQLTypes.TIME): return "TIME"; case(SQLTypes.TIMESTAMP): return "TIMESTAMP"; case(SQLTypes.BINARY): return "BINARY"; case(SQLTypes.VARBINARY): return "VARBINARY"; case(SQLTypes.LONGVARBINARY): return "LONGVARBINARY"; case(SQLTypes.NULL): return "NULL"; case(SQLTypes.OTHER): return "OTHER"; case(SQLTypes.JAVA_OBJECT): return "JAVA_OBJECT"; case(SQLTypes.DISTINCT): return "DISTINCT"; case(SQLTypes.STRUCT): return "STRUCT"; case(SQLTypes.ARRAY): return "ARRAY"; case(SQLTypes.BLOB): return "BLOB"; case(SQLTypes.CLOB): return "CLOB"; case(SQLTypes.REF): return "REF"; case(SQLTypes.BOOLEAN): return "BOOLEAN"; default: return null; } } /** * Returns the class of Java object for this field. */ public Class classType() { return TypeUtil.toClass(type); // if (type == Types.DB_STRING) { // return String.class; // } // else if (type == Types.DB_NUMERIC) { // return BigDecimal.class; // } // else if (type == Types.DB_TIME) { // return Date.class; // } // else if (type == Types.DB_BOOLEAN) { // return Boolean.class; // } // else { // throw new Error("Unknown type."); // } } /** * Returns the size of the given field. This is only applicable to a few * of the types, ie VARCHAR. */ public int getSize() { return size; } /** * If this is a number, returns the scale of the field. 
*/ public int getScale() { return scale; } /** * Determines whether the field can contain a null value or not. Returns * true if it is required for the column to contain data. */ public boolean isNotNull() { return not_null; } /** * Determines whether the field can contain two items that are identical. * Returns true if each element must be unique. */ public boolean isUnique() { return unique; } /** * Returns the unique group that this column is in. If it does not belong * to a unique group then the value -1 is returned. */ public int getUniqueGroup() { return unique_group; } /** * Returns true if the type of the field is searchable. Searchable means * that the database driver can quantify it, as in determine if a given * object of the same type is greater, equal or less. We can not quantify * BLOB types. */ public boolean isQuantifiable() { if (type == Types.DB_BLOB || type == Types.DB_OBJECT) { return false; } return true; } /** * The 'equals' method, used to determine equality between column * descriptions. */ public boolean equals(Object ob) { ColumnDescription cd = (ColumnDescription) ob; return (name.equals(cd.name) && type == cd.type && size == cd.size && not_null == cd.not_null && unique == cd.unique && unique_group == cd.unique_group); } /** * Writes this ColumnDescription to the given DataOutputStream. * ( Remember to flush output stream ) */ public void writeTo(DataOutputStream out) throws IOException { out.writeUTF(name); out.writeInt(type); out.writeInt(size); out.writeBoolean(not_null); out.writeBoolean(unique); out.writeInt(unique_group); out.writeInt(sql_type); out.writeInt(scale); } /** * Reads a ColumnDescription from the given DataInputStream and returns * a new instance of it. 
*/ public static ColumnDescription readFrom(DataInputStream in) throws IOException { String name = in.readUTF(); int type = in.readInt(); int size = in.readInt(); boolean not_null = in.readBoolean(); boolean unique = in.readBoolean(); int unique_group = in.readInt(); ColumnDescription col_desc = new ColumnDescription(name, type, size, not_null); if (unique) col_desc.setUnique(); col_desc.setUniqueGroup(unique_group); col_desc.setSQLType(in.readInt()); col_desc.setScale(in.readInt()); return col_desc; } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/global/NullObject.java000066400000000000000000000024661330501023400260710ustar00rootroot00000000000000/** * com.mckoi.database.global.NullObject 22 Jul 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.global; /** * A Null Object. * * @author Tobias Downer * @deprecated do not use. Nulls are now handled via TObject and TType. This * method is only kept around for legacy with older databases. 
*/ public class NullObject implements java.io.Serializable { static final long serialVersionUID = 8599490526855696529L; public static NullObject NULL_OBJ = new NullObject(); public int compareTo(Object ob) { if (ob == null || ob instanceof NullObject) { return 0; } return -1; } public String toString() { return "NULL"; } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/global/ObjectTransfer.java000066400000000000000000000156701330501023400267440ustar00rootroot00000000000000/** * com.mckoi.database.global.ObjectTransfer 20 Jul 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.global; import java.io.*; import java.util.Date; import com.mckoi.util.BigNumber; import java.math.BigInteger; /** * Provides static methods for transfering different types of objects over * a Data input/output stream. * * @author Tobias Downer */ public class ObjectTransfer { /** * Makes an estimate of the size of the object. This is useful for making * a guess for how much this will take up. 
*/ public static int size(Object ob) throws IOException { if (ob == null) { return 9; } else if (ob instanceof StringObject) { return (ob.toString().length() * 2) + 9; } else if (ob instanceof BigNumber) { return 15 + 9; } else if (ob instanceof Date) { return 8 + 9; } else if (ob instanceof Boolean) { return 2 + 9; } else if (ob instanceof ByteLongObject) { return ((ByteLongObject) ob).length() + 9; } else if (ob instanceof StreamableObject) { return 5 + 9; } else { throw new IOException("Unrecognised type: " + ob.getClass()); } } /** * Returns the exact size an object will take up when serialized. */ public static int exactSize(Object ob) throws IOException { if (ob == null) { return 1; } else if (ob instanceof StringObject) { return (ob.toString().length() * 2) + 1 + 4; } else if (ob instanceof BigNumber) { BigNumber n = (BigNumber) ob; if (n.canBeRepresentedAsInt()) { return 4 + 1; } else if (n.canBeRepresentedAsLong()) { return 8 + 1; } byte[] buf = n.toByteArray(); return buf.length + 1 + 1 + 4 + 4; } else if (ob instanceof Date) { return 8 + 1; } else if (ob instanceof Boolean) { return 1 + 1; } else if (ob instanceof ByteLongObject) { return ((ByteLongObject) ob).length() + 1 + 8; } else if (ob instanceof StreamableObject) { return 1 + 1 + 4; } else { throw new IOException("Unrecognised type: " + ob.getClass()); } } /** * Writes an object to the data output stream. 
*/ public static void writeTo(DataOutput out, Object ob) throws IOException { if (ob == null) { out.writeByte(1); } else if (ob instanceof StringObject) { String str = ob.toString(); // All strings send as char array, out.writeByte(18); out.writeInt(str.length()); out.writeChars(str); } else if (ob instanceof BigNumber) { BigNumber n = (BigNumber) ob; if (n.canBeRepresentedAsInt()) { out.writeByte(24); out.writeInt(n.intValue()); } else if (n.canBeRepresentedAsLong()) { out.writeByte(8); out.writeLong(n.longValue()); } else { out.writeByte(7); out.writeByte(n.getState()); out.writeInt(n.getScale()); byte[] buf = n.toByteArray(); out.writeInt(buf.length); out.write(buf); } // out.writeByte(6); // // NOTE: This method is only available in 1.2. This needs to be // // compatible with 1.1 so we use a slower method, //// BigInteger unscaled_val = n.unscaledValue(); // // NOTE: This can be swapped out eventually when we can guarentee // // everything is 1.2 minimum. // BigInteger unscaled_val = n.movePointRight(n.scale()).toBigInteger(); // // byte[] buf = unscaled_val.toByteArray(); // out.writeInt(buf.length); // out.write(buf); } else if (ob instanceof Date) { Date d = (Date) ob; out.writeByte(9); out.writeLong(d.getTime()); } else if (ob instanceof Boolean) { Boolean b = (Boolean) ob; out.writeByte(12); out.writeBoolean(b.booleanValue()); } else if (ob instanceof ByteLongObject) { ByteLongObject barr = (ByteLongObject) ob; out.writeByte(15); byte[] arr = barr.getByteArray(); out.writeLong(arr.length); out.write(arr); } else if (ob instanceof StreamableObject) { StreamableObject ob_head = (StreamableObject) ob; out.writeByte(16); out.writeByte(ob_head.getType()); out.writeLong(ob_head.getSize()); out.writeLong(ob_head.getIdentifier()); } else { throw new IOException("Unrecognised type: " + ob.getClass()); } } /** * Writes an object from the data input stream. 
*/ public static Object readFrom(DataInputStream in) throws IOException { byte type = in.readByte(); switch (type) { case(1): return null; case(3): String str = in.readUTF(); return StringObject.fromString(str); case(6): { int scale = in.readInt(); int blen = in.readInt(); byte[] buf = new byte[blen]; in.readFully(buf); return BigNumber.fromData(buf, scale, (byte) 0); } case(7): { byte state = in.readByte(); int scale = in.readInt(); int blen = in.readInt(); byte[] buf = new byte[blen]; in.readFully(buf); return BigNumber.fromData(buf, scale, state); } case(8): { // 64-bit long numeric value long val = in.readLong(); return BigNumber.fromLong(val); } case(9): long time = in.readLong(); return new Date(time); case(12): return new Boolean(in.readBoolean()); case(15): { long size = in.readLong(); byte[] arr = new byte[(int) size]; in.readFully(arr, 0, (int) size); return new ByteLongObject(arr); } case(16): { final byte h_type = in.readByte(); final long h_size = in.readLong(); final long h_id = in.readLong(); return new StreamableObject(h_type, h_size, h_id); } case(18): { // Handles strings > 64k int len = in.readInt(); StringBuffer buf = new StringBuffer(len); while (len > 0) { buf.append(in.readChar()); --len; } return StringObject.fromString(new String(buf)); } case(24): { // 32-bit int numeric value long val = (long) in.readInt(); return BigNumber.fromLong(val); } default: throw new IOException("Unrecognised type: " + type); } } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/global/ObjectTranslator.java000066400000000000000000000061121330501023400273000ustar00rootroot00000000000000/** * com.mckoi.database.global.ObjectTranslator 09 Feb 2001 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.global; import java.util.Date; import com.mckoi.util.BigNumber; import java.io.*; /** * This object compliments ObjectTransfer and provides a method to translate * any object into a type the database engine can process. * * @author Tobias Downer */ public class ObjectTranslator { /** * Translates the given object to a type the database can process. */ public static Object translate(Object ob) { if (ob == null) { return null; } else if (ob instanceof String) { return StringObject.fromString((String) ob); } else if (ob instanceof StringObject || ob instanceof BigNumber || ob instanceof Date || ob instanceof ByteLongObject || ob instanceof Boolean || ob instanceof StreamableObject) { return ob; } else if (ob instanceof byte[]) { return new ByteLongObject((byte[]) ob); } else if (ob instanceof Serializable) { return serialize(ob); } else { // System.out.println("Ob is: (" + ob.getClass() + ") " + ob); throw new Error("Unable to translate object. " + "It is not a primitive type or serializable."); } } /** * Serializes the Java object to a ByteLongObject. */ public static ByteLongObject serialize(Object ob) { try { ByteArrayOutputStream bout = new ByteArrayOutputStream(); ObjectOutputStream ob_out = new ObjectOutputStream(bout); ob_out.writeObject(ob); ob_out.close(); return new ByteLongObject(bout.toByteArray()); } catch (IOException e) { throw new Error("Serialization error: " + e.getMessage()); } } /** * Deserializes a ByteLongObject to a Java object. 
*/ public static Object deserialize(ByteLongObject blob) { if (blob == null) { return null; } else { try { ByteArrayInputStream bin = new ByteArrayInputStream(blob.getByteArray()); ObjectInputStream ob_in = new ObjectInputStream(bin); Object ob = ob_in.readObject(); ob_in.close(); return ob; } catch (ClassNotFoundException e) { throw new Error("Class not found: " + e.getMessage()); } catch (IOException e) { throw new Error("De-serialization error: " + e.getMessage()); } } } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/global/Ref.java000066400000000000000000000053671330501023400245470ustar00rootroot00000000000000/** * com.mckoi.database.global.Ref 30 Jan 2003 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.global; import java.io.IOException; /** * An interface that represents a reference to a object that isn't stored in * main memory. The reference to the object is made through the id value * returned by the 'getID' method. * * @author Tobias Downer */ public interface Ref { /** * An id used to reference this object in the context of the database. Note * that once a static reference is made (or removed) to/from this object, the * BlobStore should be notified of the reference. The store will remove an * large object that has no references to it. */ long getID(); /** * The type of large object that is being referenced. 
2 = binary object, * 3 = ASCII character object, 4 = Unicode character object. */ byte getType(); /** * The 'raw' size of this large object in bytes when it is in its byte[] * form. This value allows us to know how many bytes we can read from this * large object when it's being transferred to the client. */ long getRawSize(); /** * Reads a part of this large object from the store into the given byte * buffer. This method should only be used when reading a large object * to transfer to the JDBC driver. It represents the byte[] representation * of the object only and is only useful for transferral of the large object. */ void read(long offset, byte[] buf, int length) throws IOException; /** * This method is used to write the contents of the large object into the * backing store. This method will only work when the large object is in * an initial 'write' phase in which the client is pushing the contents of * the large object onto the server to be stored. */ void write(long offset, byte[] buf, int length) throws IOException; /** * This method is called when the write phrase has completed, and it marks * this large object as complete. After this method is called the large * object reference is a static object that can not be changed. */ void complete() throws IOException; } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/global/SQLTypes.java000066400000000000000000000043311330501023400255050ustar00rootroot00000000000000/** * com.mckoi.database.global.SQLTypes 01 Mar 2001 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.global; /** * A JDBC independant type definition list. This allows the specification of * all JDBC 1.0 and 2.0 types without requiring the JDBC 2.0 * 'java.sql.Types' interface. *

* The values are compatible with the JDBC 1.0 and 2.0 spec. * * @author Tobias Downer */ public interface SQLTypes { public final static int BIT = -7; public final static int TINYINT = -6; public final static int SMALLINT = 5; public final static int INTEGER = 4; public final static int BIGINT = -5; public final static int FLOAT = 6; public final static int REAL = 7; public final static int DOUBLE = 8; public final static int NUMERIC = 2; public final static int DECIMAL = 3; public final static int CHAR = 1; public final static int VARCHAR = 12; public final static int LONGVARCHAR = -1; public final static int DATE = 91; public final static int TIME = 92; public final static int TIMESTAMP = 93; public final static int BINARY = -2; public final static int VARBINARY = -3; public final static int LONGVARBINARY = -4; public final static int NULL = 0; public final static int OTHER = 1111; public final static int JAVA_OBJECT = 2000; public final static int DISTINCT = 2001; public final static int STRUCT = 2002; public final static int ARRAY = 2003; public final static int BLOB = 2004; public final static int CLOB = 2005; public final static int REF = 2006; public final static int BOOLEAN = 16; } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/global/StandardMessages.java000066400000000000000000000030301330501023400272440ustar00rootroot00000000000000/** * com.mckoi.database.global.StandardMessages 22 May 1998 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.global; /** * This class contains a number of standard messages that are displayed * throughout the operation of the database. They are put into a single class * to allow for easy future modification. * * @author Tobias Downer */ public final class StandardMessages { /** * The name of the author (me). */ public static String AUTHOR = "Tobias Downer"; /** * The standard copyright message. */ public static String COPYRIGHT = "Copyright (C) 2000 - 2013 Diehl and Associates, Inc. " + "All rights reserved."; /** * The global version number of the database system. */ public static String VERSION = "1.0.6"; /** * The global name of the system. */ public static String NAME = "Mckoi SQL Database ( " + VERSION + " )"; } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/global/StreamableObject.java000066400000000000000000000045501330501023400272320ustar00rootroot00000000000000/** * com.mckoi.database.global.StreamableObject 07 Sep 2002 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.global; /** * An object that is streamable (such as a long binary object, or * a long string object). This is passed between client and server and * contains basic primitive information about the object it represents. 
The * actual contents of the object itself must be obtained through other * means (see com.mckoi.database.jdbc.DatabaseInterface). * * @author Tobias Downer */ public final class StreamableObject { /** * The type of the object. */ private byte type; /** * The size of the object in bytes. */ private long size; /** * The identifier that identifies this object. */ private long id; /** * Constructs the StreamableObject. */ public StreamableObject(byte type, long size, long id) { this.type = type; this.size = size; this.id = id; } /** * Returns the type of object this stub represents. Returns 1 if it * represents 2-byte unicde character object, 2 if it represents binary data. */ public byte getType() { return type; } /** * Returns the size of the object stream, or -1 if the size is unknown. If * this represents a unicode character string, you would calculate the total * characters as size / 2. */ public long getSize() { return size; } /** * Returns an identifier that can identify this object within some context. * For example, if this is a streamable object on the client side, then the * identifier might be the value that is able to retreive a section of the * streamable object from the DatabaseInterface. */ public long getIdentifier() { return id; } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/global/StringAccessor.java000066400000000000000000000031621330501023400267530ustar00rootroot00000000000000/** * com.mckoi.database.global.StringAccessor 30 Jan 2003 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.global; import java.io.Reader; /** * An interface used by the engine to access and process strings. This * interface allows us to access the contents of a string that may be * implemented in several different ways. For example, a string may be * represented as a java.lang.String object in memeory, or it may be * represented as an ASCII sequence in a store. * * @author Tobias Downer */ public interface StringAccessor { /** * Returns the number of characters in the string. */ public int length(); /** * Returns a Reader that allows the string to be read sequentually from * start to finish. */ public Reader getReader(); /** * Returns this string as a java.lang.String object. Some care may be * necessary with this call because a very large string will require a lot * space on the heap. */ public String toString(); } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/global/StringObject.java000066400000000000000000000035121330501023400264160ustar00rootroot00000000000000/** * com.mckoi.database.global.StringObject 30 Jan 2003 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.global; import java.io.Reader; import java.io.StringReader; /** * A concrete implementation of StringAccessor that uses a java.lang.String * object. * * @author Tobias Downer */ public class StringObject implements java.io.Serializable, StringAccessor { static final long serialVersionUID = 6066215992031250481L; /** * The java.lang.String object. */ private String str; /** * Constructs the object. */ private StringObject(String str) { this.str = str; } /** * Returns the length of the string. */ public int length() { return str.length(); } /** * Returns a Reader that can read from the string. */ public Reader getReader() { return new StringReader(str); } /** * Returns this object as a java.lang.String object (easy!) */ public String toString() { return str; } /** * Static method that returns a StringObject from the given java.lang.String. */ public static StringObject fromString(String str) { if (str != null) { return new StringObject(str); } return null; } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/global/TypeUtil.java000066400000000000000000000040511330501023400255770ustar00rootroot00000000000000/** * com.mckoi.database.global.TypeUtil 01 Aug 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.global; /** * Utility for converting to and from 'Types' objects. * * @author Tobias Downer */ public class TypeUtil { /** * Converts from a Class object to a type as specified in Types. */ public static int toDBType(Class clazz) { if (clazz == String.class) { return Types.DB_STRING; } else if (clazz == java.math.BigDecimal.class) { return Types.DB_NUMERIC; } else if (clazz == java.util.Date.class) { return Types.DB_TIME; } else if (clazz == Boolean.class) { return Types.DB_BOOLEAN; } else if (clazz == ByteLongObject.class) { return Types.DB_BLOB; } else { return Types.DB_OBJECT; } } /** * Converts from a db type to a Class object. */ public static Class toClass(int type) { if (type == Types.DB_STRING) { return String.class; } else if (type == Types.DB_NUMERIC) { return java.math.BigDecimal.class; } else if (type == Types.DB_TIME) { return java.util.Date.class; } else if (type == Types.DB_BOOLEAN) { return Boolean.class; } else if (type == Types.DB_BLOB) { return ByteLongObject.class; } else if (type == Types.DB_OBJECT) { return Object.class; } else { throw new Error("Unknown type."); } } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/global/Types.java000066400000000000000000000025631330501023400251320ustar00rootroot00000000000000/** * com.mckoi.database.global.Types 11 May 1998 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.global; /** * The possible types used in the database. *

* @author Tobias Downer */ public interface Types { public static final int DB_UNKNOWN = -1; public static final int DB_STRING = 1; public static final int DB_NUMERIC = 2; public static final int DB_TIME = 3; public static final int DB_BINARY = 4; // @deprecated - use BLOB public static final int DB_BOOLEAN = 5; public static final int DB_BLOB = 6; public static final int DB_OBJECT = 7; // This is an extended numeric type that handles neg and positive infinity // and NaN. public static final int DB_NUMERIC_EXTENDED = 8; } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/global/package.html000066400000000000000000000002671330501023400254430ustar00rootroot00000000000000 com.mckoi.database.global - Constants

This package includes database constants such as Types. mckoisqldb-1.0.6/src/main/java/com/mckoi/database/interpret/000077500000000000000000000000001330501023400237315ustar00rootroot00000000000000mckoisqldb-1.0.6/src/main/java/com/mckoi/database/interpret/AlterTable.java000066400000000000000000000306731330501023400266240ustar00rootroot00000000000000/** * com.mckoi.database.interpret.AlterTable 14 Sep 2001 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.interpret; import com.mckoi.database.*; import com.mckoi.util.IntegerVector; import java.util.ArrayList; import java.util.List; /** * Logic for the ALTER TABLE SQL statement. * * @author Tobias Downer */ public class AlterTable extends Statement { /** * The create statement that we use to alter the current table. This is * only for compatibility reasons. */ StatementTree create_statement; /** * The name of the table we are altering. */ String table_name; /** * The list of actions to perform in this alter statement. */ private ArrayList actions; /** * The TableName object. */ private TableName tname; /** * The prepared create table statement. */ CreateTable create_stmt; /** * Adds an action to perform in this alter statement. 
*/ public void addAction(AlterTableAction action) { if (actions == null) { actions = new ArrayList(); } actions.add(action); } /** * Returns true if the column names match. If the database is in case * insensitive mode then the columns will match if the case insensitive * search matches. */ public boolean checkColumnNamesMatch(DatabaseConnection db, String col1, String col2) { if (db.isInCaseInsensitiveMode()) { return col1.equalsIgnoreCase(col2); } return col1.equals(col2); } private void checkColumnConstraint(String col_name, String[] cols, TableName table, String constraint_name) { for (int i = 0; i < cols.length; ++i) { if (col_name.equals(cols[i])) { throw new DatabaseConstraintViolationException( DatabaseConstraintViolationException.DROP_COLUMN_VIOLATION, "Constraint violation (" + constraint_name + ") dropping column " + col_name + " because of " + "referential constraint in " + table); } } } // ---------- Implemented from Statement ---------- public void prepare() throws DatabaseException { // Get variables from the model table_name = (String) cmd.getObject("table_name"); addAction((AlterTableAction) cmd.getObject("alter_action")); create_statement = (StatementTree) cmd.getObject("create_statement"); // --- if (create_statement != null) { create_stmt = new CreateTable(); create_stmt.init(database, create_statement, null); create_stmt.prepare(); this.table_name = create_stmt.table_name; // create_statement.doPrepare(db, user); } else { // If we don't have a create statement, then this is an SQL alter // command. } // tname = TableName.resolve(db.getCurrentSchema(), table_name); tname = resolveTableName(table_name, database); if (tname.getName().indexOf('.') != -1) { throw new DatabaseException("Table name can not contain '.' character."); } } public Table evaluate() throws DatabaseException { DatabaseQueryContext context = new DatabaseQueryContext(database); String schema_name = database.getCurrentSchema(); // Does the user have privs to alter this tables? 
if (!database.getDatabase().canUserAlterTableObject(context, user, tname)) { throw new UserAccessException( "User not permitted to alter table: " + table_name); } if (create_statement != null) { // Create the data table definition and tell the database to update it. DataTableDef table_def = create_stmt.createDataTableDef(); TableName tname = table_def.getTableName(); // Is the table in the database already? if (database.tableExists(tname)) { // Drop any schema for this table, database.dropAllConstraintsForTable(tname); database.updateTable(table_def); } // If the table isn't in the database, else { database.createTable(table_def); } // Setup the constraints create_stmt.setupAllConstraints(); // Return '0' if we created the table. return FunctionTable.resultTable(context, 0); } else { // SQL alter command using the alter table actions, // Get the table definition for the table name, DataTableDef table_def = database.getTable(tname).getDataTableDef(); String table_name = table_def.getName(); DataTableDef new_table = table_def.noColumnCopy(); // Returns a ColumnChecker implementation for this table. ColumnChecker checker = ColumnChecker.standardColumnChecker(database, tname); // Set to true if the table topology is alter, or false if only // the constraints are changed. 
boolean table_altered = false; for (int n = 0; n < table_def.columnCount(); ++n) { DataTableColumnDef column = new DataTableColumnDef(table_def.columnAt(n)); String col_name = column.getName(); // Apply any actions to this column boolean mark_dropped = false; for (int i = 0; i < actions.size(); ++i) { AlterTableAction action = (AlterTableAction) actions.get(i); if (action.getAction().equals("ALTERSET") && checkColumnNamesMatch(database, (String) action.getElement(0), col_name)) { Expression exp = (Expression) action.getElement(1); checker.checkExpression(exp); column.setDefaultExpression(exp); table_altered = true; } else if (action.getAction().equals("DROPDEFAULT") && checkColumnNamesMatch(database, (String) action.getElement(0), col_name)) { column.setDefaultExpression(null); table_altered = true; } else if (action.getAction().equals("DROP") && checkColumnNamesMatch(database, (String) action.getElement(0), col_name)) { // Check there are no referential links to this column Transaction.ColumnGroupReference[] refs = database.queryTableImportedForeignKeyReferences(tname); for (int p = 0; p < refs.length; ++p) { checkColumnConstraint(col_name, refs[p].ref_columns, refs[p].ref_table_name, refs[p].name); } // Or from it refs = database.queryTableForeignKeyReferences(tname); for (int p = 0; p < refs.length; ++p) { checkColumnConstraint(col_name, refs[p].key_columns, refs[p].key_table_name, refs[p].name); } // Or that it's part of a primary key Transaction.ColumnGroup primary_key = database.queryTablePrimaryKeyGroup(tname); if (primary_key != null) { checkColumnConstraint(col_name, primary_key.columns, tname, primary_key.name); } // Or that it's part of a unique set Transaction.ColumnGroup[] uniques = database.queryTableUniqueGroups(tname); for (int p = 0; p < uniques.length; ++p) { checkColumnConstraint(col_name, uniques[p].columns, tname, uniques[p].name); } mark_dropped = true; table_altered = true; } } // If not dropped then add to the new table definition. 
if (!mark_dropped) { new_table.addColumn(column); } } // Add any new columns, for (int i = 0; i < actions.size(); ++i) { AlterTableAction action = (AlterTableAction) actions.get(i); if (action.getAction().equals("ADD")) { ColumnDef cdef = (ColumnDef) action.getElement(0); if (cdef.isUnique() || cdef.isPrimaryKey()) { throw new DatabaseException("Can not use UNIQUE or PRIMARY KEY " + "column constraint when altering a column. Use " + "ADD CONSTRAINT instead."); } // Convert to a DataTableColumnDef DataTableColumnDef col = CreateTable.convertColumnDef(cdef); checker.checkExpression( col.getDefaultExpression(database.getSystem())); String col_name = col.getName(); // If column name starts with [table_name]. then strip it off col.setName(checker.stripTableName(table_name, col_name)); new_table.addColumn(col); table_altered = true; } } // Any constraints to drop... for (int i = 0; i < actions.size(); ++i) { AlterTableAction action = (AlterTableAction) actions.get(i); if (action.getAction().equals("DROP_CONSTRAINT")) { String constraint_name = (String) action.getElement(0); int drop_count = database.dropNamedConstraint(tname, constraint_name); if (drop_count == 0) { throw new DatabaseException( "Named constraint to drop on table " + tname + " was not found: " + constraint_name); } } else if (action.getAction().equals("DROP_CONSTRAINT_PRIMARY_KEY")) { boolean constraint_dropped = database.dropPrimaryKeyConstraintForTable(tname, null); if (!constraint_dropped) { throw new DatabaseException( "No primary key to delete on table " + tname); } } } // Any constraints to add... 
for (int i = 0; i < actions.size(); ++i) { AlterTableAction action = (AlterTableAction) actions.get(i); if (action.getAction().equals("ADD_CONSTRAINT")) { ConstraintDef constraint = (ConstraintDef) action.getElement(0); boolean foreign_constraint = (constraint.type == ConstraintDef.FOREIGN_KEY); TableName ref_tname = null; if (foreign_constraint) { ref_tname = resolveTableName(constraint.reference_table_name, database); if (database.isInCaseInsensitiveMode()) { ref_tname = database.tryResolveCase(ref_tname); } constraint.reference_table_name = ref_tname.toString(); } checker.stripColumnList(table_name, constraint.column_list); checker.stripColumnList(constraint.reference_table_name, constraint.column_list2); checker.checkExpression(constraint.check_expression); checker.checkColumnList(constraint.column_list); if (foreign_constraint && constraint.column_list2 != null) { ColumnChecker referenced_checker = ColumnChecker.standardColumnChecker(database, ref_tname); referenced_checker.checkColumnList(constraint.column_list2); } CreateTable.addSchemaConstraint(database, tname, constraint); } } // Alter the existing table to the new format... if (table_altered) { if (new_table.columnCount() == 0) { throw new DatabaseException( "Can not ALTER table to have 0 columns."); } database.updateTable(new_table); } else { // If the table wasn't physically altered, check the constraints. // Calling this method will also make the transaction check all // deferred constraints during the next commit. database.checkAllConstraints(tname); } // Return '0' if everything successful. return FunctionTable.resultTable(context, 0); } } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/interpret/AlterTableAction.java000066400000000000000000000066241330501023400277610ustar00rootroot00000000000000/** * com.mckoi.database.interpret.AlterTableAction 09 Sep 2001 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.interpret; import java.util.ArrayList; import com.mckoi.database.*; /** * Represents an action in an ALTER TABLE SQL statement. * * @author Tobias Downer */ public final class AlterTableAction implements java.io.Serializable, StatementTreeObject, Cloneable { static final long serialVersionUID = -3180332341627416727L; /** * Element parameters to do with the action. */ private ArrayList elements; /** * The action to perform. */ private String action; /** * Constructor. */ public AlterTableAction() { elements = new ArrayList(); } /** * Set the action to perform. */ public void setAction(String str) { this.action = str; } /** * Adds a parameter to this action. */ public void addElement(Object ob) { elements.add(ob); } /** * Returns the name of this action. */ public String getAction() { return action; } /** * Returns the ArrayList that represents the parameters of this action. */ public ArrayList getElements() { return elements; } /** * Returns element 'n'. 
*/ public Object getElement(int n) { return elements.get(n); } // Implemented from StatementTreeObject public void prepareExpressions(ExpressionPreparer preparer) throws DatabaseException { // This must search throw 'elements' for objects that we can prepare for (int i = 0; i < elements.size(); ++i) { Object ob = elements.get(i); if (ob instanceof String) { // Do not need to prepare this } else if (ob instanceof Expression) { ((Expression) ob).prepare(preparer); } else if (ob instanceof StatementTreeObject) { ((StatementTreeObject) ob).prepareExpressions(preparer); } else { throw new DatabaseException( "Unrecognised expression: " + ob.getClass()); } } } public Object clone() throws CloneNotSupportedException { // Shallow clone AlterTableAction v = (AlterTableAction) super.clone(); ArrayList cloned_elements = new ArrayList(); v.elements = cloned_elements; for (int i = 0; i < elements.size(); ++i) { Object ob = elements.get(i); if (ob instanceof String) { // Do not need to clone this } else if (ob instanceof Expression) { ob = ((Expression) ob).clone(); } else if (ob instanceof StatementTreeObject) { ob = ((StatementTreeObject) ob).clone(); } else { throw new CloneNotSupportedException(ob.getClass().toString()); } cloned_elements.add(ob); } return v; } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/interpret/ByColumn.java000066400000000000000000000036451330501023400263340ustar00rootroot00000000000000/** * com.mckoi.database.interpret.ByColumn 09 Sep 2001 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.interpret; import com.mckoi.database.*; /** * Object used to represent a column in the 'order by' and 'group by' * clauses of a select statement. * * @author Tobias Downer */ public final class ByColumn implements java.io.Serializable, StatementTreeObject, Cloneable { static final long serialVersionUID = 8194415767416200855L; /** * The name of the column in the 'by'. */ public Variable name; /** * The expression that we are ordering by. */ public Expression exp; /** * If 'order by' then true if sort is ascending (default). */ public boolean ascending = true; public void prepareExpressions(ExpressionPreparer preparer) throws DatabaseException { if (exp != null) { exp.prepare(preparer); } } public Object clone() throws CloneNotSupportedException { ByColumn v = (ByColumn) super.clone(); if (name != null) { v.name = (Variable) name.clone(); } if (exp != null) { v.exp = (Expression) exp.clone(); } return v; } public String toString() { return "ByColumn(" + name + ", " + exp + ", " + ascending + ")"; } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/interpret/Call.java000066400000000000000000000101551330501023400254510ustar00rootroot00000000000000/** * com.mckoi.database.interpret.Call 15 Sep 2002 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.interpret; import com.mckoi.database.*; import java.io.*; import java.util.ArrayList; import java.util.List; /** * A statement that calls a procedure, and returns a resultant table. This * is used to perform some sort of function over the database. For example, * "CALL SYSTEM_MAKE_BACKUP('/my_backups/1')" makes a copy of the database in * the given directory on the disk. * * @author Tobias Downer */ public class Call extends Statement { // ---------- Implemented from Statement ---------- public void prepare() throws DatabaseException { } public Table evaluate() throws DatabaseException { DatabaseQueryContext context = new DatabaseQueryContext(database); String proc_name = (String) cmd.getObject("proc_name"); Expression[] args = (Expression[]) cmd.getObject("args"); // Get the procedure manager ProcedureManager manager = database.getProcedureManager(); ProcedureName name; TableName p_name = null; // If no schema def given in the procedure name, first check for the // function in the SYS_INFO schema. if (proc_name.indexOf(".") == -1) { // Resolve the procedure name into a TableName object. String schema_name = database.getCurrentSchema(); TableName tp_name = TableName.resolve(Database.SYSTEM_SCHEMA, proc_name); tp_name = database.tryResolveCase(tp_name); // If exists then use this if (manager.procedureExists(tp_name)) { p_name = tp_name; } } if (p_name == null) { // Resolve the procedure name into a TableName object. 
String schema_name = database.getCurrentSchema(); TableName tp_name = TableName.resolve(schema_name, proc_name); tp_name = database.tryResolveCase(tp_name); // Does the schema exist? boolean ignore_case = database.isInCaseInsensitiveMode(); SchemaDef schema = database.resolveSchemaCase(tp_name.getSchema(), ignore_case); if (schema == null) { throw new DatabaseException("Schema '" + tp_name.getSchema() + "' doesn't exist."); } else { tp_name = new TableName(schema.getName(), tp_name.getName()); } // If this doesn't exist then generate the error if (!manager.procedureExists(tp_name)) { throw new DatabaseException("Stored procedure '" + proc_name + "' was not found."); } p_name = tp_name; } // Does the procedure exist in the system schema? name = new ProcedureName(p_name); // Check the user has privs to use this stored procedure if (!database.getDatabase().canUserExecuteStoredProcedure(context, user, name.toString())) { throw new UserAccessException("User not permitted to call: " + proc_name); } // Evaluate the arguments TObject[] vals = new TObject[args.length]; for (int i = 0; i < args.length; ++i) { if (args[i].isConstant()) { vals[i] = args[i].evaluate(null, null, context); } else { throw new StatementException( "CALL argument is not a constant: " + args[i].text()); } } // Invoke the procedure TObject result = manager.invokeProcedure(name, vals); // Return the result of the procedure, return FunctionTable.resultTable(context, result); } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/interpret/ColumnChecker.java000066400000000000000000000135411330501023400273220ustar00rootroot00000000000000/** * com.mckoi.database.interpret.ColumnChecker 09 Sep 2001 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.interpret; import com.mckoi.database.*; import java.util.ArrayList; import java.util.List; /** * A class that abstracts the checking of information in a table. This is * abstracted because the behaviour is shared between ALTER and CREATE * statement. * * @author Tobias Downer */ abstract class ColumnChecker { /** * Given a column name string, this will strip off the preceeding table * name if there is one specified. For example, 'Customer.id' would * become 'id'. This also checks that the table specification is in the * given table domain. For example, * stripTableName("Customer", "Customer.id") would not throw an error but * stripTableName("Order", "Customer.di") would. */ static String stripTableName(String table_domain, String column) { if (column.indexOf('.') != -1) { String st = table_domain + "."; if (!column.startsWith(st)) { throw new StatementException("Column '" + column + "' is not within the expected table domain '" + table_domain + "'"); } column = column.substring(st.length()); } return column; } /** * Calls the 'stripTableName' method on all elements in the given list. */ static ArrayList stripColumnList(String table_domain, ArrayList column_list) { if (column_list != null) { int size = column_list.size(); for (int i = 0; i < size; ++i) { String res = stripTableName(table_domain, (String) column_list.get(i)); column_list.set(i, res); } } return column_list; } /** * Returns the resolved column name if the column exists within the table * being checked under, or null if it doesn't. 
Throws an error if the * column name is abiguous. */ abstract String resolveColumnName(String col_name) throws DatabaseException; /** * Resolves all the variables in the expression throwing a DatabaseException * if any errors found. This checks that all variables point to a column * in the table being created. */ void checkExpression(Expression expression) throws DatabaseException { if (expression != null) { List list = expression.allVariables(); for (int i = 0; i < list.size(); ++i) { Variable v = (Variable) list.get(i); String orig_col = v.getName(); String resolved_column = resolveColumnName(orig_col); if (resolved_column == null) { throw new DatabaseException("Column '" + orig_col + "' not found in the table."); } // Resolve the column name if (!orig_col.equals(resolved_column)) { v.setColumnName(resolved_column); } } // Don't allow select statements because they don't convert to a // text string that we can encode into the DataTableDef file. if (expression.hasSubQuery()) { throw new DatabaseException("Sub-queries not permitted in " + "the check constraint expression."); } } } /** * Checks all the columns in the list and throws an exception if any * column names are not found in the columns in this create. Additionally * sets the entry with the correct column resolved to. */ void checkColumnList(ArrayList list) throws DatabaseException { if (list != null) { for (int i = 0; i < list.size(); ++i) { String col = (String) list.get(i); String resolved_col = resolveColumnName(col); if (resolved_col == null) { throw new DatabaseException( "Column '" + col + "' not found the table."); } list.set(i, resolved_col); } } } // ---------- Statics ---------- /** * Given a DatabaseConnection and a TableName object, this returns an * implementation of ColumnChecker that is able to check that the column * name exists in the table, and that the reference is not ambigious. 
*/ static ColumnChecker standardColumnChecker( DatabaseConnection database, TableName tname) { final DataTableDef table_def = database.getTable(tname).getDataTableDef(); final boolean ignores_case = database.isInCaseInsensitiveMode(); // Implement the checker return new ColumnChecker() { String resolveColumnName(String col_name) throws DatabaseException { // We need to do case sensitive and case insensitive resolution, String found_col = null; for (int n = 0; n < table_def.columnCount(); ++n) { DataTableColumnDef col = (DataTableColumnDef) table_def.columnAt(n); if (!ignores_case) { if (col.getName().equals(col_name)) { return col_name; } } else { if (col.getName().equalsIgnoreCase(col_name)) { if (found_col != null) { throw new DatabaseException("Ambiguous column name '" + col_name + "'"); } found_col = col.getName(); } } } return found_col; } }; } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/interpret/ColumnDef.java000066400000000000000000000213461330501023400264560ustar00rootroot00000000000000/** * com.mckoi.database.interpret.ColumnDef 09 Sep 2001 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package com.mckoi.database.interpret; import com.mckoi.database.*; import com.mckoi.database.sql.ParseException; import com.mckoi.database.sql.SQLConstants; import com.mckoi.database.sql.Token; import com.mckoi.database.global.SQLTypes; import java.util.ArrayList; /** * Represents a column definition (description). * * @author Tobias Downer */ public final class ColumnDef implements java.io.Serializable, StatementTreeObject, Cloneable { static final long serialVersionUID = 8347617136528650961L; // DataTableColumnDef col; String name; // int sql_type; // int size; // int scale; // String class_constraint; // // String locale_str; // int strength; // int decomposition; TType type; String index_str; Expression default_expression; Expression original_default_expression; private boolean not_null = false; private boolean primary_key = false; private boolean unique = false; public ColumnDef() { // col = new DataTableColumnDef(); } /** * Returns true if this column has a primary key constraint set on it. */ public boolean isPrimaryKey() { return primary_key; } /** * Returns true if this column has the unique constraint set for it. */ public boolean isUnique() { return unique; } /** * Returns true if this column has the not null constraint set for it. */ public boolean isNotNull() { return not_null; } /** * Sets the name of the column. */ public void setName(String name) { this.name = name; } /** * Adds a constraint to this column. */ public void addConstraint(String constraint) { if (constraint.equals("NOT NULL")) { not_null = true; // col.setNotNull(true); } else if (constraint.equals("NULL")) { not_null = false; // col.setNotNull(false); } else if (constraint.equals("PRIMARY")) { primary_key = true; } else if (constraint.equals("UNIQUE")) { unique = true; } else { throw new RuntimeException("Unknown constraint: " + constraint); } } /** * Sets the type of data of this column. 
*/ public void setDataType(TType type) { this.type = type; } // /** // * Sets the type of data this column is. // */ // public void setDataType(String type, int size, int scale) // throws ParseException { // int data_type; // // String ltype = type.toLowerCase(); // if (ltype.equals("bit") || ltype.equals("boolean")) { // data_type = SQLTypes.BIT; // if (size != -1 || scale != -1) { // throw new ParseException("size/scale for bit."); // } // } // else if (ltype.equals("tinyint")) { // data_type = SQLTypes.TINYINT; // } // else if (ltype.equals("smallint")) { // data_type = SQLTypes.SMALLINT; // } // else if (ltype.equals("integer") || ltype.equals("int")) { // data_type = SQLTypes.INTEGER; // } // else if (ltype.equals("bigint")) { // data_type = SQLTypes.BIGINT; // } // else if (ltype.equals("float")) { // data_type = SQLTypes.FLOAT; // } // else if (ltype.equals("real")) { // data_type = SQLTypes.REAL; // } // else if (ltype.equals("double")) { // data_type = SQLTypes.DOUBLE; // } // else if (ltype.equals("numeric")) { // data_type = SQLTypes.NUMERIC; // } // else if (ltype.equals("decimal")) { // data_type = SQLTypes.DECIMAL; // } // else if (ltype.equals("char")) { // data_type = SQLTypes.CHAR; // if (scale != -1) { // throw new ParseException("scale for char."); // } // if (size == -1) { // size = 1; // } // } // else if (ltype.equals("varchar")) { // data_type = SQLTypes.VARCHAR; // if (scale != -1) { // throw new ParseException("scale for varchar."); // } // if (size == -1) size = Integer.MAX_VALUE; // } // else if (ltype.equals("longvarchar") || ltype.equals("string") || // ltype.equals("text") ) { // data_type = SQLTypes.LONGVARCHAR; // if (scale != -1) { // throw new ParseException("scale for longvarchar."); // } // if (size == -1) size = Integer.MAX_VALUE; // } // else if (ltype.equals("date")) { // data_type = SQLTypes.DATE; // if (size != -1 || scale != -1) { // throw new ParseException("size/scale for date."); // } // } // else if 
(ltype.equals("time")) { // data_type = SQLTypes.TIME; // if (size != -1 || scale != -1) { // throw new ParseException("size/scale for time."); // } // } // else if (ltype.equals("timestamp")) { // data_type = SQLTypes.TIMESTAMP; // if (size != -1 || scale != -1) { // throw new ParseException("size/scale for timestamp."); // } // } // else if (ltype.equals("binary")) { // data_type = SQLTypes.BINARY; // if (scale != -1) { // throw new ParseException("scale for binary."); // } // if (size == -1) { // size = Integer.MAX_VALUE; // } // } // else if (ltype.equals("varbinary")) { // data_type = SQLTypes.VARBINARY; // if (scale != -1) { // throw new ParseException("scale for varbinary."); // } // if (size == -1) { // size = Integer.MAX_VALUE; // } // } // else if (ltype.equals("longvarbinary") || // ltype.equals("blob")) { // data_type = SQLTypes.LONGVARBINARY; // if (scale != -1) { // throw new ParseException("scale for longvarbinary."); // } // if (size == -1) { // size = Integer.MAX_VALUE; // } // } // else { // throw new ParseException("Unknown type: " + ltype); // } // // this.sql_type = data_type; // this.size = size; // this.scale = scale; // // } // // /** // * Sets the column definition for a java object type. // */ // public void setDataType(String type, Token class_ref) { // if (!type.equals("JAVA_OBJECT")) { // throw new Error("setDataType called with incorrect type."); // } // // // Default class constraint is 'java.lang.Object' // String class_constraint = "java.lang.Object"; // if (class_ref != null) { // class_constraint = class_ref.image; // } // // this.sql_type = SQLTypes.JAVA_OBJECT; // this.size = -1; // this.scale = -1; // this.class_constraint = class_constraint; // // } // // /** // * Sets the locale, and collate strength and decomposition of this string // * column. If strength or decomposition are -1 then use the default // * strength and decomposition levels. 
// */ // public void setCollateType(String locale_str, // int strength, int decomposition) { // this.locale_str = locale_str; // this.strength = strength; // this.decomposition = decomposition; // } /** * Sets the indexing. */ public void setIndex(Token t) throws ParseException { if (t.kind == SQLConstants.INDEX_NONE) { index_str = "BlindSearch"; // col.setIndexScheme("BlindSearch"); } else if (t.kind == SQLConstants.INDEX_BLIST) { index_str = "InsertSearch"; // col.setIndexScheme("InsertSearch"); } else { throw new ParseException("Unrecognized indexing scheme."); } } /** * Sets the default expression (this is used to make a new constraint). */ public void setDefaultExpression(Expression exp) { default_expression = exp; try { original_default_expression = (Expression) exp.clone(); } catch (CloneNotSupportedException e) { throw new Error(e.getMessage()); } } // Implemented from StatementTreeObject public void prepareExpressions(ExpressionPreparer preparer) throws DatabaseException { if (default_expression != null) { default_expression.prepare(preparer); } } public Object clone() throws CloneNotSupportedException { ColumnDef v = (ColumnDef) super.clone(); if (default_expression != null) { v.default_expression = (Expression) default_expression.clone(); } return v; } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/interpret/Compact.java000066400000000000000000000041201330501023400261570ustar00rootroot00000000000000/** * com.mckoi.database.interpret.Compact 14 Sep 2001 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.interpret; import java.util.ArrayList; import java.util.List; import com.mckoi.database.*; /** * Statement that handles COMPACT sql command. * * @author Tobias Downer */ public class Compact extends Statement { /** * The name the table that we are to update. */ String table_name; // ---------- Implemented from Statement ---------- public void prepare() throws DatabaseException { table_name = (String) cmd.getObject("table_name"); } public Table evaluate() throws DatabaseException { DatabaseQueryContext context = new DatabaseQueryContext(database); // TableName tname = // TableName.resolve(database.getCurrentSchema(), table_name); TableName tname = resolveTableName(table_name, database); // Does the table exist? if (!database.tableExists(tname)) { throw new DatabaseException("Table '" + tname + "' does not exist."); } // Does the user have privs to compact this tables? if (!database.getDatabase().canUserCompactTableObject(context, user, tname)) { throw new UserAccessException( "User not permitted to compact table: " + table_name); } // Compact the table, database.compactTable(tname); // Return '0' if success. return FunctionTable.resultTable(context, 0); } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/interpret/CompleteTransaction.java000066400000000000000000000040401330501023400305500ustar00rootroot00000000000000/** * com.mckoi.database.interpret.CompleteTransaction 14 Sep 2001 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.interpret; import com.mckoi.database.*; import java.util.ArrayList; import java.util.List; /** * This represents either a COMMIT or ROLLBACK SQL command. * * @author Tobias Downer */ public class CompleteTransaction extends Statement { String command; // This is set to either 'commit' or 'rollback' // ---------- Implemented from Statement ---------- public void prepare() throws DatabaseException { command = (String) cmd.getObject("command"); } public Table evaluate() throws DatabaseException, TransactionException { DatabaseQueryContext context = new DatabaseQueryContext(database); if (command.equals("commit")) { // try { // Commit the current transaction on this connection. database.commit(); // } // catch (TransactionException e) { // // This needs to be handled better! // Debug.writeException(e); // throw new DatabaseException(e.getMessage()); // } return FunctionTable.resultTable(context, 0); } else if (command.equals("rollback")) { // Rollback the current transaction on this connection. 
database.rollback(); return FunctionTable.resultTable(context, 0); } else { throw new Error("Unrecognised transaction completion command."); } } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/interpret/ConstraintDef.java000066400000000000000000000137301330501023400273430ustar00rootroot00000000000000/** * com.mckoi.database.interpret.ConstraintDef 09 Sep 2001 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.interpret; import com.mckoi.database.*; import java.util.ArrayList; /** * Represents a constraint definition (description) for a table. * * @author Tobias Downer */ public final class ConstraintDef implements java.io.Serializable, StatementTreeObject, Cloneable { static final long serialVersionUID = -6648793780645431100L; // ---------- Statics that represent the base types of constraints ---------- /** * A PRIMARY_KEY constraint. With this constraint, the 'column_list' * list contains the names of the columns in this table that are defined as * the primary key. There may only be one primary key constraint per table. */ public static final int PRIMARY_KEY = 1; /** * A UNIQUE constraint. With this constraint, the 'column_list' list * contains the names of the columns in this table that must be unique. */ public static final int UNIQUE = 2; /** * A FOREIGN_KEY constraint. 
With this constraint, the 'table_name' string * contains the name of the table that this is a foreign key for, the * 'column_list' list contains the list of foreign key columns, and * 'column_list2' optionally contains the referenced columns. */ public static final int FOREIGN_KEY = 3; /** * A CHECK constraint. With this constraint, the 'expression' object * contains the expression that must evaluate to true when adding a * column to the table. */ public static final int CHECK = 4; // The type of constraint (from types in DataTableConstraintDef) int type; // The name of the constraint or null if the constraint has no name (in // which case it must be given an auto generated unique name at some point). String name; // The Check Expression Expression check_expression; // The serializable plain check expression as originally parsed Expression original_check_expression; // The first column list ArrayList column_list; // The second column list ArrayList column_list2; // The name of the table if referenced. String reference_table_name; // The foreign key update rule String update_rule; // The foreign key delete rule String delete_rule; // Whether this constraint is deferred to when the transaction commits. // ( By default we are 'initially immediate deferrable' ) short deferred = Transaction.INITIALLY_IMMEDIATE; public ConstraintDef() { } /** * Sets the name of the constraint. */ public void setName(String name) { this.name = name; } /** * Sets object up for a primary key constraint. */ public void setPrimaryKey(ArrayList list) { type = PRIMARY_KEY; column_list = list; } /** * Sets object up for a unique constraint. */ public void setUnique(ArrayList list) { type = UNIQUE; column_list = list; } /** * Sets object up for a check constraint. 
*/ public void setCheck(Expression exp) { type = CHECK; check_expression = exp; try { original_check_expression = (Expression) exp.clone(); } catch (CloneNotSupportedException e) { throw new Error(e.getMessage()); } } /** * Sets object up for foreign key reference. */ public void setForeignKey(String ref_table, ArrayList col_list, ArrayList ref_col_list, String delete_rule, String update_rule) { type = FOREIGN_KEY; reference_table_name = ref_table; column_list = col_list; column_list2 = ref_col_list; this.delete_rule = delete_rule; this.update_rule = update_rule; // System.out.println("ConstraintDef setting rules: " + delete_rule + ", " + update_rule); } /** * Sets that this constraint is initially deferred. */ public void setInitiallyDeferred() { deferred = Transaction.INITIALLY_DEFERRED; } /** * Sets that this constraint is not deferrable. */ public void setNotDeferrable() { deferred = Transaction.NOT_DEFERRABLE; } /** * Returns the first column list as a string array. */ public String[] getColumnList() { return (String[]) column_list.toArray(new String[column_list.size()]); } /** * Returns the first column list as a string array. */ public String[] getColumnList2() { return (String[]) column_list2.toArray(new String[column_list2.size()]); } /** * Returns the delete rule if this is a foreign key reference. */ public String getDeleteRule() { return delete_rule; } /** * Returns the update rule if this is a foreign key reference. 
*/ public String getUpdateRule() { return update_rule; } // Implemented from StatementTreeObject public void prepareExpressions(ExpressionPreparer preparer) throws DatabaseException { if (check_expression != null) { check_expression.prepare(preparer); } } public Object clone() throws CloneNotSupportedException { ConstraintDef v = (ConstraintDef) super.clone(); if (check_expression != null) { v.check_expression = (Expression) check_expression.clone(); } if (column_list != null) { v.column_list = (ArrayList) column_list.clone(); } if (column_list2 != null) { v.column_list2 = (ArrayList) column_list2.clone(); } return v; } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/interpret/CreateTable.java000066400000000000000000000317561330501023400267630ustar00rootroot00000000000000/** * com.mckoi.database.interpret.CreateTable 14 Sep 2001 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.interpret; import com.mckoi.database.*; import com.mckoi.util.IntegerVector; import java.util.Vector; import java.util.ArrayList; import java.util.List; /** * A parsed state container for the 'create' statement. * * @author Tobias Downer */ public class CreateTable extends Statement { /** * Set to true if this create statement is for a temporary table. */ boolean temporary = false; /** * Only create if table doesn't exist. 
*/ boolean only_if_not_exists = false; /** * The name of the table to create. */ String table_name; /** * List of column declarations (ColumnDef) */ ArrayList columns; /** * List of table constraints (ConstraintDef) */ ArrayList constraints; // /** // * The expression that must be evaluated to true for this row to be // * added to the table. // */ // Expression check_exp; /** * The TableName object. */ private TableName tname; // /** // * Adds a new ColumnDef object to this create statement. A ColumnDef // * object describes a column for the new table we are creating. The column's // * must be added in the order they are to be in the created table. // */ // void addColumnDef(ColumnDef column) { // columns.addElement(column); // } /** * Adds a new ConstraintDef object to this create statement. A ConstraintDef * object describes any constraints for the new table we are creating. */ void addConstraintDef(ConstraintDef constraint) { constraints.add(constraint); } // /** // * Handles the create statement 'CHECK' expression for compatibility. // */ // void addCheckConstraint(Expression check_expression) { // ConstraintDef constraint = new ConstraintDef(); // constraint.setCheck(check_expression); // constraints.addElement(constraint); // } /** * Creates a DataTableDef that describes the table that was defined by * this create statement. This is used by the 'alter' statement. */ DataTableDef createDataTableDef() throws DatabaseException { // Make all this information into a DataTableDef object... DataTableDef table_def = new DataTableDef(); table_def.setTableName(tname); table_def.setTableClass("com.mckoi.database.VariableSizeDataTableFile"); // Add the columns. // NOTE: Any duplicate column names will be found here... for (int i = 0; i < columns.size(); ++i) { DataTableColumnDef cd = (DataTableColumnDef) columns.get(i); table_def.addColumn(cd); } return table_def; } /** * Adds a schema constraint to the rules for the schema represented by the * manager. 
*/ static void addSchemaConstraint(DatabaseConnection manager, TableName table, ConstraintDef constraint) throws DatabaseException { if (constraint.type == ConstraintDef.PRIMARY_KEY) { manager.addPrimaryKeyConstraint(table, constraint.getColumnList(), constraint.deferred, constraint.name); } else if (constraint.type == ConstraintDef.FOREIGN_KEY) { // Currently we forbid referencing a table in another schema TableName ref_table = TableName.resolve(constraint.reference_table_name); String update_rule = constraint.getUpdateRule().toUpperCase(); String delete_rule = constraint.getDeleteRule().toUpperCase(); if (table.getSchema().equals(ref_table.getSchema())) { manager.addForeignKeyConstraint( table, constraint.getColumnList(), ref_table, constraint.getColumnList2(), delete_rule, update_rule, constraint.deferred, constraint.name); } else { throw new DatabaseException("Foreign key reference error: " + "Not permitted to reference a table outside of the schema: " + table + " -> " + ref_table); } } else if (constraint.type == ConstraintDef.UNIQUE) { manager.addUniqueConstraint(table, constraint.getColumnList(), constraint.deferred, constraint.name); } else if (constraint.type == ConstraintDef.CHECK) { manager.addCheckConstraint(table, constraint.original_check_expression, constraint.deferred, constraint.name); } else { throw new DatabaseException("Unrecognized constraint type."); } } /** * Returns a com.mckoi.database.interpret.ColumnDef object a a * com.mckoi.database.DataTableColumnDef object. 
*/ static DataTableColumnDef convertColumnDef(ColumnDef cdef) { TType type = cdef.type; DataTableColumnDef dtcdef = new DataTableColumnDef(); dtcdef.setName(cdef.name); dtcdef.setNotNull(cdef.isNotNull()); dtcdef.setFromTType(type); if (cdef.index_str != null) { dtcdef.setIndexScheme(cdef.index_str); } if (cdef.default_expression != null) { dtcdef.setDefaultExpression(cdef.original_default_expression); } dtcdef.initTTypeInfo(); return dtcdef; } /** * Sets up all constraints specified in this create statement. */ void setupAllConstraints() throws DatabaseException { for (int i = 0; i < constraints.size(); ++i) { ConstraintDef constraint = (ConstraintDef) constraints.get(i); // Add this to the schema manager tables addSchemaConstraint(database, tname, constraint); } } // ---------- Implemented from Statement ---------- public void prepare() throws DatabaseException { // Get the state from the model temporary = cmd.getBoolean("temporary"); only_if_not_exists = cmd.getBoolean("only_if_not_exists"); table_name = (String) cmd.getObject("table_name"); ArrayList column_list = (ArrayList) cmd.getObject("column_list"); constraints = (ArrayList) cmd.getObject("constraint_list"); // Convert column_list to list of com.mckoi.database.DataTableColumnDef int size = column_list.size(); columns = new ArrayList(size); for (int i = 0; i < size; ++i) { ColumnDef cdef = (ColumnDef) column_list.get(i); columns.add(convertColumnDef(cdef)); } // ---- String schema_name = database.getCurrentSchema(); tname = TableName.resolve(schema_name, table_name); String name_strip = tname.getName(); if (name_strip.indexOf('.') != -1) { throw new DatabaseException("Table name can not contain '.' character."); } final boolean ignores_case = database.isInCaseInsensitiveMode(); // Implement the checker class for this statement. 
ColumnChecker checker = new ColumnChecker() { String resolveColumnName(String col_name) throws DatabaseException { // We need to do case sensitive and case insensitive resolution, String found_col = null; for (int n = 0; n < columns.size(); ++n) { DataTableColumnDef col = (DataTableColumnDef) columns.get(n); if (!ignores_case) { if (col.getName().equals(col_name)) { return col_name; } } else { if (col.getName().equalsIgnoreCase(col_name)) { if (found_col != null) { throw new DatabaseException("Ambiguous column name '" + col_name + "'"); } found_col = col.getName(); } } } return found_col; } }; ArrayList unique_column_list = new ArrayList(); ArrayList primary_key_column_list = new ArrayList(); // Check the expressions that represent the default values for the columns. // Also check each column name for (int i = 0; i < columns.size(); ++i) { DataTableColumnDef cdef = (DataTableColumnDef) columns.get(i); ColumnDef model_cdef = (ColumnDef) column_list.get(i); checker.checkExpression(cdef.getDefaultExpression(database.getSystem())); String col_name = cdef.getName(); // If column name starts with [table_name]. then strip it off cdef.setName(checker.stripTableName(name_strip, col_name)); // If unique then add to unique columns if (model_cdef.isUnique()) { unique_column_list.add(col_name); } // If primary key then add to primary key columns if (model_cdef.isPrimaryKey()) { primary_key_column_list.add(col_name); } } // Add the unique and primary key constraints. if (unique_column_list.size() > 0) { ConstraintDef constraint = new ConstraintDef(); constraint.setUnique(unique_column_list); addConstraintDef(constraint); } if (primary_key_column_list.size() > 0) { ConstraintDef constraint = new ConstraintDef(); constraint.setPrimaryKey(primary_key_column_list); addConstraintDef(constraint); } // Strip the column names and set the expression in all the constraints. 
for (int i = 0; i < constraints.size(); ++i) { ConstraintDef constraint = (ConstraintDef) constraints.get(i); checker.stripColumnList(name_strip, constraint.column_list); // Check the referencing table for foreign keys if (constraint.type == ConstraintDef.FOREIGN_KEY) { checker.stripColumnList(constraint.reference_table_name, constraint.column_list2); TableName ref_tname = resolveTableName(constraint.reference_table_name, database); if (database.isInCaseInsensitiveMode()) { ref_tname = database.tryResolveCase(ref_tname); } constraint.reference_table_name = ref_tname.toString(); DataTableDef ref_table_def; if (database.tableExists(ref_tname)) { // Get the DataTableDef for the table we are referencing ref_table_def = database.getDataTableDef(ref_tname); } else if (ref_tname.equals(tname)) { // We are referencing the table we are creating ref_table_def = createDataTableDef(); } else { throw new DatabaseException( "Referenced table '" + ref_tname + "' in constraint '" + constraint.name + "' does not exist."); } // Resolve columns against the given table def ref_table_def.resolveColumnsInArray(database, constraint.column_list2); } checker.checkExpression(constraint.check_expression); checker.checkColumnList(constraint.column_list); } } public Table evaluate() throws DatabaseException { DatabaseQueryContext context = new DatabaseQueryContext(database); // Does the schema exist? boolean ignore_case = database.isInCaseInsensitiveMode(); SchemaDef schema = database.resolveSchemaCase(tname.getSchema(), ignore_case); if (schema == null) { throw new DatabaseException("Schema '" + tname.getSchema() + "' doesn't exist."); } else { tname = new TableName(schema.getName(), tname.getName()); } // Does the user have privs to create this tables? if (!database.getDatabase().canUserCreateTableObject(context, user, tname)) { throw new UserAccessException( "User not permitted to create table: " + table_name); } // PENDING: Creation of temporary tables... // Does the table already exist? 
if (!database.tableExists(tname)) { // Create the data table definition and tell the database to create // it. DataTableDef table_def = createDataTableDef(); database.createTable(table_def); // The initial grants for a table is to give the user who created it // full access. database.getGrantManager().addGrant( Privileges.TABLE_ALL_PRIVS, GrantManager.TABLE, tname.toString(), user.getUserName(), true, Database.INTERNAL_SECURE_USERNAME); // Set the constraints in the schema. setupAllConstraints(); // Return '0' if we created the table. (0 rows affected) return FunctionTable.resultTable(context, 0); } // Report error unless 'if not exists' command is in the statement. if (only_if_not_exists == false) { throw new DatabaseException("Table '" + tname + "' already exists."); } // Return '0' (0 rows affected). This happens when we don't create a // table (because it exists) and the 'IF NOT EXISTS' clause is present. return FunctionTable.resultTable(context, 0); } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/interpret/CreateTrigger.java000066400000000000000000000130611330501023400273240ustar00rootroot00000000000000/** * com.mckoi.database.interpret.CreateTrigger 14 Sep 2001 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package com.mckoi.database.interpret; import com.mckoi.database.*; import com.mckoi.util.IntegerVector; import java.util.ArrayList; import java.util.List; /** * A parsed state container for the 'CREATE TRIGGER' statement. * * @author Tobias Downer */ public class CreateTrigger extends Statement { // ---------- Implemented from Statement ---------- public void prepare() throws DatabaseException { } public Table evaluate() throws DatabaseException { String trigger_name = (String) cmd.getObject("trigger_name"); String type = (String) cmd.getObject("type"); String table_name = (String) cmd.getObject("table_name"); List types = (List) cmd.getObject("trigger_types"); DatabaseQueryContext context = new DatabaseQueryContext(database); TableName tname = TableName.resolve(database.getCurrentSchema(), table_name); if (type.equals("callback_trigger")) { // Callback trigger - notifies the client when an event on a table // occurs. if (types.size() > 1) { throw new DatabaseException( "Multiple triggered types not allowed for callback triggers."); } String trig_type = ((String) types.get(0)).toUpperCase(); int int_type; if (trig_type.equals("INSERT")) { int_type = TriggerEvent.INSERT; } else if (trig_type.equals("DELETE")) { int_type = TriggerEvent.DELETE; } else if (trig_type.equals("UPDATE")) { int_type = TriggerEvent.UPDATE; } else { throw new DatabaseException("Unknown trigger type: " + trig_type); } database.createTrigger(trigger_name, tname.toString(), int_type); } else if (type.equals("procedure_trigger")) { // Get the procedure manager ProcedureManager proc_manager = database.getProcedureManager(); String before_after = (String) cmd.getObject("before_after"); String procedure_name = (String) cmd.getObject("procedure_name"); Expression[] procedure_args = (Expression[]) cmd.getObject("procedure_args"); // Convert the trigger into a table name, String schema_name = database.getCurrentSchema(); TableName t_name = TableName.resolve(schema_name, trigger_name); t_name = 
database.tryResolveCase(t_name); // Resolve the procedure name into a TableName object. TableName t_p_name = TableName.resolve(schema_name, procedure_name); t_p_name = database.tryResolveCase(t_p_name); // Does the procedure exist in the system schema? ProcedureName p_name = new ProcedureName(t_p_name); // Check the trigger name doesn't clash with any existing database object. if (database.tableExists(t_name)) { throw new DatabaseException("A database object with name '" + t_name + "' already exists."); } // Check the procedure exists. if (!proc_manager.procedureExists(p_name)) { throw new DatabaseException("Procedure '" + p_name + "' could not be found."); } // Resolve the listening type int listen_type = 0; if (before_after.equals("before")) { listen_type |= TableModificationEvent.BEFORE; } else if (before_after.equals("after")) { listen_type |= TableModificationEvent.AFTER; } else { throw new RuntimeException("Unknown before/after type."); } for (int i = 0; i < types.size(); ++i) { String trig_type = (String) types.get(i); if (trig_type.equals("insert")) { listen_type |= TableModificationEvent.INSERT; } else if (trig_type.equals("delete")) { listen_type |= TableModificationEvent.DELETE; } else if (trig_type.equals("update")) { listen_type |= TableModificationEvent.UPDATE; } } // Resolve the procedure arguments, TObject[] vals = new TObject[procedure_args.length]; for (int i = 0; i < procedure_args.length; ++i) { vals[i] = procedure_args[i].evaluate(null, null, context); } // Create the trigger, ConnectionTriggerManager manager = database.getConnectionTriggerManager(); manager.createTableTrigger(t_name.getSchema(), t_name.getName(), listen_type, tname, p_name.toString(), vals); // The initial grants for a trigger is to give the user who created it // full access. 
database.getGrantManager().addGrant( Privileges.PROCEDURE_ALL_PRIVS, GrantManager.TABLE, t_name.toString(), user.getUserName(), true, Database.INTERNAL_SECURE_USERNAME); } else { throw new RuntimeException("Unknown trigger type."); } // Return success return FunctionTable.resultTable(context, 0); } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/interpret/Delete.java000066400000000000000000000112401330501023400257740ustar00rootroot00000000000000/** * com.mckoi.database.interpret.Delete 14 Sep 2001 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.interpret; import java.util.*; import com.mckoi.database.*; /** * Logic for the DELETE FROM SQL statement. * * @author Tobias Downer */ public class Delete extends Statement { /** * The name the table that we are to delete from. */ String table_name; /** * If the delete statement has a 'where' clause, then this is set here. If * it has no 'where' clause then we apply to the entire table. */ SearchExpression where_condition; /** * The limit of the number of rows that are updated by this statement. A * limit of < 0 means there is no limit. */ int limit = -1; // ----- /** * The DataTable we are deleting from . */ private DataTable update_table; /** * The TableName object of the table being created. 
*/ private TableName tname; /** * Tables that are relationally linked to the table being inserted into, set * after 'prepare'. This is used to determine the tables we need to read * lock because we need to validate relational constraints on the tables. */ private ArrayList relationally_linked_tables; /** * The plan for the set of records we are deleting in this query. */ private QueryPlanNode plan; // ---------- Implemented from Statement ---------- public void prepare() throws DatabaseException { // Get variables from the model. table_name = (String) cmd.getObject("table_name"); where_condition = (SearchExpression) cmd.getObject("where_clause"); limit = cmd.getInt("limit"); // --- // Resolve the TableName object. tname = resolveTableName(table_name, database); // Does the table exist? if (!database.tableExists(tname)) { throw new DatabaseException("Table '" + tname + "' does not exist."); } // Get the table we are updating update_table = database.getTable(tname); // Form a TableSelectExpression that represents the select on the table TableSelectExpression select_expression = new TableSelectExpression(); // Create the FROM clause select_expression.from_clause.addTable(table_name); // Set the WHERE clause select_expression.where_clause = where_condition; // Generate the TableExpressionFromSet hierarchy for the expression, TableExpressionFromSet from_set = Planner.generateFromSet(select_expression, database); // Form the plan plan = Planner.formQueryPlan(database, select_expression, from_set, null); // Resolve all tables linked to this TableName[] linked_tables = database.queryTablesRelationallyLinkedTo(tname); relationally_linked_tables = new ArrayList(linked_tables.length); for (int i = 0; i < linked_tables.length; ++i) { relationally_linked_tables.add(database.getTable(linked_tables[i])); } } public Table evaluate() throws DatabaseException { DatabaseQueryContext context = new DatabaseQueryContext(database); // Check that this user has privs to delete from the 
table. if (!database.getDatabase().canUserDeleteFromTableObject(context, user, tname)) { throw new UserAccessException( "User not permitted to delete from table: " + table_name); } // Check the user has select permissions on the tables in the plan. Select.checkUserSelectPermissions(context, user, plan); // Evaluates the delete statement... // Evaluate the plan to find the update set. Table delete_set = plan.evaluate(context); // Delete from the data table. int delete_count = update_table.delete(delete_set, limit); // Notify TriggerManager that we've just done an update. if (delete_count > 0) { database.notifyTriggerEvent(new TriggerEvent( TriggerEvent.DELETE, tname.toString(), delete_count)); } // Return the number of columns we deleted. return FunctionTable.resultTable(context, delete_count); } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/interpret/DropTable.java000066400000000000000000000126571330501023400264630ustar00rootroot00000000000000/** * com.mckoi.database.interpret.DropTable 14 Sep 2001 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.interpret; import com.mckoi.database.*; import java.util.ArrayList; import java.util.List; /** * The logic of the 'DROP TABLE' SQL command. * * @author Tobias Downer */ public class DropTable extends Statement { /** * Only create if table doesn't exist. 
*/ boolean only_if_exists = false; /** * The list of tables to drop. */ ArrayList drop_tables = new ArrayList(); // /** // * Adds the table name to the list of tables to drop. // */ // void addTable(String table) throws ParseException { // if (drop_tables.contains(table)) { // throw new ParseException("Duplicate table in drop"); // } // drop_tables.add(table); // } // ---------- Implemented from Statement ---------- public void prepare() throws DatabaseException { only_if_exists = cmd.getBoolean("only_if_exists"); drop_tables = (ArrayList) cmd.getObject("table_list"); // Check there are no duplicate entries in the list of tables to drop for (int i = 0; i < drop_tables.size(); ++i) { Object check = drop_tables.get(i); for (int n = i + 1; n < drop_tables.size(); ++n) { if (drop_tables.get(n).equals(check)) { throw new DatabaseException("Duplicate table in drop: " + check); } } } } public Table evaluate() throws DatabaseException { DatabaseQueryContext context = new DatabaseQueryContext(database); int list_size = drop_tables.size(); ArrayList resolved_tables = new ArrayList(list_size); // Check the user has privs to delete these tables... for (int i = 0; i < list_size; ++i) { String table_name = drop_tables.get(i).toString(); TableName tname = resolveTableName(table_name, database); // Does the table exist? if (!only_if_exists && !database.tableExists(tname)) { throw new DatabaseException("Table '" + tname + "' does not exist."); } resolved_tables.add(tname); // Does the user have privs to drop this tables? if (!database.getDatabase().canUserDropTableObject(context, user, tname)) { throw new UserAccessException( "User not permitted to drop table: " + tname); } } // Check there are no referential links to any tables being dropped for (int i = 0; i < list_size; ++i) { TableName tname = (TableName) resolved_tables.get(i); // Any tables that have a referential link to this table. 
Transaction.ColumnGroupReference[] refs = database.queryTableImportedForeignKeyReferences(tname); for (int n = 0; n < refs.length; ++n) { // If the key table isn't being dropped then error if (!resolved_tables.contains(refs[n].key_table_name)) { throw new DatabaseConstraintViolationException( DatabaseConstraintViolationException.DROP_TABLE_VIOLATION, "Constraint violation (" + refs[n].name + ") dropping table " + tname + " because of referential link from " + refs[n].key_table_name); } } } // If the 'only if exists' flag is false, we need to check tables to drop // exist first. if (!only_if_exists) { // For each table to drop. for (int i = 0; i < list_size; ++i) { // Does the table already exist? // String table_name = drop_tables.get(i).toString(); //// TableName tname = //// TableName.resolve(database.getCurrentSchema(), table_name); // TableName tname = resolveTableName(table_name, database); TableName tname = (TableName) resolved_tables.get(i); // If table doesn't exist, throw an error if (!database.tableExists(tname)) { throw new DatabaseException("Can not drop table '" + tname + "'. It does not exist."); } } } // For each table to drop. int dropped_table_count = 0; GrantManager grant_manager = database.getGrantManager(); for (int i = 0; i < list_size; ++i) { // Does the table already exist? 
// String table_name = drop_tables.get(i).toString(); // TableName tname = resolveTableName(table_name, database); TableName tname = (TableName) resolved_tables.get(i); if (database.tableExists(tname)) { // Drop table in the transaction database.dropTable(tname); // Drop the grants for this object grant_manager.revokeAllGrantsOnObject( GrantManager.TABLE, tname.toString()); // Drop all constraints from the schema database.dropAllConstraintsForTable(tname); ++dropped_table_count; } } return FunctionTable.resultTable(context, 0); } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/interpret/DropTrigger.java000066400000000000000000000042601330501023400270260ustar00rootroot00000000000000/** * com.mckoi.database.interpret.DropTrigger 14 Sep 2001 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.interpret; import com.mckoi.database.*; import com.mckoi.util.IntegerVector; import java.util.ArrayList; import java.util.List; /** * A parsed state container for the 'DROP TRIGGER' statement. * * @author Tobias Downer */ public class DropTrigger extends Statement { /** * The name of this trigger. 
*/ String trigger_name; // ---------- Implemented from Statement ---------- public void prepare() throws DatabaseException { trigger_name = (String) cmd.getObject("trigger_name"); } public Table evaluate() throws DatabaseException { String type = (String) cmd.getObject("type"); DatabaseQueryContext context = new DatabaseQueryContext(database); if (type.equals("callback_trigger")) { database.deleteTrigger(trigger_name); } else { // Convert the trigger into a table name, String schema_name = database.getCurrentSchema(); TableName t_name = TableName.resolve(schema_name, trigger_name); t_name = database.tryResolveCase(t_name); ConnectionTriggerManager manager = database.getConnectionTriggerManager(); manager.dropTrigger(t_name.getSchema(), t_name.getName()); // Drop the grants for this object database.getGrantManager().revokeAllGrantsOnObject( GrantManager.TABLE, t_name.toString()); } // Return '0' if we created the trigger. return FunctionTable.resultTable(context, 0); } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/interpret/FromClause.java000066400000000000000000000145651330501023400266470ustar00rootroot00000000000000/** * com.mckoi.database.interpret.FromClause 09 Sep 2001 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package com.mckoi.database.interpret; import com.mckoi.database.*; import java.util.Set; import java.util.HashMap; import java.util.ArrayList; import java.util.Collection; /** * A container for the From clause of a select statement. This handles * the different types of joins. * * @author Tobias Downer */ public final class FromClause implements java.io.Serializable, StatementTreeObject, Cloneable { static final long serialVersionUID = 565726601314503609L; /** * The JoiningSet object that we have created to represent the joins in this * FROM clause. */ private JoiningSet join_set = new JoiningSet(); /** * A list of all FromTableDef objects in this clause in order of when they * were specified. */ private ArrayList def_list = new ArrayList(); /** * A list of all table names in this from clause. */ private ArrayList all_table_names = new ArrayList(); /** * An id used for making unique names for anonymous inner selects. */ private int table_key = 0; /** * Creates a new unique key string. */ private String createNewKey() { ++table_key; return Integer.toString(table_key); } private void addTableDef(String table_name, FromTableDef def) { if (table_name != null) { if (all_table_names.contains(table_name)) { throw new Error("Duplicate table name in FROM clause: " + table_name); } all_table_names.add(table_name); } // Create a new unique key for this table String key = createNewKey(); def.setUniqueKey(key); // Add the table key to the join set join_set.addTable(new TableName(key)); // Add to the alias def map def_list.add(def); } /** * Adds a table name to this FROM clause. Note that the given name * may be a dot deliminated ref such as (schema.table_name). */ public void addTable(String table_name) { addTableDef(table_name, new FromTableDef(table_name)); } /** * Adds a table name + alias to this FROM clause. 
*/ public void addTable(String table_name, String table_alias) { addTableDef(table_alias, new FromTableDef(table_name, table_alias)); } /** * A generic form of a table declaration. If any parameters are 'null' it * means the information is not available. */ public void addTableDeclaration(String table_name, TableSelectExpression select, String table_alias) { // This is an inner select in the FROM clause if (table_name == null && select != null) { if (table_alias == null) { addTableDef(null, new FromTableDef(select)); } else { addTableDef(table_alias, new FromTableDef(select, table_alias)); } } // This is a standard table reference in the FROM clause else if (table_name != null && select == null) { if (table_alias == null) { addTable(table_name); } else { addTable(table_name, table_alias); } } // Error else { throw new Error("Unvalid declaration parameters."); } } /** * Adds a Join to the from clause. 'type' must be a join type as defined * in JoiningSet. */ public void addJoin(int type) { // System.out.println("Add Join: " + type); join_set.addJoin(type); } /** * Hack, add a joining type to the previous entry from the end. This is * an artifact of how joins are parsed. */ public void addPreviousJoin(int type, Expression on_expression) { join_set.addPreviousJoin(type, on_expression); } /** * Adds a Join to the from clause. 'type' must be a join type as defined * in JoiningSet, and expression represents the ON condition. */ public void addJoin(int type, Expression on_expression) { join_set.addJoin(type, on_expression); } /** * Returns the JoiningSet object for the FROM clause. */ public JoiningSet getJoinSet() { return join_set; } /** * Returns the type of join after table 'n' in the set of tables in the * from clause. Returns, JoiningSet.INNER_JOIN, JoiningSet.FULL_OUTER_JOIN, * etc. */ public int getJoinType(int n) { return getJoinSet().getJoinType(n); } /** * Returns the ON Expression for the type of join after table 'n' in the * set. 
*/ public Expression getOnExpression(int n) { return getJoinSet().getOnExpression(n); } /** * Returns a Set of FromTableDef objects that represent all the tables * that are in this from clause. */ public Collection allTables() { return def_list; } // Implemented from StatementTreeObject public void prepareExpressions(ExpressionPreparer preparer) throws DatabaseException { // Prepare expressions in the JoiningSet first int size = join_set.getTableCount() - 1; for (int i = 0; i < size; ++i) { Expression exp = join_set.getOnExpression(i); if (exp != null) { exp.prepare(preparer); } } // Prepare the StatementTree sub-queries in the from tables for (int i = 0; i < def_list.size(); ++i) { FromTableDef table_def = (FromTableDef) def_list.get(i); table_def.prepareExpressions(preparer); } } public Object clone() throws CloneNotSupportedException { FromClause v = (FromClause) super.clone(); v.join_set = (JoiningSet) join_set.clone(); ArrayList cloned_def_list = new ArrayList(def_list.size()); v.def_list = cloned_def_list; v.all_table_names = (ArrayList) all_table_names.clone(); for (int i = 0; i < def_list.size(); ++i) { FromTableDef table_def = (FromTableDef) def_list.get(i); cloned_def_list.add(table_def.clone()); } return v; } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/interpret/FromTableDef.java000066400000000000000000000102711330501023400270670ustar00rootroot00000000000000/** * com.mckoi.database.interpret.FromTableDef 31 Oct 2001 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.interpret; import com.mckoi.database.ExpressionPreparer; import com.mckoi.database.DatabaseException; import com.mckoi.database.StatementTree; /** * Describes a single table declaration in the from clause of a table * expression (SELECT). * * @author Tobias Downer */ public final class FromTableDef implements java.io.Serializable, Cloneable { static final long serialVersionUID = -606852454508224625L; /** * If this is true, then the table def represents a sub-query table. * The 'getSubSelectStatement' and 'getAlias' method can be used to * get the table information. *

* eg. FROM ( SELECT id, number FROM Part ) AS part_info, .... */ private boolean subquery_table; /** * The unique key name given to this table definition. */ private String unique_key; /** * The name of the table this definition references. */ private String table_name; /** * The alias of the table or null if no alias was defined. */ private String table_alias; /** * The TableSelectExpression if this is a subquery table. */ private TableSelectExpression subselect_table; /** * Constructs the table def. The constructs a table that is aliased under * a different name. */ public FromTableDef(String table_name, String table_alias) { this.table_name = table_name; this.table_alias = table_alias; subselect_table = null; subquery_table = false; } /** * A simple table definition (not aliased). */ public FromTableDef(String table_name) { this(table_name, null); } /** * A table that is a sub-query and given an aliased name. */ public FromTableDef(TableSelectExpression select, String table_alias) { this.subselect_table = select; this.table_name = table_alias; this.table_alias = table_alias; subquery_table = true; } /** * A simple sub-query table definition (not aliased). */ public FromTableDef(TableSelectExpression select) { this.subselect_table = select; this.table_name = null; this.table_alias = null; subquery_table = true; } /** * Sets the unique key. */ public void setUniqueKey(String unique_key) { this.unique_key = unique_key; } /** * Returns the name of the table. */ public String getName() { return table_name; } /** * Returns the alias for this table (or null if no alias given). */ public String getAlias() { return table_alias; } /** * Returns the unique key. */ public String getUniqueKey() { return unique_key; } /** * Returns true if this item in the FROM clause is a subquery table. */ public boolean isSubQueryTable() { return subquery_table; } /** * Returns the TableSelectExpression if this is a subquery table. 
*/ public TableSelectExpression getTableSelectExpression() { return subselect_table; } /** * Prepares the expressions in this table def. */ public void prepareExpressions(ExpressionPreparer preparer) throws DatabaseException { if (subselect_table != null) { subselect_table.prepareExpressions(preparer); } } /** * Clones the object (deep clone of immutable members). */ public Object clone() throws CloneNotSupportedException { FromTableDef v = (FromTableDef) super.clone(); if (subselect_table != null) { v.subselect_table = (TableSelectExpression) subselect_table.clone(); } return v; } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/interpret/FromTableDirectSource.java000066400000000000000000000170341330501023400307700ustar00rootroot00000000000000/** * com.mckoi.database.sql.FromTableDirectSource 20 Jul 2001 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.interpret; import com.mckoi.database.*; import java.util.List; import java.util.Collections; /** * An implementation of FromTableInterface that wraps around an * TableName/AbstractDataTable object. The handles case insensitive * resolution. * * @author Tobias Downer */ public class FromTableDirectSource implements FromTableInterface { /** * The TableQueryDef object that links to the underlying table. 
*/ private TableQueryDef table_query; /** * The DataTableDef object that describes the table. */ private DataTableDef data_table_def; /** * The unique name given to this source. */ private String unique_name; /** * The given TableName of this table. */ private TableName table_name; /** * The root name of the table. For example, if this table is 'Part P' the * root name is 'Part' and 'P' is the aliased name. */ private TableName root_name; /** * Set to true if this should do case insensitive resolutions. */ private boolean case_insensitive = false; /** * Constructs the source. */ public FromTableDirectSource(DatabaseConnection connection, TableQueryDef table_query, String unique_name, TableName given_name, TableName root_name) { this.unique_name = unique_name; this.data_table_def = table_query.getDataTableDef(); this.root_name = root_name; if (given_name != null) { this.table_name = given_name; } else { this.table_name = root_name; } // Is the database case insensitive? this.case_insensitive = connection.isInCaseInsensitiveMode(); this.table_query = table_query; } /** * Returns the given name of the table. For example, if the Part table is * aliased as P this returns P. If there is no given name, returns the * root table name. */ public TableName getGivenTableName() { return table_name; } /** * Returns the root name of the table. This TableName can always be used as * a direct reference to a table in the database. */ public TableName getRootTableName() { return root_name; } /** * Creates a QueryPlanNode to be added into a query tree that fetches the * table source. */ public QueryPlanNode createFetchQueryPlanNode() { return table_query.getQueryPlanNode(); } /** * Toggle the case sensitivity flag. 
*/ public void setCaseInsensitive(boolean status) { case_insensitive = status; } private boolean stringCompare(String str1, String str2) { if (!case_insensitive) { return str1.equals(str2); } return str1.equalsIgnoreCase(str2); } // ---------- Implemented from FromTableInterface ---------- public String getUniqueName() { return unique_name; } public boolean matchesReference(String catalog, String schema, String table) { // System.out.println("Matches reference: " + schema + " " + table); // System.out.println(table_name.getName()); // Does this table name represent the correct schema? if (schema != null && !stringCompare(schema, table_name.getSchema())) { // If schema is present and we can't resolve to this schema then false return false; } if (table != null && !stringCompare(table, table_name.getName())) { // If table name is present and we can't resolve to this table name // then return false return false; } // System.out.println("MATCHED!"); // Match was successful, return true; } public int resolveColumnCount(String catalog, String schema, String table, String column) { // NOTE: With this type, we can only ever return either 1 or 0 because // it's impossible to have an ambiguous reference // NOTE: Currently 'catalog' is ignored. // Does this table name represent the correct schema? if (schema != null && !stringCompare(schema, table_name.getSchema())) { // If schema is present and we can't resolve to this schema then return 0 return 0; } if (table != null && !stringCompare(table, table_name.getName())) { // If table name is present and we can't resolve to this table name then // return 0 return 0; } if (column != null) { if (!case_insensitive) { // Can we resolve the column in this table? int i = data_table_def.fastFindColumnName(column); // If i doesn't equal -1 then we've found our column return i == -1 ? 0 : 1; } else { // Case insensitive search (this is slower than case sensitive). 
int resolve_count = 0; int col_count = data_table_def.columnCount(); for (int i = 0; i < col_count; ++i) { if (data_table_def.columnAt(i).getName().equalsIgnoreCase(column)) { ++resolve_count; } } return resolve_count; } } else { // if (column == null) // Return the column count return data_table_def.columnCount(); } } public Variable resolveColumn(String catalog, String schema, String table, String column) { // Does this table name represent the correct schema? if (schema != null && !stringCompare(schema, table_name.getSchema())) { // If schema is present and we can't resolve to this schema throw new Error("Incorrect schema."); } if (table != null && !stringCompare(table, table_name.getName())) { // If table name is present and we can't resolve to this table name throw new Error("Incorrect table."); } if (column != null) { if (!case_insensitive) { // Can we resolve the column in this table? int i = data_table_def.fastFindColumnName(column); if (i == -1) { throw new Error("Could not resolve '" + column + "'"); } return new Variable(table_name, column); } else { // Case insensitive search (this is slower than case sensitive). 
int col_count = data_table_def.columnCount(); for (int i = 0; i < col_count; ++i) { String col_name = data_table_def.columnAt(i).getName(); if (col_name.equalsIgnoreCase(column)) { return new Variable(table_name, col_name); } } throw new Error("Could not resolve '" + column + "'"); } } else { // if (column == null) // Return the first column in the table return new Variable(table_name, data_table_def.columnAt(0).getName()); } } public Variable[] allColumns() { int col_count = data_table_def.columnCount(); Variable[] vars = new Variable[col_count]; for (int i = 0; i < col_count; ++i) { vars[i] = new Variable(table_name, data_table_def.columnAt(i).getName()); } return vars; } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/interpret/FromTableInterface.java000066400000000000000000000064071330501023400302770ustar00rootroot00000000000000/** * com.mckoi.database.sql.FromTableInterface 20 Jul 2001 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.interpret; import com.mckoi.database.*; /** * A single table resource item in a query which handles the behaviour * of resolving references to columns as well as providing various base * utility methods for resolving general variable names. *

* Each instance of this interface represents a single 'FROM' resource. * * @author Tobias Downer */ public interface FromTableInterface { /** * Returns a unique name given to this table source. No other sources * will share this identifier string. */ String getUniqueName(); /** * Returns true if this source will match the given catalog, schema and * table. If any arguments are null then it is not included in the match. *

* Used for 'Part.*' type glob searches. */ boolean matchesReference(String catalog, String schema, String table); /** * Returns the number of instances we can resolve the given catalog, schema, * table and column name to a column or columns within this item. Note that * if catalog, schema, table or column is 'null' then it means it doesn't * matter. *

* For example, say we need to resolve the column 'id' the arguments are * null, null, null, "id". This may resolve to multiple columns if there is * a mixture of tables with "id" as a column. *

* Note that parameters of 'null, null, null, null', * 'null, null, null, not null', 'null, null, not null, not null', * 'null, not null, not null, not null', and * 'not null, not null, not null, not null' are only accepted. */ int resolveColumnCount(String catalog, String schema, String table, String column); /** * Returns a Variable that is a fully resolved form of the given column in * this table set. This method does not have to check whether the parameters * reference more than one column. If more than one column is referenced, * the actual column returned is implementation specific. */ Variable resolveColumn(String catalog, String schema, String table, String column); /** * Returns an array of Variable objects that references each column * available in this table set item in order from left column to * right column. */ Variable[] allColumns(); // /** // * Returns a Queriable object that can be evaluated to return a tangible // * Table object to use in a query. // *

// * Note that this method would generally only be used at the end of the // * lifespan of this instance. // */ // Queriable getQueriable(); } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/interpret/FromTableSubQuerySource.java000066400000000000000000000147231330501023400313370ustar00rootroot00000000000000/** * com.mckoi.database.sql.FromTableSubQuerySource 21 Jul 2001 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.interpret; import com.mckoi.database.*; /** * An implementation of FromTableInterface that wraps around a * TableSelectExpression object as a sub-query source. * * @author Tobias Downer */ public class FromTableSubQuerySource implements FromTableInterface { /** * The wrapped object. */ private TableSelectExpression table_expression; /** * The fully prepared TableExpressionFromSet object that is used to * qualify variables in the table. */ private TableExpressionFromSet from_set; /** * The TableName that this source is generated to (aliased name). If null, * we inherit from the root set. */ private TableName end_table_name; /** * A unique name given to this source that is used to reference it in a * TableSet. */ private String unique_key; /** * The list of all variable names in the resultant source. */ private Variable[] vars; /** * Set to true if this should do case insensitive resolutions. 
*/ private boolean case_insensitive = false; /** * Constructs the source. */ public FromTableSubQuerySource(DatabaseConnection connection, String unique_key, TableSelectExpression table_expression, TableExpressionFromSet from_set, TableName aliased_table_name) { this.unique_key = unique_key; this.table_expression = table_expression; this.from_set = from_set; this.end_table_name = aliased_table_name; // Is the database case insensitive? this.case_insensitive = connection.isInCaseInsensitiveMode(); } /** * Returns the TableSelectExpression for this sub-query. */ TableSelectExpression getTableExpression() { return table_expression; } /** * Returns the TableExpressionFromSet for this sub-query. */ TableExpressionFromSet getFromSet() { return from_set; } /** * Returns the aliased table name of this sub-query or null if it is left * as-is. */ TableName getAliasedName() { return end_table_name; } /** * Makes sure the 'vars' list is created correctly. */ private void ensureVarList() { if (vars == null) { vars = from_set.generateResolvedVariableList(); // for (int i = 0; i < vars.length; ++i) { // System.out.println("+ " + vars[i]); // } // System.out.println("0000"); // Are the variables aliased to a table name? if (end_table_name != null) { for (int i = 0; i < vars.length; ++i) { vars[i].setTableName(end_table_name); } } } } /** * Returns the unique name of this source. */ public String getUniqueKey() { return unique_key; } /** * Toggle the case sensitivity flag. */ public void setCaseInsensitive(boolean status) { case_insensitive = status; } private boolean stringCompare(String str1, String str2) { if (!case_insensitive) { return str1.equals(str2); } return str1.equalsIgnoreCase(str2); } /** * If the given Variable matches the reference then this method returns * true. 
*/ private boolean matchesVar(Variable v, String catalog, String schema, String table, String column) { TableName tn = v.getTableName(); String cn = v.getName(); if (column == null) { return true; } if (!stringCompare(cn, column)) { return false; } if (table == null) { return true; } if (tn == null) { return false; } String tname = tn.getName(); if (tname != null && !stringCompare(tname, table)) { return false; } if (schema == null) { return true; } String sname = tn.getSchema(); if (sname != null && !stringCompare(sname, schema)) { return false; } // Currently we ignore catalog return true; } // ---------- Implemented from FromTableInterface ---------- public String getUniqueName() { return getUniqueKey(); } public boolean matchesReference(String catalog, String schema, String table) { if (schema == null && table == null) { return true; } if (end_table_name != null) { String ts = end_table_name.getSchema(); String tt = end_table_name.getName(); if (schema == null) { if (stringCompare(tt, table)) { return true; } } else { if (stringCompare(tt, table) && stringCompare(ts, schema)) { return true; } } } // No way to determine if there is a match return false; } public int resolveColumnCount(String catalog, String schema, String table, String column) { ensureVarList(); if (catalog == null && schema == null && table == null && column == null) { // Return the column count return vars.length; } int matched_count = 0; for (int i = 0; i < vars.length; ++i) { Variable v = vars[i]; if (matchesVar(v, catalog, schema, table, column)) { ++matched_count; } } return matched_count; } public Variable resolveColumn(String catalog, String schema, String table, String column) { ensureVarList(); // System.out.println("resolveColumn: " + catalog + ", " + schema + ", " + // table + ", " + column); for (int i = 0; i < vars.length; ++i) { Variable v = vars[i]; if (matchesVar(v, catalog, schema, table, column)) { // System.out.println("Result: " + v); return v; } } throw new Error("Couldn't 
resolve to a column."); } public Variable[] allColumns() { ensureVarList(); return vars; } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/interpret/Function.java000066400000000000000000000131271330501023400263650ustar00rootroot00000000000000/** * com.mckoi.database.interpret.Function 30 Mar 2003 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.interpret; import com.mckoi.database.*; import java.util.ArrayList; import java.util.List; /** * A handler for defining and dropping functions. * * @author Tobias Downer */ public class Function extends Statement { /** * The type of command we are running through this Function object. */ private String type; /** * The name of the function. */ private TableName fun_name; // ----------- Implemented from Statement ---------- public void prepare() throws DatabaseException { type = (String) cmd.getObject("type"); String function_name = (String) cmd.getObject("function_name"); // Resolve the function name into a TableName object. String schema_name = database.getCurrentSchema(); fun_name = TableName.resolve(schema_name, function_name); fun_name = database.tryResolveCase(fun_name); } public Table evaluate() throws DatabaseException { DatabaseQueryContext context = new DatabaseQueryContext(database); // Does the schema exist? 
boolean ignore_case = database.isInCaseInsensitiveMode(); SchemaDef schema = database.resolveSchemaCase(fun_name.getSchema(), ignore_case); if (schema == null) { throw new DatabaseException("Schema '" + fun_name.getSchema() + "' doesn't exist."); } else { fun_name = new TableName(schema.getName(), fun_name.getName()); } if (type.equals("create")) { // Does the user have privs to create this function? if (!database.getDatabase().canUserCreateProcedureObject(context, user, fun_name)) { throw new UserAccessException( "User not permitted to create function: " + fun_name); } // Does a table already exist with this name? if (database.tableExists(fun_name)) { throw new DatabaseException("Database object with name '" + fun_name + "' already exists."); } // Get the information about the function we are creating List arg_names = (List) cmd.getObject("arg_names"); List arg_types = (List) cmd.getObject("arg_types"); TObject loc_name = (TObject) cmd.getObject("location_name"); TType return_type = (TType) cmd.getObject("return_type"); // Note that we currently ignore the arg_names list. // Convert arg types to an array TType[] arg_type_array = (TType[]) arg_types.toArray(new TType[arg_types.size()]); // We must parse the location name into a class name, and method name String java_specification = loc_name.getObject().toString(); // Resolve the java_specification to an invokation method. java.lang.reflect.Method proc_method = ProcedureManager.javaProcedureMethod(java_specification, arg_type_array); if (proc_method == null) { throw new DatabaseException("Unable to find invokation method for " + "Java stored procedure name: " + java_specification); } // Convert the information into an easily digestible form. 
ProcedureName proc_name = new ProcedureName(fun_name); int sz = arg_types.size(); TType[] arg_list = new TType[sz]; for (int i = 0; i < sz; ++i) { arg_list[i] = (TType) arg_types.get(i); } // Create the (Java) function, ProcedureManager manager = database.getProcedureManager(); manager.defineJavaProcedure(proc_name, java_specification, return_type, arg_list, user.getUserName()); // The initial grants for a procedure is to give the user who created it // full access. database.getGrantManager().addGrant( Privileges.PROCEDURE_ALL_PRIVS, GrantManager.TABLE, proc_name.toString(), user.getUserName(), true, Database.INTERNAL_SECURE_USERNAME); } else if (type.equals("drop")) { // Does the user have privs to create this function? if (!database.getDatabase().canUserDropProcedureObject(context, user, fun_name)) { throw new UserAccessException( "User not permitted to drop function: " + fun_name); } // Drop the function ProcedureName proc_name = new ProcedureName(fun_name); ProcedureManager manager = database.getProcedureManager(); manager.deleteProcedure(proc_name); // Drop the grants for this object database.getGrantManager().revokeAllGrantsOnObject( GrantManager.TABLE, proc_name.toString()); } else { throw new RuntimeException("Unknown type: " + type); } // Return an update result table. return FunctionTable.resultTable(context, 0); } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/interpret/Insert.java000066400000000000000000000244051330501023400260450ustar00rootroot00000000000000/** * com.mckoi.database.interpret.Insert 13 Sep 2001 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.interpret; import java.util.*; import com.mckoi.database.*; import com.mckoi.util.IntegerVector; /** * The instance class that stores all the information about an insert * statement for processing. * * @author Tobias Downer */ public class Insert extends Statement { String table_name; ArrayList col_list; ArrayList values_list; //list contains List of elements to insert StatementTree select; ArrayList column_sets; boolean from_values = false; boolean from_select = false; boolean from_set = false; // ----- /** * The table we are inserting stuff to. */ private DataTable insert_table; /** * For 'from_values' and 'from_select', this is a list of indices into the * 'insert_table' for the columns that we are inserting data into. */ private int[] col_index_list; /** * The list of Variable objects the represent the list of columns being * inserted into in this query. */ private Variable[] col_var_list; /** * The TableName we are inserting into. */ private TableName tname; /** * If this is a 'from_select' insert, the prepared Select object. */ private Select prepared_select; /** * Tables that are relationally linked to the table being inserted into, set * after 'prepare'. This is used to determine the tables we need to read * lock because we need to validate relational constraints on the tables. 
*/ private ArrayList relationally_linked_tables; // ---------- Implemented from Statement ---------- public void prepare() throws DatabaseException { // Prepare this object from the StatementTree table_name = (String) cmd.getObject("table_name"); col_list = (ArrayList) cmd.getObject("col_list"); values_list = (ArrayList) cmd.getObject("data_list"); select = (StatementTree) cmd.getObject("select"); column_sets = (ArrayList) cmd.getObject("assignments"); String type = (String) cmd.getObject("type"); from_values = type.equals("from_values"); from_select = type.equals("from_select"); from_set = type.equals("from_set"); // --- // Check 'values_list' contains all same size size insert element arrays. int first_len = -1; for (int n = 0; n < values_list.size(); ++n) { List exp_list = (List) values_list.get(n); if (first_len == -1 || first_len == exp_list.size()) { first_len = exp_list.size(); } else { throw new DatabaseException("The insert data list varies in size."); } } tname = resolveTableName(table_name, database); // Does the table exist? if (!database.tableExists(tname)) { throw new DatabaseException("Table '" + tname + "' does not exist."); } // Add the from table direct source for this table TableQueryDef table_query_def = database.getTableQueryDef(tname, null); addTable(new FromTableDirectSource(database, table_query_def, "INSERT_TABLE", tname, tname)); // Get the table we are inserting to insert_table = database.getTable(tname); // If column list is empty, then fill it with all columns from table. if (from_values || from_select) { // If 'col_list' is empty we must pick every entry from the insert // table. if (col_list.size() == 0) { for (int i = 0; i < insert_table.getColumnCount(); ++i) { col_list.add(insert_table.getColumnDefAt(i).getName()); } } // Resolve 'col_list' into a list of column indices into the insert // table. 
col_index_list = new int[col_list.size()]; col_var_list = new Variable[col_list.size()]; for (int i = 0; i < col_list.size(); ++i) { // Variable col = Variable.resolve(tname, (String) col_list.get(i)); Variable in_var = Variable.resolve((String) col_list.get(i)); Variable col = resolveColumn(in_var); int index = insert_table.fastFindFieldName(col); if (index == -1) { throw new DatabaseException("Can't find column: " + col); } col_index_list[i] = index; col_var_list[i] = col; } } // Make the 'from_values' clause into a 'from_set' if (from_values) { // If values to insert is different from columns list, if (col_list.size() != ((List) values_list.get(0)).size()) { throw new DatabaseException("Number of columns to insert is " + "different from columns selected to insert to."); } // Resolve all expressions in the added list. // For each value for (int i = 0; i < values_list.size(); ++i) { // Each value is a list of either expressions or "DEFAULT" List insert_elements = (List) values_list.get(i); int sz = insert_elements.size(); for (int n = 0; n < sz; ++n) { Object elem = insert_elements.get(n); if (elem instanceof Expression) { Expression exp = (Expression) elem; List elem_list = exp.allElements(); for (int p = 0; p < elem_list.size(); ++p) { Object ob = elem_list.get(p); if (ob instanceof Select) { throw new DatabaseException( "Illegal to have sub-select in expression."); } } // Resolve the expression. resolveExpression(exp); } } } } else if (from_select) { // Prepare the select statement prepared_select = new Select(); prepared_select.init(database, select, null); prepared_select.prepare(); } // If from a set, then resolve all values, else if (from_set) { // If there's a sub select in an expression in the 'SET' clause then // throw an error. 
for (int i = 0; i < column_sets.size(); ++i) { Assignment assignment = (Assignment) column_sets.get(i); Expression exp = assignment.getExpression(); List elem_list = exp.allElements(); for (int n = 0; n < elem_list.size(); ++n) { Object ob = elem_list.get(n); if (ob instanceof Select) { throw new DatabaseException( "Illegal to have sub-select in SET clause."); } } // Resolve the column names in the columns set. Variable v = assignment.getVariable(); Variable resolved_v = resolveVariableName(v); v.set(resolved_v); resolveExpression(assignment.getExpression()); } } // Resolve all tables linked to this TableName[] linked_tables = database.queryTablesRelationallyLinkedTo(tname); relationally_linked_tables = new ArrayList(linked_tables.length); for (int i = 0; i < linked_tables.length; ++i) { relationally_linked_tables.add(database.getTable(linked_tables[i])); } } public Table evaluate() throws DatabaseException { DatabaseQueryContext context = new DatabaseQueryContext(database); // Check that this user has privs to insert into the table. if (!database.getDatabase().canUserInsertIntoTableObject( context, user, tname, col_var_list)) { throw new UserAccessException( "User not permitted to insert in to table: " + table_name); } // Are we inserting from a select statement or from a 'set' assignment // list? int insert_count = 0; if (from_values) { // Set each row from the VALUES table, for (int i = 0; i < values_list.size(); ++i) { List insert_elements = (List) values_list.get(i); RowData row_data = insert_table.createRowDataObject(context); row_data.setupEntire(col_index_list, insert_elements, context); insert_table.add(row_data); ++insert_count; } } else if (from_select) { // Insert rows from the result select table. Table result = prepared_select.evaluate(); if (result.getColumnCount() != col_index_list.length) { throw new DatabaseException( "Number of columns in result don't match columns to insert."); } // Copy row list into an intermediate IntegerVector list. 
// (A RowEnumeration for a table being modified is undefined). IntegerVector row_list = new IntegerVector(); RowEnumeration en = result.rowEnumeration(); while (en.hasMoreRows()) { row_list.addInt(en.nextRowIndex()); } // For each row of the select table. int sz = row_list.size(); for (int i = 0; i < sz; ++i) { int rindex = row_list.intAt(i); RowData row_data = insert_table.createRowDataObject(context); for (int n = 0; n < col_index_list.length; ++n) { TObject cell = result.getCellContents(n, rindex); row_data.setColumnData(col_index_list[n], cell); } row_data.setDefaultForRest(context); insert_table.add(row_data); ++insert_count; } } else if (from_set) { // Insert rows from the set assignments. RowData row_data = insert_table.createRowDataObject(context); Assignment[] assignments = (Assignment[]) column_sets.toArray(new Assignment[column_sets.size()]); row_data.setupEntire(assignments, context); insert_table.add(row_data); ++insert_count; } // Notify TriggerManager that we've just done an update. if (insert_count > 0) { database.notifyTriggerEvent(new TriggerEvent( TriggerEvent.INSERT, tname.toString(), insert_count)); } // Return the number of rows we inserted. return FunctionTable.resultTable(context, insert_count); } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/interpret/Misc.java000066400000000000000000000041151330501023400254700ustar00rootroot00000000000000/** * com.mckoi.database.interpret.Misc 14 Sep 2001 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.interpret; import com.mckoi.database.*; import java.util.ArrayList; import java.util.List; /** * Misc statements that I couldn't be bothered to roll a new Statement class * for. These have to be exceptional statements that do not read or write * to any tables and run in exclusive mode. * * @author Tobias Downer */ public class Misc extends Statement { /** * Set to true if this statement is a shutdown statement. */ boolean shutdown = false; // ---------- Implemented from Statement ---------- public void prepare() throws DatabaseException { Object command = cmd.getObject("command"); shutdown = command.equals("shutdown"); } public Table evaluate() throws DatabaseException { DatabaseQueryContext context = new DatabaseQueryContext(database); // Is this a shutdown statement? if (shutdown == true) { // Check the user has privs to shutdown... if (!database.getDatabase().canUserShutDown(context, user)) { throw new UserAccessException( "User not permitted to shut down the database."); } // Shut down the database system. database.getDatabase().startShutDownThread(); // Return 0 to indicate we going to be closing shop! return FunctionTable.resultTable(context, 0); } return FunctionTable.resultTable(context, 0); } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/interpret/NoOp.java000066400000000000000000000023751330501023400254560ustar00rootroot00000000000000/** * com.mckoi.database.interpret.NoOp 14 Sep 2001 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.interpret; import com.mckoi.database.*; import java.util.ArrayList; import java.util.List; /** * A no operation statement. * * @author Tobias Downer */ public class NoOp extends Statement { // ---------- Implemented from Statement ---------- public void prepare() throws DatabaseException { // Nothing to prepare } public Table evaluate() throws DatabaseException { // No-op returns a result value of '0' return FunctionTable.resultTable(new DatabaseQueryContext(database), 0); } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/interpret/Planner.java000066400000000000000000003051611330501023400262010ustar00rootroot00000000000000/** * com.mckoi.database.interpret.Planner 12 Nov 2001 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package com.mckoi.database.interpret; import java.util.Collections; import java.util.ArrayList; import java.util.Iterator; import java.util.List; import com.mckoi.database.*; import com.mckoi.util.BigNumber; import java.util.Random; /** * Various methods for forming query plans on SQL queries. * * @author Tobias Downer */ public class Planner { /** * The name of the GROUP BY function table. */ private static TableName GROUP_BY_FUNCTION_TABLE = new TableName( "FUNCTIONTABLE"); /** * Used to generate unique marker names. */ private static Random marker_randomizer = new Random(); /** * Returns a randomly generated outer join name. */ private static String createRandomeOuterJoinName() { long v1 = marker_randomizer.nextLong(); long v2 = marker_randomizer.nextLong(); return "OUTER_JOIN_" + Long.toHexString(v1) + ":" + Long.toHexString(v2); } /** * Prepares the given SearchExpression object. This goes through each * element of the Expression. If the element is a variable it is qualified. * If the element is a TableSelectExpression it's converted to a * SelectQueriable object and prepared. */ private static void prepareSearchExpression( final DatabaseConnection db, final TableExpressionFromSet from_set, SearchExpression expression) throws DatabaseException { // This is used to prepare sub-queries and qualify variables in a // search expression such as WHERE or HAVING. 
// Prepare the sub-queries first expression.prepare(new ExpressionPreparer() { public boolean canPrepare(Object element) { return element instanceof TableSelectExpression; } public Object prepare(Object element) throws DatabaseException { TableSelectExpression sq_expr = (TableSelectExpression) element; TableExpressionFromSet sq_from_set = generateFromSet(sq_expr, db); sq_from_set.setParent(from_set); QueryPlanNode sq_plan = formQueryPlan(db, sq_expr, sq_from_set, null); // Form this into a query plan type return new TObject(TType.QUERY_PLAN_TYPE, new QueryPlan.CachePointNode(sq_plan)); } }); // Then qualify all the variables. Note that this will not qualify // variables in the sub-queries. expression.prepare(from_set.expressionQualifier()); } /** * Given a HAVING clause expression, this will generate a new HAVING clause * expression with all aggregate expressions put into the given extra * function list. */ private static Expression filterHavingClause(Expression having_expr, ArrayList aggregate_list, QueryContext context) { if (having_expr.size() > 1) { Operator op = (Operator) having_expr.last(); // If logical, split and filter the left and right expressions Expression[] exps = having_expr.split(); Expression new_left = filterHavingClause(exps[0], aggregate_list, context); Expression new_right = filterHavingClause(exps[1], aggregate_list, context); Expression expr = new Expression(new_left, op, new_right); return expr; } else { // Not logical so determine if the expression is an aggregate or not if (having_expr.hasAggregateFunction(context)) { // Has aggregate functions so we must put this expression on the // aggregate list. aggregate_list.add(having_expr); // And substitute it with a variable reference. Variable v = Variable.resolve("FUNCTIONTABLE.HAVINGAG_" + aggregate_list.size()); return new Expression(v); } else { // No aggregate functions so leave it as is. return having_expr; } } } /** * Given a TableExpression, generates a TableExpressionFromSet object. 
This * object is used to help qualify variable references. This */ static TableExpressionFromSet generateFromSet( TableSelectExpression select_expression, DatabaseConnection db) { // Get the 'from_clause' from the table expression FromClause from_clause = select_expression.from_clause; // Prepares the from_clause joining set. from_clause.getJoinSet().prepare(db); // Create a TableExpressionFromSet for this table expression TableExpressionFromSet from_set = new TableExpressionFromSet(db); // Add all tables from the 'from_clause' Iterator tables = from_clause.allTables().iterator(); while (tables.hasNext()) { FromTableDef ftdef = (FromTableDef) tables.next(); String unique_key = ftdef.getUniqueKey(); String alias = ftdef.getAlias(); // If this is a sub-query table, if (ftdef.isSubQueryTable()) { // eg. FROM ( SELECT id FROM Part ) TableSelectExpression sub_query = ftdef.getTableSelectExpression(); TableExpressionFromSet sub_query_from_set = generateFromSet(sub_query, db); // The aliased name of the table TableName alias_table_name = null; if (alias != null) { alias_table_name = new TableName(alias); } FromTableSubQuerySource source = new FromTableSubQuerySource(db, unique_key, sub_query, sub_query_from_set, alias_table_name); // Add to list of subquery tables to add to query, from_set.addTable(source); } // Else must be a standard query table, else { String name = ftdef.getName(); // Resolve to full table name TableName table_name = db.resolveTableName(name); if (!db.tableExists(table_name)) { throw new StatementException( "Table '" + table_name + "' was not found."); } TableName given_name = null; if (alias != null) { given_name = new TableName(alias); } // Get the TableQueryDef object for this table name (aliased). 
TableQueryDef table_query_def = db.getTableQueryDef(table_name, given_name); FromTableDirectSource source = new FromTableDirectSource(db, table_query_def, unique_key, given_name, table_name); from_set.addTable(source); } } // while (tables.hasNext()) // Set up functions, aliases and exposed variables for this from set, // The list of columns being selected (SelectColumn). ArrayList columns = select_expression.columns; // For each column being selected for (int i = 0; i < columns.size(); ++i) { SelectColumn col = (SelectColumn) columns.get(i); // Is this a glob? (eg. Part.* ) if (col.glob_name != null) { // Find the columns globbed and add to the 's_col_list' result. if (col.glob_name.equals("*")) { from_set.exposeAllColumns(); } else { // Otherwise the glob must be of the form '[table name].*' String tname = col.glob_name.substring(0, col.glob_name.indexOf(".*")); TableName tn = TableName.resolve(tname); from_set.exposeAllColumnsFromSource(tn); } } else { // Otherwise must be a standard column reference. Note that at this // time we aren't sure if a column expression is correlated and is // referencing an outer source. This means we can't verify if the // column expression is valid or not at this point. // If this column is aliased, add it as a function reference to the // TableExpressionFromSet. 
String alias = col.alias; Variable v = col.expression.getVariable(); boolean alias_match_v = (v != null && alias != null && from_set.stringCompare(v.getName(), alias)); if (alias != null && !alias_match_v) { from_set.addFunctionRef(alias, col.expression); from_set.exposeVariable(new Variable(alias)); } else if (v != null) { Variable resolved = from_set.resolveReference(v); if (resolved == null) { from_set.exposeVariable(v); } else { from_set.exposeVariable(resolved); } } else { String fun_name = col.expression.text().toString(); from_set.addFunctionRef(fun_name, col.expression); from_set.exposeVariable(new Variable(fun_name)); } } } // for each column selected return from_set; } /** * Forms a query plan (QueryPlanNode) from the given TableSelectExpression * and TableExpressionFromSet. The TableSelectExpression describes the * SELECT query (or sub-query), and the TableExpressionFromSet is used to * resolve expression references. *

* The 'order_by' argument is a list of ByColumn objects that represent * an optional ORDER BY clause. If this is null or the list is empty, no * ordering is done. */ public static QueryPlanNode formQueryPlan(DatabaseConnection db, TableSelectExpression expression, TableExpressionFromSet from_set, ArrayList order_by) throws DatabaseException { QueryContext context = new DatabaseQueryContext(db); // ----- Resolve the SELECT list // What we are selecting QuerySelectColumnSet column_set = new QuerySelectColumnSet(from_set); // The list of columns being selected (SelectColumn). ArrayList columns = expression.columns; // If there are 0 columns selected, then we assume the result should // show all of the columns in the result. boolean do_subset_column = (columns.size() != 0); // For each column being selected for (int i = 0; i < columns.size(); ++i) { SelectColumn col = (SelectColumn) columns.get(i); // Is this a glob? (eg. Part.* ) if (col.glob_name != null) { // Find the columns globbed and add to the 's_col_list' result. if (col.glob_name.equals("*")) { column_set.selectAllColumnsFromAllSources(); } else { // Otherwise the glob must be of the form '[table name].*' String tname = col.glob_name.substring(0, col.glob_name.indexOf(".*")); TableName tn = TableName.resolve(tname); column_set.selectAllColumnsFromSource(tn); } } else { // Otherwise must be a standard column reference. column_set.selectSingleColumn(col); } } // for each column selected // Prepare the column_set, column_set.prepare(context); // ----- // Resolve any numerical references in the ORDER BY list (eg. // '1' will be a reference to column 1. 
if (order_by != null) { ArrayList prepared_col_set = column_set.s_col_list; for (int i = 0; i < order_by.size(); ++i) { ByColumn col = (ByColumn) order_by.get(i); Expression exp = col.exp; if (exp.size() == 1) { Object last_elem = exp.last(); if (last_elem instanceof TObject) { BigNumber bnum = ((TObject) last_elem).toBigNumber(); if (bnum.getScale() == 0) { int col_ref = bnum.intValue() - 1; if (col_ref >= 0 && col_ref < prepared_col_set.size()) { SelectColumn scol = (SelectColumn) prepared_col_set.get(col_ref); col.exp = new Expression(scol.expression); } } } } } } // ----- // Set up plans for each table in the from clause of the query. For // sub-queries, we recurse. QueryTableSetPlanner table_planner = new QueryTableSetPlanner(); for (int i = 0; i < from_set.setCount(); ++i) { FromTableInterface table = from_set.getTable(i); if (table instanceof FromTableSubQuerySource) { // This represents a sub-query in the FROM clause FromTableSubQuerySource sq_table = (FromTableSubQuerySource) table; TableSelectExpression sq_expr = sq_table.getTableExpression(); TableExpressionFromSet sq_from_set = sq_table.getFromSet(); // Form a plan for evaluating the sub-query FROM QueryPlanNode sq_plan = formQueryPlan(db, sq_expr, sq_from_set, null); // The top should always be a SubsetNode, if (sq_plan instanceof QueryPlan.SubsetNode) { QueryPlan.SubsetNode subset_node = (QueryPlan.SubsetNode) sq_plan; subset_node.setGivenName(sq_table.getAliasedName()); } else { throw new RuntimeException("Top plan is not a SubsetNode!"); } table_planner.addTableSource(sq_plan, sq_table); } else if (table instanceof FromTableDirectSource) { // This represents a direct referencable table in the FROM clause FromTableDirectSource ds_table = (FromTableDirectSource) table; TableName given_name = ds_table.getGivenTableName(); TableName root_name = ds_table.getRootTableName(); String aliased_name = null; if (!root_name.equals(given_name)) { aliased_name = given_name.getName(); } QueryPlanNode ds_plan = 
ds_table.createFetchQueryPlanNode(); table_planner.addTableSource(ds_plan, ds_table); } else { throw new RuntimeException( "Unknown table source instance: " + table.getClass()); } } // ----- // The WHERE and HAVING clauses SearchExpression where_clause = expression.where_clause; SearchExpression having_clause = expression.having_clause; // Look at the join set and resolve the ON Expression to this statement JoiningSet join_set = expression.from_clause.getJoinSet(); // Perform a quick scan and see if there are any outer joins in the // expression. boolean all_inner_joins = true; for (int i = 0; i < join_set.getTableCount() - 1; ++i) { int type = join_set.getJoinType(i); if (type != JoiningSet.INNER_JOIN) { all_inner_joins = false; } } // Prepare the joins for (int i = 0; i < join_set.getTableCount() - 1; ++i) { int type = join_set.getJoinType(i); Expression on_expression = join_set.getOnExpression(i); if (all_inner_joins) { // If the whole join set is inner joins then simply move the on // expression (if there is one) to the WHERE clause. if (on_expression != null) { where_clause.appendExpression(on_expression); } } else { // Not all inner joins, if (type == JoiningSet.INNER_JOIN && on_expression == null) { // Regular join with no ON expression, so no preparation necessary } else { // Either an inner join with an ON expression, or an outer join with // ON expression if (on_expression == null) { throw new RuntimeException("No ON expression in join."); } // Resolve the on_expression on_expression.prepare(from_set.expressionQualifier()); // And set it in the planner table_planner.setJoinInfoBetweenSources(i, type, on_expression); } } } // Prepare the WHERE and HAVING clause, qualifies all variables and // prepares sub-queries. prepareSearchExpression(db, from_set, where_clause); prepareSearchExpression(db, from_set, having_clause); // Any extra AGGREGATE functions that are part of the HAVING clause that // we need to add. 
This is a list of a name followed by the expression // that contains the aggregate function. ArrayList extra_aggregate_functions = new ArrayList(); Expression new_having_clause = null; if (having_clause.getFromExpression() != null) { new_having_clause = filterHavingClause(having_clause.getFromExpression(), extra_aggregate_functions, context); having_clause.setFromExpression(new_having_clause); } // Any GROUP BY functions, ArrayList group_by_functions = new ArrayList(); // Resolve the GROUP BY variable list references in this from set ArrayList group_list_in = expression.group_by; int gsz = group_list_in.size(); Variable[] group_by_list = new Variable[gsz]; for (int i = 0; i < gsz; ++i) { ByColumn by_column = (ByColumn) group_list_in.get(i); Expression exp = by_column.exp; // Prepare the group by expression exp.prepare(from_set.expressionQualifier()); // Is the group by variable a complex expression? Variable v = exp.getVariable(); Expression group_by_expression; if (v == null) { group_by_expression = exp; } else { // Can we dereference the variable to an expression in the SELECT? group_by_expression = from_set.dereferenceAssignment(v); } if (group_by_expression != null) { if (group_by_expression.hasAggregateFunction(context)) { throw new StatementException("Aggregate expression '" + group_by_expression.text().toString() + "' is not allowed in GROUP BY clause."); } // Complex expression so add this to the function list. 
int group_by_fun_num = group_by_functions.size(); group_by_functions.add(group_by_expression); v = new Variable(GROUP_BY_FUNCTION_TABLE, "#GROUPBY-" + group_by_fun_num); } group_by_list[i] = v; } // Resolve GROUP MAX variable to a reference in this from set Variable groupmax_column = expression.group_max; if (groupmax_column != null) { Variable v = from_set.resolveReference(groupmax_column); if (v == null) { throw new StatementException("Could find GROUP MAX reference '" + groupmax_column + "'"); } groupmax_column = v; } // ----- // Now all the variables should be resolved and correlated variables set // up as appropriate. // If nothing in the FROM clause then simply evaluate the result of the // select if (from_set.setCount() == 0) { if (column_set.aggregate_count > 0) { throw new StatementException( "Invalid use of aggregate function in select with no FROM clause"); } // Make up the lists ArrayList s_col_list = column_set.s_col_list; int sz = s_col_list.size(); String[] col_names = new String[sz]; Expression[] exp_list = new Expression[sz]; Variable[] subset_vars = new Variable[sz]; Variable[] aliases = new Variable[sz]; for (int i = 0; i < sz; ++i) { SelectColumn scol = (SelectColumn) s_col_list.get(i); exp_list[i] = scol.expression; col_names[i] = scol.internal_name.getName(); subset_vars[i] = new Variable(scol.internal_name); aliases[i] = new Variable(scol.resolved_name); } return new QueryPlan.SubsetNode( new QueryPlan.CreateFunctionsNode( new QueryPlan.SingleRowTableNode(), exp_list, col_names), subset_vars, aliases); } // Plan the where clause. The returned node is the plan to evaluate the // WHERE clause. 
QueryPlanNode node = table_planner.planSearchExpression(expression.where_clause); // Make up the functions list, ArrayList functions_list = column_set.function_col_list; int fsz = functions_list.size(); ArrayList complete_fun_list = new ArrayList(); for (int i = 0; i < fsz; ++i) { SelectColumn scol = (SelectColumn) functions_list.get(i); complete_fun_list.add(scol.expression); complete_fun_list.add(scol.internal_name.getName()); } for (int i = 0; i < extra_aggregate_functions.size(); ++i) { complete_fun_list.add(extra_aggregate_functions.get(i)); complete_fun_list.add("HAVINGAG_" + (i + 1)); } int fsz2 = complete_fun_list.size() / 2; Expression[] def_fun_list = new Expression[fsz2]; String[] def_fun_names = new String[fsz2]; for (int i = 0; i < fsz2; ++i) { def_fun_list[i] = (Expression) complete_fun_list.get(i * 2); def_fun_names[i] = (String) complete_fun_list.get((i * 2) + 1); } // If there is more than 1 aggregate function or there is a group by // clause, then we must add a grouping plan. if (column_set.aggregate_count > 0 || gsz > 0) { // If there is no GROUP BY clause then assume the entire result is the // group. if (gsz == 0) { node = new QueryPlan.GroupNode(node, groupmax_column, def_fun_list, def_fun_names); } else { // Do we have any group by functions that need to be planned first? int gfsz = group_by_functions.size(); if (gfsz > 0) { Expression[] group_fun_list = new Expression[gfsz]; String[] group_fun_name = new String[gfsz]; for (int i = 0; i < gfsz; ++i) { group_fun_list[i] = (Expression) group_by_functions.get(i); group_fun_name[i] = "#GROUPBY-" + i; } node = new QueryPlan.CreateFunctionsNode(node, group_fun_list, group_fun_name); } // Otherwise we provide the 'group_by_list' argument node = new QueryPlan.GroupNode(node, group_by_list, groupmax_column, def_fun_list, def_fun_names); } } else { // Otherwise no grouping is occuring. We simply need create a function // node with any functions defined in the SELECT. 
// Plan a FunctionsNode with the functions defined in the SELECT. if (fsz > 0) { node = new QueryPlan.CreateFunctionsNode(node, def_fun_list, def_fun_names); } } // The result column list ArrayList s_col_list = column_set.s_col_list; int sz = s_col_list.size(); // Evaluate the having clause if necessary if (expression.having_clause.getFromExpression() != null) { // Before we evaluate the having expression we must substitute all the // aliased variables. Expression having_expr = having_clause.getFromExpression(); substituteAliasedVariables(having_expr, s_col_list); PlanTableSource source = table_planner.getSingleTableSource(); source.updatePlan(node); node = table_planner.planSearchExpression(having_clause); } // Do we have a composite select expression to process? QueryPlanNode right_composite = null; if (expression.next_composite != null) { TableSelectExpression composite_expr = expression.next_composite; // Generate the TableExpressionFromSet hierarchy for the expression, TableExpressionFromSet composite_from_set = generateFromSet(composite_expr, db); // Form the right plan right_composite = formQueryPlan(db, composite_expr, composite_from_set, null); } // Do we do a final subset column? Variable[] aliases = null; if (do_subset_column) { // Make up the lists Variable[] subset_vars = new Variable[sz]; aliases = new Variable[sz]; for (int i = 0; i < sz; ++i) { SelectColumn scol = (SelectColumn) s_col_list.get(i); subset_vars[i] = new Variable(scol.internal_name); aliases[i] = new Variable(scol.resolved_name); } // If we are distinct then add the DistinctNode here if (expression.distinct) { node = new QueryPlan.DistinctNode(node, subset_vars); } // Process the ORDER BY? // Note that the ORDER BY has to occur before the subset call, but // after the distinct because distinct can affect the ordering of the // result. 
if (right_composite == null && order_by != null) { node = planForOrderBy(node, order_by, from_set, s_col_list); } // Rename the columns as specified in the SELECT node = new QueryPlan.SubsetNode(node, subset_vars, aliases); } else { // Process the ORDER BY? if (right_composite == null && order_by != null) { node = planForOrderBy(node, order_by, from_set, s_col_list); } } // Do we have a composite to merge in? if (right_composite != null) { // For the composite node = new QueryPlan.CompositeNode(node, right_composite, expression.composite_function, expression.is_composite_all); // Final order by? if (order_by != null) { node = planForOrderBy(node, order_by, from_set, s_col_list); } // Ensure a final subset node if (!(node instanceof QueryPlan.SubsetNode) && aliases != null) { node = new QueryPlan.SubsetNode(node, aliases, aliases); } } return node; } /** * Plans an ORDER BY set. This is given its own function because we may * want to plan this at the end of a number of composite functions. *

   * NOTE: s_col_list is optional.
   */
  public static QueryPlanNode planForOrderBy(QueryPlanNode plan,
               ArrayList order_by, TableExpressionFromSet from_set,
               ArrayList s_col_list)
                                                  throws DatabaseException {

    TableName FUNCTION_TABLE = new TableName("FUNCTIONTABLE");

    // Sort on the ORDER BY clause
    if (order_by.size() > 0) {
      int sz = order_by.size();
      Variable[] order_list = new Variable[sz];
      boolean[] ascending_list = new boolean[sz];

      // Expressions (not plain column refs) we must compute before sorting.
      ArrayList function_orders = new ArrayList();

      for (int i = 0; i < sz; ++i) {
        ByColumn column = (ByColumn) order_by.get(i);
        Expression exp = column.exp;
        ascending_list[i] = column.ascending;
        Variable v = exp.getVariable();
        if (v != null) {
          // Simple column reference - resolve it against the from set.
          Variable new_v = from_set.resolveReference(v);
          if (new_v == null) {
            throw new StatementException(
                                  "Can not resolve ORDER BY variable: " + v);
          }
          // Map a SELECT alias to its internal (function table) name.
          substituteAliasedVariable(new_v, s_col_list);

          order_list[i] = new_v;
        }
        else {
          // Otherwise we must be ordering by an expression such as
          // '0 - a'.

          // Resolve the expression,
          exp.prepare(from_set.expressionQualifier());

          // Make sure we substitute any aliased columns in the order by
          // columns.
          substituteAliasedVariables(exp, s_col_list);

          // The new ordering functions are called 'FUNCTIONTABLE.#ORDER-n'
          // where n is the number of the ordering expression.
          order_list[i] =
              new Variable(FUNCTION_TABLE, "#ORDER-" + function_orders.size());
          function_orders.add(exp);
        }

      }

      // If there are functional orderings,
      // For this we must define a new FunctionTable with the expressions,
      // then order by those columns, and then use another SubsetNode
      // query node.
      int fsz = function_orders.size();
      if (fsz > 0) {
        Expression[] funs = new Expression[fsz];
        String[] fnames = new String[fsz];
        for (int n = 0; n < fsz; ++n) {
          funs[n] = (Expression) function_orders.get(n);
          fnames[n] = "#ORDER-" + n;
        }

        if (plan instanceof QueryPlan.SubsetNode) {
          // If the top plan is a QueryPlan.SubsetNode then we use the
          //   information from it to create a new SubsetNode that
          //   doesn't include the functional orders we have attached here.
          QueryPlan.SubsetNode top_subset_node = (QueryPlan.SubsetNode) plan;
          Variable[] mapped_names = top_subset_node.getNewColumnNames();

          // Defines the sort functions
          plan = new QueryPlan.CreateFunctionsNode(plan, funs, fnames);
          // Then plan the sort
          plan = new QueryPlan.SortNode(plan, order_list, ascending_list);
          // Then plan the subset (drops the temporary #ORDER-n columns)
          plan = new QueryPlan.SubsetNode(plan, mapped_names, mapped_names);
        }
        else {
          // Defines the sort functions
          plan = new QueryPlan.CreateFunctionsNode(plan, funs, fnames);
          // Plan the sort
          plan = new QueryPlan.SortNode(plan, order_list, ascending_list);
        }

      }
      else {
        // No functional orders so we only need to sort by the columns
        // defined.
        plan = new QueryPlan.SortNode(plan, order_list, ascending_list);
      }

    }

    return plan;
  }

  /**
   * Substitutes any aliased variables in the given expression with the
   * function name equivalent.  For example, if we have a 'SELECT 3 + 4 Bah'
   * then resolving on variable Bah will be substituted to the function column
   * that represents the result of 3 + 4.
   */
  private static void substituteAliasedVariables(
                                Expression expression, ArrayList s_col_list) {
    List all_vars = expression.allVariables();
    for (int i = 0; i < all_vars.size(); ++i) {
      Variable v = (Variable) all_vars.get(i);
      substituteAliasedVariable(v, s_col_list);
    }
  }

  /**
   * Substitutes the given variable in place (via Variable.set) with the
   * internal function-table name of the SELECT column whose resolved name
   * it matches.  Does nothing when 's_col_list' is null or no match exists.
   */
  private static void substituteAliasedVariable(Variable v,
                                                ArrayList s_col_list) {
    if (s_col_list != null) {
      int sz = s_col_list.size();
      for (int n = 0; n < sz; ++n) {
        SelectColumn scol = (SelectColumn) s_col_list.get(n);
        if (v.equals(scol.resolved_name)) {
          v.set(scol.internal_name);
        }
      }
    }
  }

  // ---------- Inner classes ----------

  /**
   * A container object for the set of SelectColumn objects selected in a
   * query.
   */
  private static class QuerySelectColumnSet {

    /**
     * The name of the table where functions are defined.
     */
    private static TableName FUNCTION_TABLE_NAME =
                                             new TableName("FUNCTIONTABLE");

    /**
     * The tables we are selecting from.
     */
    private TableExpressionFromSet from_set;

    /**
     * The list of SelectColumn.
     */
    ArrayList s_col_list;

    /**
     * The list of functions in this column set.
     */
    ArrayList function_col_list;

    /**
     * The current number of 'FUNCTIONTABLE.' columns in the table.  This is
     * incremented for each custom column.
     */
    private int running_fun_number = 0;

    /**
     * The count of aggregate and constant columns included in the result set.
     * Aggregate columns are, (count(*), avg(cost_of) * 0.75, etc).  Constant
     * columns are, (9 * 4, 2, (9 * 7 / 4) + 4, etc).
     */
    int aggregate_count = 0, constant_count = 0;

    /**
     * Constructor.
     */
    public QuerySelectColumnSet(TableExpressionFromSet from_set) {
      this.from_set = from_set;
      s_col_list = new ArrayList();
      function_col_list = new ArrayList();
    }

    /**
     * Adds a single SelectColumn to the list of output columns from the
     * query.
     * <p>
     * Note that at this point the information in the given SelectColumn
     * may not be completely qualified.
     */
    void selectSingleColumn(SelectColumn col) {
      s_col_list.add(col);
    }

    /**
     * Adds all columns from the given FromTableInterface object.
     */
    void addAllFromTable(FromTableInterface table) {
      // Select all the tables
      Variable[] vars = table.allColumns();
      int s_col_list_max = s_col_list.size();
      for (int i = 0; i < vars.length; ++i) {
        // The Variable
        Variable v = vars[i];

        // Make up the SelectColumn
        SelectColumn ncol = new SelectColumn();
        Expression e = new Expression(v);
        e.text().append(v.toString());
        ncol.alias = null;
        ncol.expression = e;
        ncol.resolved_name = v;
        ncol.internal_name = v;

        // Add to the list of columns selected
        selectSingleColumn(ncol);
      }
    }

    /**
     * Adds all columns from the given table object.  This is used to set up
     * the columns that are to be viewed as the result of the select
     * statement.
     */
    void selectAllColumnsFromSource(TableName table_name) {
      // Attempt to find the table in the from set.
      FromTableInterface table = from_set.findTable(
                               table_name.getSchema(), table_name.getName());
      if (table == null) {
        throw new StatementException(table_name.toString() +
                                     ".* is not a valid reference.");
      }

      addAllFromTable(table);
    }

    /**
     * Sets up this column set with all columns from all table sources.
     */
    void selectAllColumnsFromAllSources() {
      for (int p = 0; p < from_set.setCount(); ++p) {
        FromTableInterface table = from_set.getTable(p);
        addAllFromTable(table);
      }
    }

    /**
     * Adds a new hidden function into the column set.  This is intended
     * to be used for implied functions.  For example, a query may have a
     * function in a GROUP BY clause.  It's desirable to include the function
     * in the column set but not in the final result.
     * <p>
     * Returns an absolute Variable object that can be used to reference
     * this hidden column.
     */
    Variable addHiddenFunction(String fun_alias, Expression function,
                               QueryContext context) {
      SelectColumn scol = new SelectColumn();
      scol.resolved_name = new Variable(fun_alias);
      scol.alias = fun_alias;
      scol.expression = function;
      scol.internal_name = new Variable(FUNCTION_TABLE_NAME, fun_alias);

      // If this is an aggregate column then add to aggregate count.
      if (function.hasAggregateFunction(context)) {
        ++aggregate_count;
      }
      // If this is a constant column then add to constant count.
      else if (function.isConstant()) {
        ++constant_count;
      }

      function_col_list.add(scol);

      return scol.internal_name;
    }

    /**
     * Prepares the given SelectColumn by fully qualifying the expression and
     * allocating it correctly within this context.  Complex (non-variable)
     * expressions are given an internal 'FUNCTIONTABLE.n' name; aggregate
     * expressions get an '_A' suffix on that name.
     */
    private void prepareSelectColumn(SelectColumn col, QueryContext context)
                                                    throws DatabaseException {
      // Check to see if we have any Select statements in the
      //   Expression.  This is necessary, because we can't have a
      //   sub-select evaluated during list table downloading.
      List exp_elements = col.expression.allElements();
      for (int n = 0; n < exp_elements.size(); ++n) {
        if (exp_elements.get(n) instanceof TableSelectExpression) {
          throw new StatementException(
                                     "Sub-query not allowed in column list.");
        }
      }

      // First fully qualify the select expression
      col.expression.prepare(from_set.expressionQualifier());

      // If the expression isn't a simple variable, then add to
      // function list.
      Variable v = col.expression.getVariable();
      if (v == null) {
        // This means we have a complex expression.

        ++running_fun_number;
        String agg_str = Integer.toString(running_fun_number);

        // If this is an aggregate column then add to aggregate count.
        if (col.expression.hasAggregateFunction(context)) {
          ++aggregate_count;
          // Add '_A' code to end of internal name of column to signify this
          // is an aggregate column
          agg_str += "_A";
        }
        // If this is a constant column then add to constant count.
        else if (col.expression.isConstant()) {
          ++constant_count;
        }
        else {
          // Must be an expression with variable's embedded ( eg.
          //   (part_id + 3) * 4, (id * value_of_part), etc )
        }
        function_col_list.add(col);

        col.internal_name = new Variable(FUNCTION_TABLE_NAME, agg_str);
        if (col.alias == null) {
          col.alias = new String(col.expression.text());
        }
        col.resolved_name = new Variable(col.alias);

      }
      else {
        // Not a complex expression
        col.internal_name = v;
        if (col.alias == null) {
          col.resolved_name = v;
        }
        else {
          col.resolved_name = new Variable(col.alias);
        }
      }

    }

    /**
     * Resolves all variable objects in each column.
     */
    void prepare(QueryContext context) throws DatabaseException {
      // Prepare each of the columns selected.
      // NOTE: A side-effect of this is that it qualifies all the Expressions
      //   that are functions in TableExpressionFromSet.  After this section,
      //   we can dereference variables for their function Expression.
      for (int i = 0; i < s_col_list.size(); ++i) {
        SelectColumn column = (SelectColumn) s_col_list.get(i);
        prepareSelectColumn(column, context);
      }
    }

  }

  /**
   * A table set planner that maintains a list of table dependence lists and
   * progressively constructs a plan tree from the bottom up.
   */
  private static class QueryTableSetPlanner {

    /**
     * The list of PlanTableSource objects for each source being planned.
     */
    private ArrayList table_list;

    /**
     * If a join has occurred since the planner was constructed or copied then
     * this is set to true.
     */
    private boolean has_join_occurred;

    /**
     * Constructor.
     */
    public QueryTableSetPlanner() {
      this.table_list = new ArrayList();
      has_join_occurred = false;
    }

    /**
     * Add a PlanTableSource to this planner.
     */
    private void addPlanTableSource(PlanTableSource source) {
      table_list.add(source);
      has_join_occurred = true;
    }

    /**
     * Returns true if a join has occurred ('table_list' has been modified).
*/ public boolean hasJoinOccured() { return has_join_occurred; } /** * Adds a new table source to the planner given a Plan that 'creates' * the source, and a FromTableInterface that describes the source created * by the plan. */ public void addTableSource(QueryPlanNode plan, FromTableInterface from_def) { Variable[] all_cols = from_def.allColumns(); String[] unique_names = new String[] { from_def.getUniqueName() }; addPlanTableSource(new PlanTableSource(plan, all_cols, unique_names)); } /** * Returns the index of the given PlanTableSource in the table list. */ private int indexOfPlanTableSource(PlanTableSource source) { int sz = table_list.size(); for (int i = 0; i < sz; ++i) { if (table_list.get(i) == source) { return i; } } return -1; } /** * Links the last added table source to the previous added table source * through this joining information. *

* 'between_index' represents the point in between the table sources that * the join should be setup for. For example, to set the join between * TableSource 0 and 1, use 0 as the between index. A between index of 3 * represents the join between TableSource index 2 and 2. */ public void setJoinInfoBetweenSources( int between_index, int join_type, Expression on_expr) { PlanTableSource plan_left = (PlanTableSource) table_list.get(between_index); PlanTableSource plan_right = (PlanTableSource) table_list.get(between_index + 1); plan_left.setRightJoinInfo(plan_right, join_type, on_expr); plan_right.setLeftJoinInfo(plan_left, join_type, on_expr); } /** * Forms a new PlanTableSource that's the concatination of the given * two PlanTableSource objects. */ public static PlanTableSource concatTableSources( PlanTableSource left, PlanTableSource right, QueryPlanNode plan) { // Merge the variable list Variable[] new_var_list = new Variable[left.var_list.length + right.var_list.length]; int i = 0; for (int n = 0; n < left.var_list.length; ++n) { new_var_list[i] = left.var_list[n]; ++i; } for (int n = 0; n < right.var_list.length; ++n) { new_var_list[i] = right.var_list[n]; ++i; } // Merge the unique table names list String[] new_unique_list = new String[left.unique_names.length + right.unique_names.length]; i = 0; for (int n = 0; n < left.unique_names.length; ++n) { new_unique_list[i] = left.unique_names[n]; ++i; } for (int n = 0; n < right.unique_names.length; ++n) { new_unique_list[i] = right.unique_names[n]; ++i; } // Return the new table source plan. return new PlanTableSource(plan, new_var_list, new_unique_list); } /** * Joins two tables when a plan is generated for joining the two tables. */ public PlanTableSource mergeTables( PlanTableSource left, PlanTableSource right, QueryPlanNode merge_plan) { // Remove the sources from the table list. table_list.remove(left); table_list.remove(right); // Add the concatination of the left and right tables. 
PlanTableSource c_plan = concatTableSources(left, right, merge_plan); c_plan.setJoinInfoMergedBetween(left, right); c_plan.setUpdated(); addPlanTableSource(c_plan); // Return the name plan return c_plan; } /** * Finds and returns the PlanTableSource in the list of tables that * contains the given Variable reference. */ public PlanTableSource findTableSource(Variable ref) { int sz = table_list.size(); // If there is only 1 plan then assume the variable is in there. if (sz == 1) { return (PlanTableSource) table_list.get(0); } for (int i = 0; i < sz; ++i) { PlanTableSource source = (PlanTableSource) table_list.get(i); if (source.containsVariable(ref)) { return source; } } throw new RuntimeException( "Unable to find table with variable reference: " + ref); } /** * Finds a common PlanTableSource that contains the list of variables * given. If the list is 0 or there is no common source then null is * returned. */ public PlanTableSource findCommonTableSource(List var_list) { if (var_list.size() == 0) { return null; } PlanTableSource plan = findTableSource((Variable) var_list.get(0)); int i = 1; int sz = var_list.size(); while (i < sz) { PlanTableSource p2 = findTableSource((Variable) var_list.get(i)); if (plan != p2) { return null; } ++i; } return plan; } /** * Finds and returns the PlanTableSource in the list of table that * contains the given unique key name. */ public PlanTableSource findTableSourceWithUniqueKey(String key) { int sz = table_list.size(); for (int i = 0; i < sz; ++i) { PlanTableSource source = (PlanTableSource) table_list.get(i); if (source.containsUniqueKey(key)) { return source; } } throw new RuntimeException( "Unable to find table with unique key: " + key); } /** * Returns the single PlanTableSource for this planner. 
     */
    private PlanTableSource getSingleTableSource() {
      if (table_list.size() != 1) {
        throw new RuntimeException("Not a single table source.");
      }
      return (PlanTableSource) table_list.get(0);
    }

    /**
     * Wraps the plan of every table source in 'table_list' in a
     * CachePointNode.  Note that this does not change the 'update'
     * status of the table sources.  If there is currently a CachePointNode
     * on any of the sources then no update is made for that source.
     */
    private void setCachePoints() {
      int sz = table_list.size();
      for (int i = 0; i < sz; ++i) {
        PlanTableSource plan = (PlanTableSource) table_list.get(i);
        if (!(plan.getPlan() instanceof QueryPlan.CachePointNode)) {
          plan.plan = new QueryPlan.CachePointNode(plan.getPlan());
        }
      }
    }

    /**
     * Creates a single PlanTableSource that encapsulates all the given
     * variables in a single table.  If this means a table must be joined
     * with another using the natural join conditions then this happens here.
     * <p>
     * The intention of this function is to produce a plan that encapsulates
     * all the variables needed to perform a specific evaluation.
     * <p>
     * Note, this has the potential to cause 'natural join' situations which
     * are bad performance.  It is a good idea to perform joins using other
     * methods before this is used.
     * <p>
     * Note, this will change the 'table_list' variable in this class if
     * tables are joined.
     */
    private PlanTableSource joinAllPlansWithVariables(List all_vars) {
      // Collect all the plans that encapsulate these variables.
      ArrayList touched_plans = new ArrayList();
      int sz = all_vars.size();
      for (int i = 0; i < sz; ++i) {
        Variable v = (Variable) all_vars.get(i);
        PlanTableSource plan = findTableSource(v);
        if (!touched_plans.contains(plan)) {
          touched_plans.add(plan);
        }
      }
      // Now 'touched_plans' contains a list of PlanTableSource for each
      // plan to be joined.

      return joinAllPlansToSingleSource(touched_plans);
    }

    /**
     * Determines how (or whether) the two plans can be naturally joined.
     * Two plans can be joined when either the left or right plan of the
     * first source points to the second source, or when one has no left
     * plan and the other no right plan (or vice versa).
     * <p>
     * Returns a status code: 0 when the plans can be merged directly,
     * 1 when there is a right join clash, 2 when there is a left (or
     * left-and-right) join clash.
     */
    private int canPlansBeNaturallyJoined(
                               PlanTableSource plan1, PlanTableSource plan2) {
      if (plan1.left_plan == plan2 || plan1.right_plan == plan2) {
        return 0;
      }
      else if (plan1.left_plan != null && plan2.left_plan != null) {
        // This is a left clash
        return 2;
      }
      else if (plan1.right_plan != null && plan2.right_plan != null) {
        // This is a right clash
        return 1;
      }
      else if ((plan1.left_plan == null && plan2.right_plan == null) ||
               (plan1.right_plan == null && plan2.left_plan == null)) {
        // This means a merge between the plans is fine
        return 0;
      }
      else {
        // Must be a left and right clash
        return 2;
      }
    }

    /**
     * Given a list of PlanTableSource objects, this will produce a plan that
     * naturally joins all the tables together into a single plan.  The
     * join algorithm used is determined by the information in the FROM
     * clause.  An OUTER JOIN, for example, will join depending on the
     * conditions provided in the ON clause.  If no explicit join method is
     * provided then a natural join will be planned.
     *

* Care should be taken with this because this method can produce natural * joins which are often optimized out by more appropriate join expressions * that can be processed before this is called. *

   * Note, this will change the 'table_list' variable in this class if tables
   * are joined.
   *
   * Returns null if no plans are provided.
   *
   * The merge loop below repeatedly takes the first two plans in the working
   * list and either joins them directly or first resolves a left/right join
   * clash reported by 'canPlansBeNaturallyJoined' (status codes: 0 = can
   * join, 1 = right join clash, 2 = left join clash — inferred from the
   * branches below; confirm against that method's definition).
   */
  private PlanTableSource joinAllPlansToSingleSource(List all_plans) {
    // If there are no plans then return null
    if (all_plans.size() == 0) {
      return null;
    }
    // Return early if there is only 1 table.
    else if (all_plans.size() == 1) {
      return (PlanTableSource) all_plans.get(0);
    }
    // Make a working copy of the plan list.
    ArrayList working_plan_list = new ArrayList(all_plans.size());
    for (int i = 0; i < all_plans.size(); ++i) {
      working_plan_list.add(all_plans.get(i));
    }
    // We go through each plan in turn.
    while (working_plan_list.size() > 1) {
      PlanTableSource left_plan = (PlanTableSource) working_plan_list.get(0);
      PlanTableSource right_plan = (PlanTableSource) working_plan_list.get(1);
      // First we need to determine if the left and right plan can be
      // naturally joined.
      int status = canPlansBeNaturallyJoined(left_plan, right_plan);
      if (status == 0) {
        // Yes they can so join them
        PlanTableSource new_plan = naturallyJoinPlans(left_plan, right_plan);
        // Remove the left and right plan from the list and add the new plan
        // (the merged plan goes to the front so it is reconsidered first).
        working_plan_list.remove(left_plan);
        working_plan_list.remove(right_plan);
        working_plan_list.add(0, new_plan);
      }
      else if (status == 1) {
        // No we can't because of a right join clash, so we join the left
        // plan right in hopes of resolving the clash.
        PlanTableSource new_plan =
                       naturallyJoinPlans(left_plan, left_plan.right_plan);
        working_plan_list.remove(left_plan);
        working_plan_list.remove(left_plan.right_plan);
        working_plan_list.add(0, new_plan);
      }
      else if (status == 2) {
        // No we can't because of a left join clash, so we join the left
        // plan left in hopes of resolving the clash.
        PlanTableSource new_plan =
                       naturallyJoinPlans(left_plan, left_plan.left_plan);
        working_plan_list.remove(left_plan);
        working_plan_list.remove(left_plan.left_plan);
        working_plan_list.add(0, new_plan);
      }
      else {
        throw new RuntimeException("Unknown status: " + status);
      }
    }
    // Return the working plan of the merged tables.
    return (PlanTableSource) working_plan_list.get(0);
  }

  /**
   * Naturally joins two PlanTableSource objects in this planner.  When this
   * method returns the actual plans will be joined together.  This method
   * modifies 'table_list'.
   *
   * If the two plans are linked by join information (left/right plan
   * references) the recorded join type and ON expression are used;
   * otherwise a plain natural join is performed.
   */
  private PlanTableSource naturallyJoinPlans(
                              PlanTableSource plan1, PlanTableSource plan2) {
    int join_type;
    Expression on_expr;
    PlanTableSource left_plan, right_plan;
    // Are the plans linked by common join information?
    if (plan1.right_plan == plan2) {
      join_type = plan1.right_join_type;
      on_expr = plan1.right_on_expr;
      left_plan = plan1;
      right_plan = plan2;
    }
    else if (plan1.left_plan == plan2) {
      join_type = plan1.left_join_type;
      on_expr = plan1.left_on_expr;
      // Note the orientation flips: plan2 becomes the left side here.
      left_plan = plan2;
      right_plan = plan1;
    }
    else {
      // Assertion - make sure no join clashes!
      if ((plan1.left_plan != null && plan2.left_plan != null) ||
          (plan1.right_plan != null && plan2.right_plan != null)) {
        throw new RuntimeException(
            "Assertion failed - plans can not be naturally join because " +
            "the left/right join plans clash.");
      }
      // Else we must assume a non-dependant join (not an outer join).
      // Perform a natural join
      QueryPlanNode node = new QueryPlan.NaturalJoinNode(
                                          plan1.getPlan(), plan2.getPlan());
      return mergeTables(plan1, plan2, node);
    }
    // This means plan1 and plan2 are linked by a common join and ON
    // expression which we evaluate now.
    // Continuation of naturallyJoinPlans: plan1/plan2 are linked by a common
    // join, so apply the join type and ON expression recorded on the link.
    String outer_join_name;
    if (join_type == JoiningSet.LEFT_OUTER_JOIN) {
      // NOTE(review): helper name 'createRandomeOuterJoinName' carries a
      // typo, but it is defined elsewhere in this file - renaming the call
      // here alone would break the build.
      outer_join_name = createRandomeOuterJoinName();
      // Mark the left plan
      left_plan.updatePlan(new QueryPlan.MarkerNode(
                                      left_plan.getPlan(), outer_join_name));
    }
    else if (join_type == JoiningSet.RIGHT_OUTER_JOIN) {
      outer_join_name = createRandomeOuterJoinName();
      // Mark the right plan
      right_plan.updatePlan(new QueryPlan.MarkerNode(
                                     right_plan.getPlan(), outer_join_name));
    }
    else if (join_type == JoiningSet.INNER_JOIN) {
      // Inner join with ON expression
      outer_join_name = null;
    }
    else {
      throw new RuntimeException(
                          "Join type (" + join_type + ") is not supported.");
    }
    // Make a Planner object for joining these plans.
    QueryTableSetPlanner planner = new QueryTableSetPlanner();
    planner.addPlanTableSource(left_plan.copy());
    planner.addPlanTableSource(right_plan.copy());
//    planner.printDebugInfo();
    // Evaluate the on expression
    QueryPlanNode node = planner.logicalEvaluate(on_expr);
    // If outer join add the left outer join node
    if (outer_join_name != null) {
      node = new QueryPlan.LeftOuterJoinNode(node, outer_join_name);
    }
    // And merge the plans in this set with the new node.
    return mergeTables(plan1, plan2, node);
//    System.out.println("OUTER JOIN: " + on_expr);
//    throw new RuntimeException("PENDING");
  }

  /**
   * Plans all outer joins.
   *
   * Note, this will change the 'table_list' variable in this class if tables
   * are joined.
   *
   * Walks the source list left to right and joins each plan to the next
   * only when they are linked as an outer join pair (plan1.right_plan ==
   * plan2); unlinked plans are simply skipped over.
   */
  private void planAllOuterJoins() {
//    new Error().printStackTrace();
    int sz = table_list.size();
    if (sz <= 1) {
      return;
    }
    // Make a working copy of the plan list.
    ArrayList working_plan_list = new ArrayList(sz);
    for (int i = 0; i < sz; ++i) {
      working_plan_list.add(table_list.get(i));
    }
//    System.out.println("----");
    PlanTableSource plan1 = (PlanTableSource) working_plan_list.get(0);
    for (int i = 1; i < sz; ++i) {
      PlanTableSource plan2 = (PlanTableSource) working_plan_list.get(i);
//      System.out.println("Joining: " + plan1);
//      System.out.println(" with: " + plan2);
      if (plan1.right_plan == plan2) {
        plan1 = naturallyJoinPlans(plan1, plan2);
      }
      else {
        plan1 = plan2;
      }
    }
  }

  /**
   * Naturally joins all remaining tables sources to make a final single
   * plan which is returned.
   *
   * Note, this will change the 'table_list' variable in this class if tables
   * are joined.
   */
  private PlanTableSource naturalJoinAll() {
    int sz = table_list.size();
    if (sz == 1) {
      return (PlanTableSource) table_list.get(0);
    }
    // Produce a plan that naturally joins all tables.
    return joinAllPlansToSingleSource(table_list);
  }

  /**
   * Convenience class that stores an expression to evaluate for a table.
   * 'single_var' is the variable the expression constrains; 'variable' is
   * the simple LHS variable (null when the LHS is a complex expression).
   */
  private static class SingleVarPlan {
    PlanTableSource table_source;
    Variable single_var;
    Variable variable;
    Expression expression;
  }

  /**
   * Adds a single var plan to the given list.
   *
   * If the list already holds an entry for the same table source (and, for
   * simple plans, the same variable) the new condition is AND'ed onto the
   * existing entry's expression instead of adding a new entry.
   */
  private void addSingleVarPlanTo(ArrayList list, PlanTableSource table,
                                  Variable variable, Variable single_var,
                                  Expression[] exp_parts, Operator op) {
    Expression exp = new Expression(exp_parts[0], op, exp_parts[1]);
    // Is this source in the list already?
    int sz = list.size();
    for (int i = 0; i < sz; ++i) {
      SingleVarPlan plan = (SingleVarPlan) list.get(i);
      if (plan.table_source == table &&
          (variable == null || plan.variable.equals(variable))) {
        // Append to end of current expression
        plan.variable = variable;
        plan.expression = new Expression(plan.expression,
                                         Operator.get("and"), exp);
        return;
      }
    }
    // Didn't find so make a new entry in the list.
    SingleVarPlan plan = new SingleVarPlan();
    plan.table_source = table;
    plan.variable = variable;
    plan.single_var = single_var;
    plan.expression = exp;
    list.add(plan);
    return;
  }

  // ----

  // An expression plan for a constant expression.  These are very
  // optimizable indeed.
  private class ConstantExpressionPlan extends ExpressionPlan {
    private Expression expression;
    public ConstantExpressionPlan(Expression e) {
      expression = e;
    }
    public void addToPlanTree() {
      // Each currently open branch must have this constant expression added
      // to it.
      for (int n = 0; n < table_list.size(); ++n) {
        PlanTableSource plan = (PlanTableSource) table_list.get(n);
        plan.updatePlan(new QueryPlan.ConstantSelectNode(
                                                plan.getPlan(), expression));
      }
    }
  }

  // A plan for a simple 'variable op expression' select on a single table.
  private class SimpleSelectExpressionPlan extends ExpressionPlan {
    private Variable single_var;
    private Operator op;
    private Expression expression;
    public SimpleSelectExpressionPlan(Variable v, Operator op,
                                      Expression e) {
      single_var = v;
      this.op = op;
      expression = e;
    }
    public void addToPlanTree() {
      // Find the table source for this variable
      PlanTableSource table_source = findTableSource(single_var);
      table_source.updatePlan(new QueryPlan.SimpleSelectNode(
                       table_source.getPlan(), single_var, op, expression));
    }
  }

  // A plan for a single-variable condition that can use a range select.
  private class SimpleSingleExpressionPlan extends ExpressionPlan {
    private Variable single_var;
    private Expression expression;
    public SimpleSingleExpressionPlan(Variable v, Expression e) {
      single_var = v;
      expression = e;
    }
    public void addToPlanTree() {
      // Find the table source for this variable
      PlanTableSource table_source = findTableSource(single_var);
      table_source.updatePlan(new QueryPlan.RangeSelectNode(
                                        table_source.getPlan(), expression));
    }
  }

  // A plan for a single-variable condition too complex for an index scan;
  // falls back to an exhaustive select on that variable's table.
  private class ComplexSingleExpressionPlan extends ExpressionPlan {
    private Variable single_var;
    private Expression expression;
    public ComplexSingleExpressionPlan(Variable v, Expression e) {
      single_var = v;
      expression = e;
    }
    public void addToPlanTree() {
      // Find the table source for this variable
      PlanTableSource table_source = findTableSource(single_var);
      table_source.updatePlan(new QueryPlan.ExhaustiveSelectNode(
                                        table_source.getPlan(), expression));
    }
  }

  // A plan for a simple pattern search (eg. 'var LIKE constant') on a
  // single table.
  private class SimplePatternExpressionPlan extends ExpressionPlan {
    private Variable single_var;
    private Expression expression;
    public SimplePatternExpressionPlan(Variable v, Expression e) {
      single_var = v;
      expression = e;
    }
    public void addToPlanTree() {
      // Find the table source for this variable
      PlanTableSource table_source = findTableSource(single_var);
      table_source.updatePlan(new QueryPlan.SimplePatternSelectNode(
                                        table_source.getPlan(), expression));
    }
  }

  // A plan that exhaustively selects over all tables that contain the
  // expression's variables (joining them first if necessary).
  private class ExhaustiveSelectExpressionPlan extends ExpressionPlan {
    private Expression expression;
    public ExhaustiveSelectExpressionPlan(Expression e) {
      expression = e;
    }
    public void addToPlanTree() {
      // Get all the variables of this expression.
      List all_vars = expression.allVariables();
      // Find the table source for this set of variables.
      PlanTableSource table_source = joinAllPlansWithVariables(all_vars);
      // Perform the exhaustive select
      table_source.updatePlan(new QueryPlan.ExhaustiveSelectNode(
                                        table_source.getPlan(), expression));
    }
  }

  // A plan for a sub-query expression that must be evaluated exhaustively
  // over the (pre-computed) set of variables it references.
  private class ExhaustiveSubQueryExpressionPlan extends ExpressionPlan {
    private List all_vars;
    private Expression expression;
    public ExhaustiveSubQueryExpressionPlan(List vars, Expression e) {
      this.all_vars = vars;
      this.expression = e;
    }
    public void addToPlanTree() {
      PlanTableSource table_source = joinAllPlansWithVariables(all_vars);
      // Update the plan
      table_source.updatePlan(new QueryPlan.ExhaustiveSelectNode(
                                        table_source.getPlan(), expression));
    }
  }

  // A plan for a non-correlated 'var <op> ( SELECT ... )' expression that
  // can be answered with a fast any/all node.
  private class SimpleSubQueryExpressionPlan extends ExpressionPlan {
    private Expression expression;
    public SimpleSubQueryExpressionPlan(Expression e) {
      this.expression = e;
    }
    public void addToPlanTree() {
      Operator op = (Operator) expression.last();
      Expression[] exps = expression.split();
      Variable left_var = exps[0].getVariable();
      QueryPlanNode right_plan = exps[1].getQueryPlanNode();
      // Find the table source for this variable
      PlanTableSource table_source = findTableSource(left_var);
      // The left branch
      QueryPlanNode left_plan = table_source.getPlan();
      // Update the plan
      table_source.updatePlan(
           new QueryPlan.NonCorrelatedAnyAllNode(
                  left_plan, right_plan, new Variable[] { left_var }, op));
    }
  }

  // A plan for a multi-variable expression with no simple side: natural
  // join everything the expression touches, then exhaustive select.
  private class ExhaustiveJoinExpressionPlan extends ExpressionPlan {
    private Expression expression;
    public ExhaustiveJoinExpressionPlan(Expression e) {
      this.expression = e;
    }
    public void addToPlanTree() {
      // Get all the variables in the expression
      List all_vars = expression.allVariables();
      // Merge it into one plan (possibly performing natural joins).
      PlanTableSource all_plan = joinAllPlansWithVariables(all_vars);
      // And perform the exhaustive select,
      all_plan.updatePlan(new QueryPlan.ExhaustiveSelectNode(
                                            all_plan.getPlan(), expression));
    }
  }

  // A plan for a join condition where at least one side is a single
  // variable, allowing an optimized join node.
  private class StandardJoinExpressionPlan extends ExpressionPlan {
    private Expression expression;
    public StandardJoinExpressionPlan(Expression e) {
      this.expression = e;
    }
    public void addToPlanTree() {
      // Get the expression with the multiple variables
      Expression[] exps = expression.split();
      // Get the list of variables in the left hand and right hand side
      Variable lhs_v = exps[0].getVariable();
      Variable rhs_v = exps[1].getVariable();
      List lhs_vars = exps[0].allVariables();
      List rhs_vars = exps[1].allVariables();
      // Get the operator
      Operator op = (Operator) expression.last();
      // Get the left and right plan for the variables in the expression.
      // Note that these methods may perform natural joins on the table.
      PlanTableSource lhs_plan = joinAllPlansWithVariables(lhs_vars);
      PlanTableSource rhs_plan = joinAllPlansWithVariables(rhs_vars);
      // If the lhs and rhs plans are different (there is a joining
      // situation).
      if (lhs_plan != rhs_plan) {
        // If either the LHS or the RHS is a single variable then we can
        // optimize the join.
        if (lhs_v != null || rhs_v != null) {
          // If rhs_v is a single variable and lhs_v is not then we must
          // reverse the expression.
          QueryPlan.JoinNode join_node;
          if (lhs_v == null && rhs_v != null) {
            // Reverse the expressions and the operator
            join_node = new QueryPlan.JoinNode(
                         rhs_plan.getPlan(), lhs_plan.getPlan(),
                         rhs_v, op.reverse(), exps[0]);
            mergeTables(rhs_plan, lhs_plan, join_node);
          }
          else {
            // Otherwise, use it as it is.
            join_node = new QueryPlan.JoinNode(
                         lhs_plan.getPlan(), rhs_plan.getPlan(),
                         lhs_v, op, exps[1]);
            mergeTables(lhs_plan, rhs_plan, join_node);
          }
          // Return because we are done
          return;
        }
      } // if lhs and rhs plans are different
      // If we get here either both the lhs and rhs are complex expressions
      // or the lhs and rhs of the variable are not different plans, or
      // the operator is not a conditional.  Either way, we must evaluate
      // this via a natural join of the variables involved coupled with an
      // exhaustive select.  These types of queries are poor performing.
      // Get all the variables in the expression
      List all_vars = expression.allVariables();
      // Merge it into one plan (possibly performing natural joins).
      PlanTableSource all_plan = joinAllPlansWithVariables(all_vars);
      // And perform the exhaustive select,
      all_plan.updatePlan(new QueryPlan.ExhaustiveSelectNode(
                                            all_plan.getPlan(), expression));
    }
  }

  // A plan that defers to 'planForExpression' for nested logical (OR)
  // sub-expressions.
  private class SubLogicExpressionPlan extends ExpressionPlan {
    private Expression expression;
    public SubLogicExpressionPlan(Expression e) {
      this.expression = e;
    }
    public void addToPlanTree() {
      planForExpression(expression);
    }
  }

  /**
   * Evaluates a list of constant conditional expressions of the form
   * '3 + 2 = 0', 'true = true', etc.  Each becomes a ConstantExpressionPlan
   * with optimizable value 0 (evaluated first).
   */
  void evaluateConstants(ArrayList constant_vars, ArrayList evaluate_order) {
    // For each constant variable
    for (int i = 0; i < constant_vars.size(); ++i) {
      Expression expr = (Expression) constant_vars.get(i);
      // Add the expression plan
      ExpressionPlan exp_plan = new ConstantExpressionPlan(expr);
      exp_plan.setOptimizableValue(0f);
      evaluate_order.add(exp_plan);
    }
  }

  /**
   * Evaluates a list of single variable conditional expressions of the
   * form a = 3, a > 1 + 2, a - 2 = 1, 3 = a, concat(a, 'a') = '3a', etc.
   * The rule is there must be only one variable, a conditional operator,
   * and a constant on one side.
   *
   * This method takes the list and modifies the plan as necessary.
   *
   * Expressions with the same table/variable are coalesced (AND'ed) via
   * addSingleVarPlanTo before being turned into plan entries.  Simple
   * plans get value 0.2, complex plans 0.8 in the evaluation order.
   */
  void evaluateSingles(ArrayList single_vars, ArrayList evaluate_order) {
    // The list of simple expression plans (lhs = single)
    ArrayList simple_plan_list = new ArrayList();
    // The list of complex function expression plans (lhs = expression)
    ArrayList complex_plan_list = new ArrayList();
    // For each single variable expression
    for (int i = 0; i < single_vars.size(); ++i) {
      Expression andexp = (Expression) single_vars.get(i);
      // The operator
      Operator op = (Operator) andexp.last();
      // Split the expression
      Expression[] exps = andexp.split();
      // The single var
      Variable single_var;
      // If the operator is a sub-query we must be of the form,
      // 'a in ( 1, 2, 3 )'
      if (op.isSubQuery()) {
        single_var = exps[0].getVariable();
        if (single_var != null) {
          ExpressionPlan exp_plan = new SimpleSelectExpressionPlan(
                                                    single_var, op, exps[1]);
          exp_plan.setOptimizableValue(0.2f);
          evaluate_order.add(exp_plan);
        }
        else {
          single_var = (Variable) exps[0].allVariables().get(0);
          ExpressionPlan exp_plan = new ComplexSingleExpressionPlan(
                                                       single_var, andexp);
          exp_plan.setOptimizableValue(0.8f);
          evaluate_order.add(exp_plan);
        }
      }
      else {
        // Put the variable on the LHS, constant on the RHS
        List all_vars = exps[0].allVariables();
        if (all_vars.size() == 0) {
          // Reverse the expressions and the operator
          Expression temp_exp = exps[0];
          exps[0] = exps[1];
          exps[1] = temp_exp;
          op = op.reverse();
          single_var = (Variable) exps[0].allVariables().get(0);
        }
        else {
          single_var = (Variable) all_vars.get(0);
        }
        // The table source
        PlanTableSource table_source = findTableSource(single_var);
        // Simple LHS?
        Variable v = exps[0].getVariable();
        if (v != null) {
          addSingleVarPlanTo(simple_plan_list, table_source, v,
                             single_var, exps, op);
        }
        else {
          // No, complex lhs
          addSingleVarPlanTo(complex_plan_list, table_source, null,
                             single_var, exps, op);
        }
      }
    }
    // We now have a list of simple and complex plans for each table,
    int sz = simple_plan_list.size();
    for (int i = 0; i < sz; ++i) {
      SingleVarPlan var_plan = (SingleVarPlan) simple_plan_list.get(i);
      ExpressionPlan exp_plan = new SimpleSingleExpressionPlan(
                                  var_plan.single_var, var_plan.expression);
      exp_plan.setOptimizableValue(0.2f);
      evaluate_order.add(exp_plan);
    }
    sz = complex_plan_list.size();
    for (int i = 0; i < sz; ++i) {
      SingleVarPlan var_plan = (SingleVarPlan) complex_plan_list.get(i);
      ExpressionPlan exp_plan = new ComplexSingleExpressionPlan(
                                  var_plan.single_var, var_plan.expression);
      exp_plan.setOptimizableValue(0.8f);
      evaluate_order.add(exp_plan);
    }
  }

  /**
   * Evaluates a list of expressions that are pattern searches (eg. LIKE,
   * NOT LIKE and REGEXP).  Note that the LHS or RHS may be complex
   * expressions with variables, but we are guaranteed that there are
   * no sub-expressions in the expression.
   */
  void evaluatePatterns(ArrayList pattern_exprs, ArrayList evaluate_order) {
    // Split the patterns into simple and complex plans.  A complex plan
    // may require that a join occurs.
    for (int i = 0; i < pattern_exprs.size(); ++i) {
      Expression expr = (Expression) pattern_exprs.get(i);
      Expression[] exps = expr.split();
      // If the LHS is a single variable and the RHS is a constant then
      // the conditions are right for a simple pattern search.
      Variable lhs_v = exps[0].getVariable();
      if (expr.isConstant()) {
        // Fully constant pattern - handled as a constant select.
        ExpressionPlan expr_plan = new ConstantExpressionPlan(expr);
        expr_plan.setOptimizableValue(0f);
        evaluate_order.add(expr_plan);
      }
      else if (lhs_v != null && exps[1].isConstant()) {
        ExpressionPlan expr_plan =
                             new SimplePatternExpressionPlan(lhs_v, expr);
        expr_plan.setOptimizableValue(0.25f);
        evaluate_order.add(expr_plan);
      }
      else {
        // Otherwise we must assume a complex pattern search which may
        // require a join.  For example, 'a + b LIKE 'a%'' or
        // 'a LIKE b'.  At the very least, this will be an exhaustive
        // search and at the worst it will be a join + exhaustive search.
        // So we should evaluate these at the end of the evaluation order.
        ExpressionPlan expr_plan = new ExhaustiveSelectExpressionPlan(expr);
        expr_plan.setOptimizableValue(0.82f);
        evaluate_order.add(expr_plan);
      }
    }
  }

  /**
   * Evaluates a list of expressions containing sub-queries.  Non-correlated
   * sub-queries can often be optimized in to fast searches.  Correlated
   * queries, or expressions containing multiple sub-queries are put
   * through the ExhaustiveSelect plan.
   */
  void evaluateSubQueries(ArrayList expressions, ArrayList evaluate_order) {
    // For each sub-query expression
    for (int i = 0; i < expressions.size(); ++i) {
      Expression andexp = (Expression) expressions.get(i);
      boolean is_exhaustive;
      Variable left_var = null;
      QueryPlanNode right_plan = null;
      // Is this an easy sub-query?
      Operator op = (Operator) andexp.last();
      if (op.isSubQuery()) {
        // Split the expression.
        Expression[] exps = andexp.split();
        // Check that the left is a simple enough variable reference
        left_var = exps[0].getVariable();
        if (left_var != null) {
          // Check that the right is a sub-query plan.
          right_plan = exps[1].getQueryPlanNode();
          if (right_plan != null) {
            // Finally, check if the plan is correlated or not
            ArrayList cv =
                 right_plan.discoverCorrelatedVariables(1, new ArrayList());
//            System.out.println("Right Plan: " + right_plan);
//            System.out.println("Correlated variables: " + cv);
            if (cv.size() == 0) {
              // No correlated variables so we are a standard, non-correlated
              // query!
              is_exhaustive = false;
            }
            else {
              is_exhaustive = true;
            }
          }
          else {
            is_exhaustive = true;
          }
        }
        else {
          is_exhaustive = true;
        }
      }
      else {
        // Must be an exhaustive sub-query
        is_exhaustive = true;
      }
      // If this is an exhaustive operation,
      if (is_exhaustive) {
        // This expression could involve multiple variables, so we may need
        // to join.
        List all_vars = andexp.allVariables();
        // Also find all correlated variables.
        List all_correlated =
                     andexp.discoverCorrelatedVariables(0, new ArrayList());
        int sz = all_correlated.size();
        // If there are no variables (and no correlated variables) then this
        // must be a constant select, For example, 3 in ( select ... )
        if (all_vars.size() == 0 && sz == 0) {
          ExpressionPlan expr_plan = new ConstantExpressionPlan(andexp);
          expr_plan.setOptimizableValue(0f);
          evaluate_order.add(expr_plan);
        }
        else {
          for (int n = 0; n < sz; ++n) {
            CorrelatedVariable cv =
                                (CorrelatedVariable) all_correlated.get(n);
            all_vars.add(cv.getVariable());
          }
          // An exhaustive expression plan which might require a join or a
          // slow correlated search.  This should be evaluated after the
          // multiple variables are processed.
          ExpressionPlan exp_plan = new ExhaustiveSubQueryExpressionPlan(
                                                           all_vars, andexp);
          exp_plan.setOptimizableValue(0.85f);
          evaluate_order.add(exp_plan);
        }
      }
      else {
        // This is a simple sub-query expression plan with a single LHS
        // variable and a single RHS sub-query.
        ExpressionPlan exp_plan = new SimpleSubQueryExpressionPlan(andexp);
        exp_plan.setOptimizableValue(0.3f);
        evaluate_order.add(exp_plan);
      }
    } // For each 'and' expression
  }

  /**
   * Evaluates a list of expressions containing multiple variable expression.
   * For example, 'a = b', 'a > b + c', 'a + 5 * b = 2', etc.  If an
   * expression represents a simple join condition then a join plan is
   * made to the query plan tree.  If an expression represents a more
   * complex joining condition then an exhaustive search must be used.
   */
  void evaluateMultiples(ArrayList multi_vars, ArrayList evaluate_order) {
    // FUTURE OPTIMIZATION:
    //   This join order planner is a little primitive in design.  It orders
    //   optimizable joins first and least optimizable last, but does not
    //   take into account other factors that we could use to optimize
    //   joins in the future.
    // For each single variable expression
    for (int i = 0; i < multi_vars.size(); ++i) {
      // Get the expression with the multiple variables
      Expression expr = (Expression) multi_vars.get(i);
      Expression[] exps = expr.split();
      // Get the list of variables in the left hand and right hand side
      Variable lhs_v = exps[0].getVariable();
      Variable rhs_v = exps[1].getVariable();
      // Work out how optimizable the join is.
      // The calculation is as follows;
      // a) If both the lhs and rhs are a single variable then the
      //    optimizable value is set to 0.6f.
      // b) If only one of lhs or rhs is a single variable then the
      //    optimizable value is set to 0.64f.
      // c) Otherwise it is set to 0.68f (exhaustive select guaranteed).
      if (lhs_v == null && rhs_v == null) {
        // Neither lhs or rhs are single vars
        ExpressionPlan exp_plan = new ExhaustiveJoinExpressionPlan(expr);
        exp_plan.setOptimizableValue(0.68f);
        evaluate_order.add(exp_plan);
      }
      else if (lhs_v != null && rhs_v != null) {
        // Both lhs and rhs are a single var (most optimizable type of
        // join).
        ExpressionPlan exp_plan = new StandardJoinExpressionPlan(expr);
        exp_plan.setOptimizableValue(0.60f);
        evaluate_order.add(exp_plan);
      }
      else {
        // Either lhs or rhs is a single var
        ExpressionPlan exp_plan = new StandardJoinExpressionPlan(expr);
        exp_plan.setOptimizableValue(0.64f);
        evaluate_order.add(exp_plan);
      }
    } // for each expression we are 'and'ing against
  }

  /**
   * Evaluates a list of expressions that are sub-expressions themselves.
   * This is typically called when we have OR queries in the expression.
   */
  void evaluateSubLogic(ArrayList sublogic_exprs, ArrayList evaluate_order) {
each_logic_expr:
    for (int i = 0; i < sublogic_exprs.size(); ++i) {
      Expression expr = (Expression) sublogic_exprs.get(i);
      // Break the expression down to a list of OR expressions,
      ArrayList or_exprs = expr.breakByOperator(new ArrayList(), "or");
      // An optimization here:
      // If all the expressions we are ORing together are in the same table
      // then we should execute them before the joins, otherwise they
      // should go after the joins.
      // The reason for this is because if we can lessen the amount of work a
      // join has to do then we should.  The actual time it takes to perform
      // an OR search shouldn't change if it is before or after the joins.
      PlanTableSource common = null;
      for (int n = 0; n < or_exprs.size(); ++n) {
        Expression or_expr = (Expression) or_exprs.get(n);
        List vars = or_expr.allVariables();
        // If there are no variables then don't bother with this expression
        if (vars.size() > 0) {
          // Find the common table source (if any)
          PlanTableSource ts = findCommonTableSource(vars);
          boolean or_after_joins = false;
          if (ts == null) {
            // No common table, so OR after the joins
            or_after_joins = true;
          }
          else if (common == null) {
            common = ts;
          }
          else if (common != ts) {
            // No common table with the vars in this OR list so do this OR
            // after the joins.
            or_after_joins = true;
          }
          if (or_after_joins) {
            ExpressionPlan exp_plan = new SubLogicExpressionPlan(expr);
            exp_plan.setOptimizableValue(0.70f);
            evaluate_order.add(exp_plan);
            // Continue to the next logic expression
            continue each_logic_expr;
          }
        }
      }
      // Either we found a common table or there are no variables in the OR.
      // Either way we should evaluate this after the join.
      ExpressionPlan exp_plan = new SubLogicExpressionPlan(expr);
      exp_plan.setOptimizableValue(0.58f);
      evaluate_order.add(exp_plan);
    }
  }

  // -----

  /**
   * Generates a plan to evaluate the given list of expressions
   * (logically separated with AND).
   *
   * Conditions are bucketed by kind (constant / single-var / pattern /
   * sub-query / multi-var / sub-logic), each bucket produces
   * ExpressionPlan entries with an 'optimizable' weight, and the combined
   * list is sorted by weight and applied to the plan tree in that order.
   */
  void planForExpressionList(List and_list) {
    ArrayList sub_logic_expressions = new ArrayList();
    // The list of expressions that have a sub-select in them.
    ArrayList sub_query_expressions = new ArrayList();
    // The list of all constant expressions ( true = true )
    ArrayList constants = new ArrayList();
    // The list of pattern matching expressions (eg. 't LIKE 'a%')
    ArrayList pattern_expressions = new ArrayList();
    // The list of all expressions that are a single variable on one
    // side, a conditional operator, and a constant on the other side.
    ArrayList single_vars = new ArrayList();
    // The list of multi variable expressions (possible joins)
    ArrayList multi_vars = new ArrayList();
    // Separate out each condition type.
    for (int i = 0; i < and_list.size(); ++i) {
      Object el = and_list.get(i);
      Expression andexp = (Expression) el;
      // If we end with a logical operator then we must recurse them
      // through this method.
      Object lob = andexp.last();
      Operator op;
      // If the last is not an operator, then we imply
      // '[expression] = true'
      if (!(lob instanceof Operator) ||
          ((Operator) lob).isMathematical()) {
        Operator EQUAL_OP = Operator.get("=");
        andexp.addElement(TObject.booleanVal(true));
        andexp.addOperator(EQUAL_OP);
        op = EQUAL_OP;
      }
      else {
        op = (Operator) lob;
      }
      // If the last is logical (eg. AND, OR) then we must process the
      // sub logic expression
      if (op.isLogical()) {
        sub_logic_expressions.add(andexp);
      }
      // Does the expression have a sub-query?  (eg. Another select
      // statement somewhere in it)
      else if (andexp.hasSubQuery()) {
        sub_query_expressions.add(andexp);
      }
      else if (op.isPattern()) {
        pattern_expressions.add(andexp);
      }
      else { //if (op.isCondition()) {
        // The list of variables in the expression.
        List vars = andexp.allVariables();
        if (vars.size() == 0) {
          // These are ( 54 + 9 = 9 ), ( "z" > "a" ), ( 9.01 - 2 ), etc
          constants.add(andexp);
        }
        else if (vars.size() == 1) {
          // These are ( id = 90 ), ( 'a' < number ), etc
          single_vars.add(andexp);
        }
        else if (vars.size() > 1) {
          // These are ( id = part_id ),
          // ( cost_of + value_of < sold_at ), ( id = part_id - 10 )
          multi_vars.add(andexp);
        }
        else {
          throw new Error("Hmm, vars list size is negative!");
        }
      }
    }
    // The order in which expression are evaluated,
    // (ExpressionPlan)
    ArrayList evaluate_order = new ArrayList();
    // Evaluate the constants.  These should always be evaluated first
    // because they always evaluate to either true or false or null.
    evaluateConstants(constants, evaluate_order);
    // Evaluate the singles.  If formed well these can be evaluated
    // using fast indices.  eg. (a > 9 - 3) is more optimal than
    // (a + 3 > 9).
    evaluateSingles(single_vars, evaluate_order);
    // Evaluate the pattern operators.  Note that some patterns can be
    // optimized better than others, but currently we keep this near the
    // middle of our evaluation sequence.
    evaluatePatterns(pattern_expressions, evaluate_order);
    // Evaluate the sub-queries.  These are queries of the form,
    // (a IN ( SELECT ... )), (a = ( SELECT ... ) = ( SELECT ... )), etc.
    evaluateSubQueries(sub_query_expressions, evaluate_order);
    // Evaluate multiple variable expressions.  It's possible these are
    // joins.
    evaluateMultiples(multi_vars, evaluate_order);
    // Lastly evaluate the sub-logic expressions.  These expressions are
    // OR type expressions.
    evaluateSubLogic(sub_logic_expressions, evaluate_order);

    // Sort the evaluation list by how optimizable the expressions are,
    // (ExpressionPlan is presumably Comparable on the optimizable value -
    // confirm against its definition elsewhere in this file).
    Collections.sort(evaluate_order);
    // And add each expression to the plan
    for (int i = 0; i < evaluate_order.size(); ++i) {
      ExpressionPlan plan = (ExpressionPlan) evaluate_order.get(i);
      plan.addToPlanTree();
    }
  }

  /**
   * Evaluates the search Expression clause and alters the branches of
   * the plans in this object as necessary.  Unlike the 'logicalEvaluate'
   * method, this does not result in a single QueryPlanNode.  It is the
   * responsibility of the callee to join branches as required.
   */
  void planForExpression(Expression exp) {
    if (exp == null) {
      return;
    }
    Object ob = exp.last();
    if (ob instanceof Operator && ((Operator) ob).isLogical()) {
      Operator last_op = (Operator) ob;
      if (last_op.is("or")) {
        // parsing an OR block
        // Split left and right of logical operator.
        Expression[] exps = exp.split();
        // If we are an 'or' then evaluate left and right and union the
        // result.
        // Before we branch set cache points.
        setCachePoints();
        // Make copies of the left and right planner
        QueryTableSetPlanner left_planner = copy();
        QueryTableSetPlanner right_planner = copy();
        // Plan the left and right side of the OR
        left_planner.planForExpression(exps[0]);
        right_planner.planForExpression(exps[1]);
        // Fix the left and right planner so that they represent the same
        // 'group'.
        // The current implementation naturally joins all sources if the
        // number of sources is different than the original size.
        int left_sz = left_planner.table_list.size();
        int right_sz = right_planner.table_list.size();
        if (left_sz != right_sz ||
            left_planner.hasJoinOccured() ||
            right_planner.hasJoinOccured()) {
          // Naturally join all in the left and right plan
          left_planner.naturalJoinAll();
          right_planner.naturalJoinAll();
        }
        // Union all table sources, but only if they have changed.
        ArrayList left_table_list = left_planner.table_list;
        ArrayList right_table_list = right_planner.table_list;
        int sz = left_table_list.size();
        // First we must determine the plans that need to be joined in the
        // left and right plan.
        ArrayList left_join_list = new ArrayList();
        ArrayList right_join_list = new ArrayList();
        for (int i = 0; i < sz; ++i) {
          PlanTableSource left_plan =
                                 (PlanTableSource) left_table_list.get(i);
          PlanTableSource right_plan =
                                 (PlanTableSource) right_table_list.get(i);
          if (left_plan.isUpdated() || right_plan.isUpdated()) {
            left_join_list.add(left_plan);
            right_join_list.add(right_plan);
          }
        }
        // Make sure the plans are joined in the left and right planners
        left_planner.joinAllPlansToSingleSource(left_join_list);
        right_planner.joinAllPlansToSingleSource(right_join_list);
        // Since the planner lists may have changed we update them here.
        left_table_list = left_planner.table_list;
        right_table_list = right_planner.table_list;
        sz = left_table_list.size();
        ArrayList new_table_list = new ArrayList(sz);
        for (int i = 0; i < sz; ++i) {
          PlanTableSource left_plan =
                                 (PlanTableSource) left_table_list.get(i);
          PlanTableSource right_plan =
                                 (PlanTableSource) right_table_list.get(i);
          PlanTableSource new_plan;
          // If left and right plan updated so we need to union them
          if (left_plan.isUpdated() || right_plan.isUpdated()) {
            // In many causes, the left and right branches will contain
            // identical branches that would best be optimized out.
            // Take the left plan, add the logical union to it, and make it
            // the plan for this.
            QueryPlanNode node = new QueryPlan.LogicalUnionNode(
                                 left_plan.getPlan(), right_plan.getPlan());
            // Update the plan in this table list
            left_plan.updatePlan(node);
            new_plan = left_plan;
          }
          else {
            // If the left and right plan didn't update, then use the
            // left plan (it doesn't matter if we use left or right because
            // they are the same).
            new_plan = left_plan;
          }
          // Add the left plan to the new table list we are creating
          new_table_list.add(new_plan);
        }
        // Set the new table list
        table_list = new_table_list;
      }
      else if (last_op.is("and")) {
        // parsing an AND block
        // The list of AND expressions that are here
        ArrayList and_list = new ArrayList();
        and_list = createAndList(and_list, exp);
        planForExpressionList(and_list);
      }
      else {
        throw new RuntimeException("Unknown logical operator: " + ob);
      }
    }
    else {
      // Not a logical expression so just plan for this single expression.
      ArrayList exp_list = new ArrayList(1);
      exp_list.add(exp);
      planForExpressionList(exp_list);
    }
  }

  /**
   * Evaluates a search Expression clause.  Note that is some cases this
   * will generate a plan tree that has many identical branches that can be
   * optimized out.
   */
  QueryPlanNode logicalEvaluate(Expression exp) {
//    System.out.println("Logical Evaluate: " + exp);
    if (exp == null) {
      // Naturally join everything and return the plan.
      naturalJoinAll();
      // Return the plan
      return getSingleTableSource().getPlan();
    }
    // Plan the expression
    planForExpression(exp);
    // Naturally join any straggling tables
    naturalJoinAll();
    // Return the plan
    return getSingleTableSource().getPlan();
  }

  /**
   * Given an Expression, this will return a list of expressions that can be
   * safely executed as a set of 'and' operations.  For example, an
   * expression of 'a=9 and b=c and d=2' would return the list; 'a=9','b=c',
   * 'd=2'.
   *
* If non 'and' operators are found then the reduction stops. */ private ArrayList createAndList(ArrayList list, Expression exp) { return exp.breakByOperator(list, "and"); } /** * Evalutes the WHERE clause of the table expression. */ QueryPlanNode planSearchExpression(SearchExpression search_expression) { // First perform all outer tables. planAllOuterJoins(); QueryPlanNode node = logicalEvaluate(search_expression.getFromExpression()); return node; } /** * Makes an exact duplicate copy (deep clone) of this planner object. */ private QueryTableSetPlanner copy() { QueryTableSetPlanner copy = new QueryTableSetPlanner(); int sz = table_list.size(); for (int i = 0; i < sz; ++i) { copy.table_list.add(((PlanTableSource) table_list.get(i)).copy()); } // Copy the left and right links in the PlanTableSource for (int i = 0; i < sz; ++i) { PlanTableSource src = (PlanTableSource) table_list.get(i); PlanTableSource mod = (PlanTableSource) copy.table_list.get(i); // See how the left plan links to which index, if (src.left_plan != null) { int n = indexOfPlanTableSource(src.left_plan); mod.setLeftJoinInfo((PlanTableSource) copy.table_list.get(n), src.left_join_type, src.left_on_expr); } // See how the right plan links to which index, if (src.right_plan != null) { int n = indexOfPlanTableSource(src.right_plan); mod.setRightJoinInfo((PlanTableSource) copy.table_list.get(n), src.right_join_type, src.right_on_expr); } } return copy; } void printDebugInfo() { StringBuffer buf = new StringBuffer(); buf.append("PLANNER:\n"); for (int i = 0; i < table_list.size(); ++i) { buf.append("TABLE " + i + "\n"); ((PlanTableSource) table_list.get(i)).getPlan().debugString(2, buf); buf.append("\n"); } System.out.println(buf); } } /** * Represents a single table source being planned. */ private static class PlanTableSource { /** * The Plan for this table source. */ private QueryPlanNode plan; /** * The list of fully qualified Variable objects that are accessable within * this plan. 
*/ private final Variable[] var_list; /** * The list of unique key names of the tables in this plan. */ private final String[] unique_names; /** * Set to true when this source has been updated from when it was * constructed or copied. */ private boolean is_updated; /** * How this plan is naturally joined to other plans in the source. A * plan either has no dependance, a left or a right dependance, or a left * and right dependance. */ PlanTableSource left_plan, right_plan; int left_join_type, right_join_type; Expression left_on_expr, right_on_expr; /** * Constructor. */ public PlanTableSource(QueryPlanNode plan, Variable[] var_list, String[] table_unique_names) { this.plan = plan; this.var_list = var_list; this.unique_names = table_unique_names; left_join_type = -1; right_join_type = -1; is_updated = false; } /** * Sets the left join information for this plan. */ void setLeftJoinInfo(PlanTableSource left_plan, int join_type, Expression on_expr) { this.left_plan = left_plan; this.left_join_type = join_type; this.left_on_expr = on_expr; } /** * Sets the right join information for this plan. */ void setRightJoinInfo(PlanTableSource right_plan, int join_type, Expression on_expr) { this.right_plan = right_plan; this.right_join_type = join_type; this.right_on_expr = on_expr; } /** * This is called when two plans are merged together to set up the * left and right join information for the new plan. This sets the left * join info from the left plan and the right join info from the right * plan. 
*/ void setJoinInfoMergedBetween( PlanTableSource left, PlanTableSource right) { if (left.right_plan != right) { if (left.right_plan != null) { setRightJoinInfo(left.right_plan, left.right_join_type, left.right_on_expr); right_plan.left_plan = this; } if (right.left_plan != null) { setLeftJoinInfo(right.left_plan, right.left_join_type, right.left_on_expr); left_plan.right_plan = this; } } if (left.left_plan != right) { if (left_plan == null && left.left_plan != null) { setLeftJoinInfo(left.left_plan, left.left_join_type, left.left_on_expr); left_plan.right_plan = this; } if (right_plan == null && right.right_plan != null) { setRightJoinInfo(right.right_plan, right.right_join_type, right.right_on_expr); right_plan.left_plan = this; } } } /** * Returns true if this table source contains the variable reference. */ public boolean containsVariable(Variable v) { // System.out.println("Looking for: " + v); for (int i = 0; i < var_list.length; ++i) { // System.out.println(var_list[i]); if (var_list[i].equals(v)) { return true; } } return false; } /** * Returns true if this table source contains the unique table name * reference. */ public boolean containsUniqueKey(String name) { for (int i = 0; i < unique_names.length; ++i) { if (unique_names[i].equals(name)) { return true; } } return false; } /** * Sets the updated flag. */ public void setUpdated() { is_updated = true; } /** * Updates the plan. */ public void updatePlan(QueryPlanNode node) { plan = node; setUpdated(); } /** * Returns the plan for this table source. */ public QueryPlanNode getPlan() { return plan; } /** * Returns true if the planner was updated. */ public boolean isUpdated() { return is_updated; } /** * Makes a copy of this table source. */ public PlanTableSource copy() { return new PlanTableSource(plan, var_list, unique_names); } } /** * An abstract class that represents an expression to be added into a plan. 
* Many sets of expressions can be added into the plan tree in any order, * however it is often desirable to add some more intensive expressions * higher up the branches. This object allows us to order expressions by * optimization value. More optimizable expressions are put near the leafs * of the plan tree and least optimizable and put near the top. */ static abstract class ExpressionPlan implements Comparable { /** * How optimizable an expression is. A value of 0 indicates most * optimizable and 1 indicates least optimizable. */ private float optimizable_value; /** * Sets the optimizable value of this plan. */ public void setOptimizableValue(float v) { optimizable_value = v; } /** * Returns the optimizable value for this plan. */ public float getOptimizableValue() { return optimizable_value; } /** * Adds this expression into the plan tree. */ public abstract void addToPlanTree(); public int compareTo(Object ob) { ExpressionPlan dest_plan = (ExpressionPlan) ob; float dest_val = dest_plan.optimizable_value; if (optimizable_value > dest_val) { return 1; } else if (optimizable_value < dest_val) { return -1; } else { return 0; } } } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/interpret/PrivManager.java000066400000000000000000000161121330501023400270100ustar00rootroot00000000000000/** * com.mckoi.database.interpret.PrivManager 21 Aug 2002 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.interpret; import com.mckoi.database.*; import java.util.ArrayList; import java.util.List; /** * Handler for grant/revoke queries for setting up grant information in the * database. * * @author Tobias Downer */ public class PrivManager extends Statement { // ---------- Implemented from Statement ---------- public void prepare() throws DatabaseException { // Nothing to do here } public Table evaluate() throws DatabaseException { DatabaseQueryContext context = new DatabaseQueryContext(database); String command_type = (String) cmd.getObject("command"); ArrayList priv_list = (ArrayList) cmd.getObject("priv_list"); String priv_object = (String) cmd.getObject("priv_object"); int grant_object; String grant_param; // Parse the priv object, if (priv_object.startsWith("T:")) { // Granting to a table object String table_name_str = priv_object.substring(2); TableName table_name = database.resolveTableName(table_name_str); // Check the table exists if (!database.tableExists(table_name)) { throw new DatabaseException("Table '" + table_name + "' doesn't exist."); } grant_object = GrantManager.TABLE; grant_param = table_name.toString(); } else if (priv_object.startsWith("S:")) { // Granting to a schema object String schema_name_str = priv_object.substring(2); SchemaDef schema_name = database.resolveSchemaName(schema_name_str); // Check the schema exists if (schema_name == null || !database.schemaExists(schema_name.toString())) { schema_name_str = schema_name == null ? 
schema_name_str : schema_name.toString(); throw new DatabaseException("Schema '" + schema_name_str + "' doesn't exist."); } grant_object = GrantManager.SCHEMA; grant_param = schema_name.toString(); } else { throw new Error("Priv object formatting error."); } if (command_type.equals("GRANT")) { ArrayList grant_to = (ArrayList) cmd.getObject("grant_to"); boolean grant_option = cmd.getBoolean("grant_option"); // Get the grant manager. GrantManager manager = context.getGrantManager(); // Get the grant options this user has on the given object. Privileges options_privs = manager.userGrantOptions( grant_object, grant_param, user.getUserName()); // Is the user permitted to give out these privs? Privileges grant_privs = Privileges.EMPTY_PRIVS; for (int i = 0; i < priv_list.size(); ++i) { String priv = ((String) priv_list.get(i)).toUpperCase(); int priv_bit; if (priv.equals("ALL")) { if (grant_object == GrantManager.TABLE) { priv_bit = Privileges.TABLE_ALL_PRIVS.toInt(); } else if (grant_object == GrantManager.SCHEMA) { priv_bit = Privileges.SCHEMA_ALL_PRIVS.toInt(); } else { throw new Error("Unrecognised grant object."); } } else { priv_bit = Privileges.parseString(priv); } if (!options_privs.permits(priv_bit)) { throw new UserAccessException( "User is not permitted to grant '" + priv + "' access on object " + grant_param); } grant_privs = grant_privs.add(priv_bit); } // Do the users exist? for (int i = 0; i < grant_to.size(); ++i) { String name = (String) grant_to.get(i); if (!name.equalsIgnoreCase("public") && !database.getDatabase().userExists(context, name)) { throw new DatabaseException("User '" + name + "' doesn't exist."); } } // Everything checks out so add the grants to the users. 
for (int i = 0; i < grant_to.size(); ++i) { String name = (String) grant_to.get(i); if (name.equalsIgnoreCase("public")) { // Add a public grant, manager.addGrant(grant_privs, grant_object, grant_param, GrantManager.PUBLIC_USERNAME_STR, grant_option, user.getUserName()); } else { // Add a user grant. manager.addGrant(grant_privs, grant_object, grant_param, name, grant_option, user.getUserName()); } } // All done. } else if (command_type.equals("REVOKE")) { ArrayList revoke_from = (ArrayList) cmd.getObject("revoke_from"); boolean revoke_grant_option = cmd.getBoolean("revoke_grant_option"); // Get the grant manager. GrantManager manager = context.getGrantManager(); // Is the user permitted to give out these privs? Privileges revoke_privs = Privileges.EMPTY_PRIVS; for (int i = 0; i < priv_list.size(); ++i) { String priv = ((String) priv_list.get(i)).toUpperCase(); int priv_bit; if (priv.equals("ALL")) { if (grant_object == GrantManager.TABLE) { priv_bit = Privileges.TABLE_ALL_PRIVS.toInt(); } else if (grant_object == GrantManager.SCHEMA) { priv_bit = Privileges.SCHEMA_ALL_PRIVS.toInt(); } else { throw new Error("Unrecognised grant object."); } } else { priv_bit = Privileges.parseString(priv); } revoke_privs = revoke_privs.add(priv_bit); } // Revoke the grants for the given users for (int i = 0; i < revoke_from.size(); ++i) { String name = (String) revoke_from.get(i); if (name.equalsIgnoreCase("public")) { // Revoke a public grant, manager.removeGrant(revoke_privs, grant_object, grant_param, GrantManager.PUBLIC_USERNAME_STR, revoke_grant_option, user.getUserName()); } else { // Revoke a user grant. manager.removeGrant(revoke_privs, grant_object, grant_param, name, revoke_grant_option, user.getUserName()); } } // All done. 
} else { throw new Error("Unknown priv manager command: " + command_type); } return FunctionTable.resultTable(context, 0); } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/interpret/SQLQueryExecutor.java000066400000000000000000000107251330501023400300050ustar00rootroot00000000000000/** * com.mckoi.database.interpret.SQLQueryExecutor 25 Mar 2002 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.interpret; import com.mckoi.database.*; import com.mckoi.database.jdbc.SQLQuery; import com.mckoi.database.sql.SQL; import com.mckoi.database.sql.ParseException; import java.io.StringReader; import java.sql.SQLException; /** * An object used to execute SQL queries against a given DatabaseConnection * object. The object maintains an SQL parser object as state which is * reused as necessary. *

* This object is a convenient way to execute SQL queries. * * @author Tobias Downer */ public class SQLQueryExecutor { /** * The SQL parser state. */ private static SQL sql_parser; static { // Set up the sql parser. sql_parser = new SQL(new StringReader("")); } /** * Constructs the executor. */ public SQLQueryExecutor() { } /** * Executes the given SQLQuery object on the given DatabaseConnection object. * Returns a Table object that contains the result of the execution. *

* Note that this method does not perform any locking. Any locking must have * happened before this method is called. *

* Also note that the returned Table object is onld valid within the * life-time of the lock unless the root lock requirements are satisified. */ public Table execute(DatabaseConnection connection, SQLQuery query) throws SQLException, DatabaseException, TransactionException, ParseException { // StatementTree caching // Create a new parser and set the parameters... String query_str = query.getQuery(); StatementTree statement_tree = null; StatementCache statement_cache = connection.getSystem().getStatementCache(); if (statement_cache != null) { // Is this query cached? statement_tree = statement_cache.get(query_str); } if (statement_tree == null) { synchronized (sql_parser) { sql_parser.ReInit(new StringReader(query_str)); sql_parser.reset(); // Parse the statement. statement_tree = sql_parser.Statement(); } // Put the statement tree in the cache if (statement_cache != null) { statement_cache.put(query_str, statement_tree); } } // Substitute all parameter substitutions in the statement tree. 
final Object[] vars = query.getVars(); ExpressionPreparer preparer = new ExpressionPreparer() { public boolean canPrepare(Object element) { return (element instanceof ParameterSubstitution); } public Object prepare(Object element) { ParameterSubstitution ps = (ParameterSubstitution) element; int param_id = ps.getID(); return TObject.objectVal(vars[param_id]); } }; statement_tree.prepareAllExpressions(preparer); // Convert the StatementTree to a statement object Statement statement; String statement_class = statement_tree.getClassName(); try { Class c = Class.forName(statement_class); statement = (Statement) c.newInstance(); } catch (ClassNotFoundException e) { throw new SQLException( "Could not find statement class: " + statement_class); } catch (InstantiationException e) { throw new SQLException( "Could not instantiate class: " + statement_class); } catch (IllegalAccessException e) { throw new SQLException( "Could not access class: " + statement_class); } // Initialize the statement statement.init(connection, statement_tree, query); // Automated statement tree preparation statement.resolveTree(); // Prepare the statement. statement.prepare(); // Evaluate the SQL statement. Table result = statement.evaluate(); return result; } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/interpret/Schema.java000066400000000000000000000076401330501023400260030ustar00rootroot00000000000000/** * com.mckoi.database.interpret.Schema 14 Sep 2001 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.interpret; import java.util.ArrayList; import java.util.List; import com.mckoi.database.*; /** * Statement container that handles the CREATE SCHEMA and DROP SCHEMA * statements. * * @author Tobias Downer */ public class Schema extends Statement { /** * The type (either 'create' or 'drop'). */ String type; /** * The name of the schema. */ String schema_name; // ---------- Implemented from Statement ---------- public void prepare() throws DatabaseException { type = (String) cmd.getObject("type"); schema_name = (String) cmd.getObject("schema_name"); } public Table evaluate() throws DatabaseException { DatabaseQueryContext context = new DatabaseQueryContext(database); String com = type.toLowerCase(); if (!database.getDatabase().canUserCreateAndDropSchema( context, user, schema_name)) { throw new UserAccessException( "User not permitted to create or drop schema."); } // Is this a create schema command? if (com.equals("create")) { boolean ignore_case = database.isInCaseInsensitiveMode(); SchemaDef schema = database.resolveSchemaCase(schema_name, ignore_case); if (schema == null) { // Create the schema database.createSchema(schema_name, "USER"); // Set the default grants for the schema database.getGrantManager().addGrant(Privileges.SCHEMA_ALL_PRIVS, GrantManager.SCHEMA, schema_name, user.getUserName(), true, Database.INTERNAL_SECURE_USERNAME); } else { throw new DatabaseException("Schema '" + schema_name + "' already exists."); } } // Is this a drop schema command? 
else if (com.equals("drop")) { boolean ignore_case = database.isInCaseInsensitiveMode(); SchemaDef schema = database.resolveSchemaCase(schema_name, ignore_case); // Only allow user to drop USER typed schemas if (schema == null) { throw new DatabaseException( "Schema '" + schema_name + "' does not exist."); } else if (schema.getType().equals("USER")) { // Check if the schema is empty. TableName[] all_tables = database.getTableList(); String resolved_schema_name = schema.getName(); for (int i = 0; i < all_tables.length; ++i) { if (all_tables[i].getSchema().equals(resolved_schema_name)) { throw new DatabaseException( "Schema '" + schema_name + "' is not empty."); } } // Drop the schema database.dropSchema(schema.getName()); // Revoke all the grants for the schema database.getGrantManager().revokeAllGrantsOnObject( GrantManager.SCHEMA, schema.getName()); } else { throw new DatabaseException( "Can not drop schema '" + schema_name + "'"); } } else { throw new DatabaseException("Unrecognised schema command."); } return FunctionTable.resultTable(context, 0); } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/interpret/SearchExpression.java000066400000000000000000000074231330501023400300670ustar00rootroot00000000000000/** * com.mckoi.database.interpret.SearchExpression 09 Sep 2001 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package com.mckoi.database.interpret; import com.mckoi.database.*; import java.util.*; /** * Search expression is a form of an Expression that is split up into * component parts that can be easily formed into a search query. * * @author Tobias Downer */ public final class SearchExpression implements java.io.Serializable, StatementTreeObject, Cloneable { static final long serialVersionUID = 2888486150597671440L; /** * The originating expression. */ private Expression search_expression; /** * Sets this search expression from the given expression. */ public void setFromExpression(Expression expression) { this.search_expression = expression; } /** * Returns the search expression as an Expression object. */ public Expression getFromExpression() { return search_expression; } /** * Concatinates a new expression to the end of this expression and uses the * 'AND' operator to seperate the expressions. This is very useful for * adding new logical conditions to the expression at runtime. */ void appendExpression(Expression expression) { if (search_expression == null) { search_expression = expression; } else { search_expression = new Expression(search_expression, Operator.get("and"), expression); } } // /** // * Given a SelectStatement, this will resolve all the conditions found in // * this expression (reversively) to their proper full name. If any // * ambiguity is found then an error is thrown. // */ // void resolveColumnNames(Statement statement) { // if (search_expression != null) { // statement.resolveExpression(search_expression); // } // } // // /** // * Evaluates the search expression. // */ // TableSet evaluate(TableSet table_in, JoiningSet join_set) { // // Evalute the expression as a set of logical parts. // table_in.logicalEvaluate(search_expression, join_set); // return table_in; // } /** * Prepares the expression. 
*/ public void prepare(ExpressionPreparer preparer) throws DatabaseException { if (search_expression != null) { search_expression.prepare(preparer); } } /** * Returns all the Elements from all expressions in this condition tree. */ List allElements() { if (search_expression != null) { return search_expression.allElements(); } else { return new ArrayList(); } } // Implemented from StatementTreeObject public void prepareExpressions(ExpressionPreparer preparer) throws DatabaseException { prepare(preparer); } public Object clone() throws CloneNotSupportedException { SearchExpression v = (SearchExpression) super.clone(); if (search_expression != null) { v.search_expression = (Expression) search_expression.clone(); } return v; } public String toString() { if (search_expression != null) { return search_expression.toString(); } else { return "NO SEARCH EXPRESSION"; } } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/interpret/Select.java000066400000000000000000000130411330501023400260120ustar00rootroot00000000000000/** * com.mckoi.database.interpret.Select 09 Sep 2001 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package com.mckoi.database.interpret; import com.mckoi.database.*; import com.mckoi.debug.*; import com.mckoi.util.IntegerVector; import java.util.Set; import java.util.List; import java.util.Vector; import java.util.Iterator; import java.util.ArrayList; import java.util.Collections; /** * Logic for interpreting an SQL SELECT statement. * * @author Tobias Downer */ public class Select extends Statement { /** * The TableSelectExpression representing the select query itself. */ private TableSelectExpression select_expression; /** * The list of all columns to order by. (ByColumn) */ private ArrayList order_by; /** * The list of columns in the 'order_by' clause fully resolved. */ private Variable[] order_cols; /** * The plan for evaluating this select expression. */ private QueryPlanNode plan; /** * Checks the permissions for this user to determine if they are allowed to * select (read) from tables in this plan. If the user is not allowed to * select from a table in the plan, a UserAccessException is thrown. This is * a static method. */ static final void checkUserSelectPermissions( DatabaseQueryContext context, User user, QueryPlanNode plan) throws UserAccessException, DatabaseException { // Discover the list of TableName objects this query touches, ArrayList touched_tables = plan.discoverTableNames(new ArrayList()); Database dbase = context.getDatabase(); // Check that the user is allowed to select from these tables. for (int i = 0; i < touched_tables.size(); ++i) { TableName t = (TableName) touched_tables.get(i); if (!dbase.canUserSelectFromTableObject(context, user, t, null)) { throw new UserAccessException( "User not permitted to select from table: " + t); } } } /** * Prepares the select statement with a Database object. This sets up * internal state so that it correctly maps to a database. Also, this * checks format to ensure there are no run-time syntax problems. This must * be called because we 'evaluate' the statement. *

* NOTE: Care must be taken to ensure that all methods called here are safe * in as far as modifications to the data occuring. The rules for * safety should be as follows. If the database is in EXCLUSIVE mode, * then we need to wait until it's switched back to SHARED mode * before this method is called. * All collection of information done here should not involve any table * state info. except for column count, column names, column types, etc. * Queries such as obtaining the row count, selectable scheme information, * and certainly 'getCellContents' must never be called during prepare. * When prepare finishes, the affected tables are locked and the query ia * safe to 'evaluate' at which time table state is safe to inspect. */ public void prepare() throws DatabaseException { DatabaseConnection db = database; // Prepare this object from the StatementTree, // The select expression itself select_expression = (TableSelectExpression) cmd.getObject("table_expression"); // The order by information order_by = (ArrayList) cmd.getObject("order_by"); // Generate the TableExpressionFromSet hierarchy for the expression, TableExpressionFromSet from_set = Planner.generateFromSet(select_expression, db); // Form the plan plan = Planner.formQueryPlan(db, select_expression, from_set, order_by); } /** * Evaluates the select statement with the given Database context. */ public Table evaluate() throws DatabaseException { DatabaseQueryContext context = new DatabaseQueryContext(database); // Check the permissions for this user to select from the tables in the // given plan. checkUserSelectPermissions(context, user, plan); boolean error = true; try { Table t = plan.evaluate(context); error = false; return t; } finally { // If an error occured, dump the query plan to the debug log. 
// Or just dump the query plan if debug level = INFORMATION if (Debug().isInterestedIn(Lvl.INFORMATION) || (error && Debug().isInterestedIn(Lvl.WARNING))) { StringBuffer buf = new StringBuffer(); plan.debugString(0, buf); Debug().write(Lvl.WARNING, this, "Query Plan debug:\n" + buf.toString()); } } } /** * Outputs information for debugging. */ public String toString() { StringBuffer buf = new StringBuffer(); buf.append("[ SELECT: expression="); buf.append(select_expression.toString()); buf.append(" ORDER_BY="); buf.append(order_by); buf.append(" ]"); return new String(buf); } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/interpret/SelectColumn.java000066400000000000000000000063071330501023400271770ustar00rootroot00000000000000/** * com.mckoi.database.interpret.SelectColumn 09 Sep 2001 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.interpret; import com.mckoi.database.*; /** * Represents a column selected to be in the output of a select statement. * This includes being either an aggregate function, a column or "*" which * is the entire set of columns. * * @author Tobias Downer */ public final class SelectColumn implements java.io.Serializable, StatementTreeObject, Cloneable { static final long serialVersionUID = 2507375247510606004L; /** * If the column represents a glob of columns (eg. 
'Part.*' or '*') then * this is set to the glob string and 'expression' is left blank. */ public String glob_name; /** * The fully resolved name that this column is given in the resulting table. */ public Variable resolved_name; /** * The alias of this column string. */ public String alias; /** * The expression of this column. This is only NOT set when name == "*" * indicating all the columns. */ public Expression expression; /** * The name of this column used internally to reference it. */ public Variable internal_name; // /** // * Makes a deep clone of this object. // */ // SelectColumn deepClone() { // SelectColumn sc = new SelectColumn(); // sc.glob_name = glob_name; // sc.resolved_name = resolved_name; // sc.alias = alias; // sc.expression = new Expression(expression); // sc.internal_name = internal_name; // return sc; // } // Implemented from StatementTreeObject public void prepareExpressions(ExpressionPreparer preparer) throws DatabaseException { if (expression != null) { expression.prepare(preparer); } } public Object clone() throws CloneNotSupportedException { SelectColumn v = (SelectColumn) super.clone(); if (resolved_name != null) { v.resolved_name = (Variable) resolved_name.clone(); } if (expression != null) { v.expression = (Expression) expression.clone(); } if (internal_name != null) { v.internal_name = (Variable) internal_name.clone(); } return v; } public String toString() { String str = ""; if (glob_name != null) str += " GLOB_NAME = " + glob_name; if (resolved_name != null) str += " RESOLVED_NAME = " + resolved_name; if (alias != null) str += " ALIAS = " + alias; if (expression != null) str += " EXPRESSION = " + expression; if (internal_name != null) str += " INTERNAL_NAME = " + internal_name; return str; } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/interpret/Sequence.java000066400000000000000000000132071330501023400263470ustar00rootroot00000000000000/** * com.mckoi.database.interpret.Sequence 07 Apr 2003 * * Mckoi SQL Database ( 
http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.interpret; import com.mckoi.database.*; import com.mckoi.debug.*; /** * A statement tree for creating and dropping sequence generators. * * @author Tobias Downer */ public class Sequence extends Statement { String type; TableName seq_name; Expression increment; Expression min_value; Expression max_value; Expression start_value; Expression cache_value; boolean cycle; // ----------- Implemented from Statement ---------- public void prepare() throws DatabaseException { type = (String) cmd.getObject("type"); String sname = (String) cmd.getObject("seq_name"); String schema_name = database.getCurrentSchema(); seq_name = TableName.resolve(schema_name, sname); seq_name = database.tryResolveCase(seq_name); if (type.equals("create")) { // Resolve the function name into a TableName object. increment = (Expression) cmd.getObject("increment"); min_value = (Expression) cmd.getObject("min_value"); max_value = (Expression) cmd.getObject("max_value"); start_value = (Expression) cmd.getObject("start"); cache_value = (Expression) cmd.getObject("cache"); cycle = cmd.getObject("cycle") != null; } } public Table evaluate() throws DatabaseException { DatabaseQueryContext context = new DatabaseQueryContext(database); // Does the schema exist? 
boolean ignore_case = database.isInCaseInsensitiveMode(); SchemaDef schema = database.resolveSchemaCase(seq_name.getSchema(), ignore_case); if (schema == null) { throw new DatabaseException("Schema '" + seq_name.getSchema() + "' doesn't exist."); } else { seq_name = new TableName(schema.getName(), seq_name.getName()); } if (type.equals("create")) { // Does the user have privs to create this sequence generator? if (!database.getDatabase().canUserCreateSequenceObject(context, user, seq_name)) { throw new UserAccessException( "User not permitted to create sequence: " + seq_name); } // Does a table already exist with this name? if (database.tableExists(seq_name)) { throw new DatabaseException("Database object with name '" + seq_name + "' already exists."); } // Resolve the expressions, long v_start_value = 0; if (start_value != null) { v_start_value = start_value.evaluate(null, null, context).toBigNumber().longValue(); } long v_increment_by = 1; if (increment != null) { v_increment_by = increment.evaluate(null, null, context).toBigNumber().longValue(); } long v_min_value = 0; if (min_value != null) { v_min_value = min_value.evaluate(null, null, context).toBigNumber().longValue(); } long v_max_value = Long.MAX_VALUE; if (max_value != null) { v_max_value = max_value.evaluate(null, null, context).toBigNumber().longValue(); } long v_cache = 16; if (cache_value != null) { v_cache = cache_value.evaluate(null, null, context).toBigNumber().longValue(); if (v_cache <= 0) { throw new DatabaseException("Cache size can not be <= 0"); } } if (v_min_value >= v_max_value) { throw new DatabaseException("Min value can not be >= the max value."); } if (v_start_value < v_min_value || v_start_value >= v_max_value) { throw new DatabaseException( "Start value is outside the min/max sequence bounds."); } database.createSequenceGenerator(seq_name, v_start_value, v_increment_by, v_min_value, v_max_value, v_cache, cycle); // The initial grants for a sequence is to give the user who created it 
// full access. database.getGrantManager().addGrant( Privileges.PROCEDURE_ALL_PRIVS, GrantManager.TABLE, seq_name.toString(), user.getUserName(), true, Database.INTERNAL_SECURE_USERNAME); } else if (type.equals("drop")) { // Does the user have privs to create this sequence generator? if (!database.getDatabase().canUserDropSequenceObject(context, user, seq_name)) { throw new UserAccessException( "User not permitted to drop sequence: " + seq_name); } database.dropSequenceGenerator(seq_name); // Drop the grants for this object database.getGrantManager().revokeAllGrantsOnObject( GrantManager.TABLE, seq_name.toString()); } else { throw new RuntimeException("Unknown type: " + type); } // Return an update result table. return FunctionTable.resultTable(context, 0); } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/interpret/Set.java000066400000000000000000000055611330501023400253360ustar00rootroot00000000000000/** * com.mckoi.database.interpret.Set 14 Sep 2001 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.interpret; import java.util.ArrayList; import java.util.List; import java.math.BigDecimal; import com.mckoi.database.*; /** * The SQL SET statement. Sets properties within the current local database * connection such as auto-commit mode. * * @author Tobias Downer */ public class Set extends Statement { /** * The type of set this is. 
*/ String type; /** * The variable name of this set statement. */ String var_name; /** * The Expression that is the value to assign the variable to * (if applicable). */ Expression exp; /** * The value to assign the value to (if applicable). */ String value; // ---------- Implemented from Statement ---------- public void prepare() throws DatabaseException { type = (String) cmd.getObject("type"); var_name = (String) cmd.getObject("var_name"); exp = (Expression) cmd.getObject("exp"); value = (String) cmd.getObject("value"); } public Table evaluate() throws DatabaseException { DatabaseQueryContext context = new DatabaseQueryContext(database); String com = type.toLowerCase(); if (com.equals("varset")) { database.setVar(var_name, exp); } else if (com.equals("isolationset")) { value = value.toLowerCase(); database.setTransactionIsolation(value); } else if (com.equals("autocommit")) { value = value.toLowerCase(); if (value.equals("on") || value.equals("1")) { database.setAutoCommit(true); } else if (value.equals("off") || value.equals("0")) { database.setAutoCommit(false); } else { throw new DatabaseException("Unrecognised value for SET AUTO COMMIT"); } } else if (com.equals("schema")) { // It's particularly important that this is done during exclusive // lock because SELECT requires the schema name doesn't change in // mid-process. // Change the connection to the schema database.setDefaultSchema(value); } else { throw new DatabaseException("Unrecognised set command."); } return FunctionTable.resultTable(context, 0); } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/interpret/Show.java000066400000000000000000000241271330501023400255220ustar00rootroot00000000000000/** * com.mckoi.database.interpret.Show 13 Sep 2001 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.interpret; import java.util.List; import java.util.ArrayList; import java.util.Date; import java.util.Properties; import java.util.Arrays; import java.util.Collections; import java.sql.SQLException; import com.mckoi.database.*; import com.mckoi.database.sql.ParseException; import com.mckoi.util.Stats; import com.mckoi.database.global.Types; import com.mckoi.database.global.StandardMessages; import com.mckoi.database.global.SQLTypes; import com.mckoi.database.jdbc.SQLQuery; /** * Statement that handles SHOW and DESCRIBE sql commands. * * @author Tobias Downer */ public class Show extends Statement { // Various show statics, static final int TABLES = 1; static final int STATUS = 2; static final int DESCRIBE_TABLE = 3; static final int CONNECTIONS = 4; static final int PRODUCT = 5; static final int CONNECTION_INFO = 6; /** * The name the table that we are to update. */ String table_name; /** * The type of information that we are to show. */ String show_type; /** * Arguments of the show statement. */ Expression[] args; /** * The search expression for the show statement (where clause). */ SearchExpression where_clause = new SearchExpression(); /** * Convenience, creates an empty table with the given column names. */ TemporaryTable createEmptyTable(Database d, String name, String[] cols) throws DatabaseException { // Describe the given table... 
DataTableColumnDef[] fields = new DataTableColumnDef[cols.length]; for (int i = 0; i < cols.length; ++i) { fields[i] = DataTableColumnDef.createStringColumn(cols[i]); } TemporaryTable temp_table = new TemporaryTable(d, name, fields); // No entries... temp_table.setupAllSelectableSchemes(); return temp_table; } // ---------- Implemented from Statement ---------- public void prepare() throws DatabaseException { // Get the show variables from the query model show_type = (String) cmd.getObject("show"); show_type = show_type.toLowerCase(); table_name = (String) cmd.getObject("table_name"); args = (Expression[]) cmd.getObject("args"); where_clause = (SearchExpression) cmd.getObject("where_clause"); } public Table evaluate() throws DatabaseException { DatabaseQueryContext context = new DatabaseQueryContext(database); Database d = database.getDatabase(); // Construct an executor for interpreting SQL queries inside here. SQLQueryExecutor executor = new SQLQueryExecutor(); // The table we are showing, TemporaryTable show_table; try { // How we order the result set int[] order_set = null; if (show_type.equals("schema")) { SQLQuery query = new SQLQuery( " SELECT \"name\" AS \"schema_name\", " + " \"type\", " + " \"other\" AS \"notes\" " + " FROM SYS_JDBC.ThisUserSchemaInfo " + "ORDER BY \"schema_name\""); return executor.execute(database, query); } else if (show_type.equals("tables")) { String current_schema = database.getCurrentSchema(); SQLQuery query = new SQLQuery( " SELECT \"Tables.TABLE_NAME\" AS \"table_name\", " + " I_PRIVILEGE_STRING(\"agg_priv_bit\") AS \"user_privs\", " + " \"Tables.TABLE_TYPE\" as \"table_type\" " + " FROM SYS_JDBC.Tables, " + " ( SELECT AGGOR(\"priv_bit\") agg_priv_bit, " + " \"object\", \"param\" " + " FROM SYS_JDBC.ThisUserSimpleGrant " + " WHERE \"object\" = 1 " + " GROUP BY \"param\" )" + " WHERE \"Tables.TABLE_SCHEM\" = ? 
" + " AND CONCAT(\"Tables.TABLE_SCHEM\", '.', \"Tables.TABLE_NAME\") = \"param\" " + "ORDER BY \"Tables.TABLE_NAME\""); query.addVar(current_schema); return executor.execute(database, query); } else if (show_type.equals("status")) { SQLQuery query = new SQLQuery( " SELECT \"stat_name\" AS \"name\", " + " \"value\" " + " FROM SYS_INFO.sUSRDatabaseStatistics "); return executor.execute(database, query); } else if (show_type.equals("describe_table")) { TableName tname = resolveTableName(table_name, database); if (!database.tableExists(tname)) { throw new StatementException( "Unable to find table '" + table_name + "'"); } SQLQuery query = new SQLQuery( " SELECT \"column\" AS \"name\", " + " i_sql_type(\"type_desc\", \"size\", \"scale\") AS \"type\", " + " \"not_null\", " + " \"index_str\" AS \"index\", " + " \"default\" " + " FROM SYS_JDBC.ThisUserTableColumns " + " WHERE \"schema\" = ? " + " AND \"table\" = ? " + "ORDER BY \"seq_no\" "); query.addVar(tname.getSchema()); query.addVar(tname.getName()); return executor.execute(database, query); } else if (show_type.equals("connections")) { SQLQuery query = new SQLQuery( "SELECT * FROM SYS_INFO.sUSRCurrentConnections"); return executor.execute(database, query); } else if (show_type.equals("product")) { SQLQuery query = new SQLQuery( "SELECT \"name\", \"version\" FROM " + " ( SELECT \"value\" AS \"name\" FROM SYS_INFO.sUSRProductInfo " + " WHERE \"var\" = 'name' ), " + " ( SELECT \"value\" AS \"version\" FROM SYS_INFO.sUSRProductInfo " + " WHERE \"var\" = 'version' ) " ); return executor.execute(database, query); } else if (show_type.equals("connection_info")) { SQLQuery query = new SQLQuery( "SELECT * FROM SYS_INFO.sUSRConnectionInfo" ); return executor.execute(database, query); } else if (show_type.equals("jdbc_procedures")) { // Need implementing? 
show_table = createEmptyTable(d, "JDBCProcedures", new String[] { "PROCEDURE_CAT", "PROCEDURE_SCHEM", "PROCEDURE_NAME", "R1", "R2", "R3", "REMARKS", "PROCEDURE_TYPE" }); } else if (show_type.equals("jdbc_procedure_columns")) { // Need implementing? show_table = createEmptyTable(d, "JDBCProcedureColumns", new String[] { "PROCEDURE_CAT", "PROCEDURE_SCHEM", "PROCEDURE_NAME", "COLUMN_NAME", "COLUMN_TYPE", "DATA_TYPE", "TYPE_NAME", "PRECISION", "LENGTH", "SCALE", "RADIX", "NULLABLE", "REMARKS" }); } else if (show_type.equals("jdbc_catalogs")) { // Need implementing? show_table = createEmptyTable(d, "JDBCCatalogs", new String[] { "TABLE_CAT" }); } else if (show_type.equals("jdbc_table_types")) { // Describe the given table... DataTableColumnDef[] fields = new DataTableColumnDef[1]; fields[0] = DataTableColumnDef.createStringColumn("TABLE_TYPE"); TemporaryTable temp_table = new TemporaryTable(d, "JDBCTableTypes", fields); String[] supported_types = { "TABLE", "VIEW", "SYSTEM TABLE", "TRIGGER", "FUNCTION", "SEQUENCE" }; for (int i = 0; i < supported_types.length; ++i) { temp_table.newRow(); temp_table.setRowObject(TObject.stringVal(supported_types[i]), "JDBCTableTypes.TABLE_TYPE"); } temp_table.setupAllSelectableSchemes(); show_table = temp_table; order_set = new int[] { 0 }; } else if (show_type.equals("jdbc_best_row_identifier")) { // Need implementing? show_table = createEmptyTable(d, "JDBCBestRowIdentifier", new String[] { "SCOPE", "COLUMN_NAME", "DATA_TYPE", "TYPE_NAME", "COLUMN_SIZE", "BUFFER_LENGTH", "DECIMAL_DIGITS", "PSEUDO_COLUMN" }); } else if (show_type.equals("jdbc_version_columns")) { // Need implementing? show_table = createEmptyTable(d, "JDBCVersionColumn", new String[] { "SCOPE", "COLUMN_NAME", "DATA_TYPE", "TYPE_NAME", "COLUMN_SIZE", "BUFFER_LENGTH", "DECIMAL_DIGITS", "PSEUDO_COLUMN" }); } else if (show_type.equals("jdbc_index_info")) { // Need implementing? 
show_table = createEmptyTable(d, "JDBCIndexInfo", new String[] { "TABLE_CAT", "TABLE_SCHEM", "TABLE_NAME", "NON_UNIQUE", "INDEX_QUALIFIER", "INDEX_NAME", "TYPE", "ORDINAL_POSITION", "COLUMN_NAME", "ASC_OR_DESC", "CARDINALITY", "PAGES", "FILTER_CONDITION" }); } else { throw new StatementException("Unknown SHOW identifier: " + show_type); } } catch (SQLException e) { throw new DatabaseException("SQL Error: " + e.getMessage()); } catch (ParseException e) { throw new DatabaseException("Parse Error: " + e.getMessage()); } catch (TransactionException e) { throw new DatabaseException("Transaction Error: " + e.getMessage()); } return show_table; } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/interpret/Statement.java000066400000000000000000000246431330501023400265510ustar00rootroot00000000000000/** * com.mckoi.database.interpret.Statement 09 Sep 2001 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.interpret; import com.mckoi.database.jdbc.SQLQuery; import com.mckoi.database.*; import com.mckoi.debug.DebugLogger; import java.util.*; /** * Provides a set of useful utility functions to use by all the * interpretted statements. * * @author Tobias Downer */ public abstract class Statement { /** * The Database context. */ protected DatabaseConnection database; /** * The user context. 
*/ protected User user; /** * The StatementTree object that is the container for the query. */ protected StatementTree cmd; /** * The SQLQuery object that was used to produce this statement. */ protected SQLQuery query; /** * The list of all FromTableInterface objects of resources referenced in * this query. (FromTableInterface) */ protected Vector table_list = new Vector(); /** * Returns a DebugLogger object used to log debug commands. */ public final DebugLogger Debug() { return database.Debug(); } /** * Resets this statement so it may be re-prepared and evaluated again. * Useful for repeating a query multiple times. */ void reset() { database = null; user = null; table_list = new Vector(); } /** * Performs initial preparation on the contents of the StatementTree by * resolving all sub queries and mapping functions to their executable * forms. *

* Given a StatementTree and a Database context, this method will convert * all sub-queries found in the StatementTree to a Queriable object. In * other words, all StatementTree are converted to Select objects. The given * 'database' object is used as the session to prepare the sub-queries * against. *

* This is called after 'init' and before 'prepare'. */ public final void resolveTree() throws DatabaseException { // For every expression in this select we must go through and resolve // any sub-queries we find to the correct Select object. // This method will prepare the sub-query substitute the StatementTree // object for a Select object in the expression. ExpressionPreparer preparer = new ExpressionPreparer() { public boolean canPrepare(Object element) { return element instanceof StatementTree; } public Object prepare(Object element) throws DatabaseException { StatementTree stmt_tree = (StatementTree) element; Select stmt = new Select(); stmt.init(database, stmt_tree, null); stmt.resolveTree(); stmt.prepare(); return stmt; } }; cmd.prepareAllExpressions(preparer); } /** * Given a fully resolved table name ( eg. Part.id ) this method will * attempt to find the Table object that the column is in. */ FromTableInterface findTableWithColumn(Variable column_name) { for (int i = 0; i < table_list.size(); ++i) { FromTableInterface table = (FromTableInterface) table_list.elementAt(i); TableName tname = column_name.getTableName(); String sch_name = null; String tab_name = null; String col_name = column_name.getName(); if (tname != null) { sch_name = tname.getSchema(); tab_name = tname.getName(); } int rcc = table.resolveColumnCount(null, sch_name, tab_name, col_name); if (rcc > 0) { return table; } } return null; } /** * Given a fully resolved table name ( eg. Part.id ) this returns true if * there is a table with the given column name, otherwise false. *

* NOTE: Intended to be overwritten... */ boolean existsTableWithColumn(Variable column_name) { return findTableWithColumn(column_name) != null; } /** * Overwrite this method if your statement has some sort of column aliasing * capability (such as a select statement). Returns a list of all fully * qualified Variables that match the alias name, or an empty list if no * matches found. *

* By default, returns an empty list. */ ArrayList resolveAgainstAliases(Variable alias_name) { return new ArrayList(0); } /** * Resolves a TableName string (eg. 'Customer' 'APP.Customer' ) to a * TableName object. If the schema part of the table name is not present * then it is set to the current schema of the database connection. If the * database is ignoring the case then this will correctly resolve the table * to the cased version of the table name. */ TableName resolveTableName(String name, DatabaseConnection db) { return db.resolveTableName(name); } /** * Returns the first FromTableInterface object that matches the given schema, * table reference. Returns null if no objects with the given schema/name * reference match. */ FromTableInterface findTableInQuery(String schema, String name) { for (int p = 0; p < table_list.size(); ++p) { FromTableInterface table = (FromTableInterface) table_list.get(p); if (table.matchesReference(null, schema, name)) { return table; } } return null; } /** * Attempts to resolve an ambiguous column name such as 'id' into a * Variable from the tables in this statement. 
*/ Variable resolveColumn(Variable v) { // Try and resolve against alias names first, ArrayList list = new ArrayList(); list.addAll(resolveAgainstAliases(v)); TableName tname = v.getTableName(); String sch_name = null; String tab_name = null; String col_name = v.getName(); if (tname != null) { sch_name = tname.getSchema(); tab_name = tname.getName(); } int matches_found = 0; // Find matches in our list of tables sources, for (int i = 0; i < table_list.size(); ++i) { FromTableInterface table = (FromTableInterface) table_list.elementAt(i); int rcc = table.resolveColumnCount(null, sch_name, tab_name, col_name); if (rcc == 1) { Variable matched = table.resolveColumn(null, sch_name, tab_name, col_name); list.add(matched); } else if (rcc > 1) { throw new StatementException("Ambiguous column name (" + v + ")"); } } int total_matches = list.size(); if (total_matches == 0) { throw new StatementException("Can't find column: " + v); } else if (total_matches == 1) { return (Variable) list.get(0); } else if (total_matches > 1) { // if there more than one match, check if they all match the identical // resource, throw new StatementException("Ambiguous column name (" + v + ")"); } else { // Should never reach here but we include this exception to keep the // compiler happy. throw new Error("Negative total matches?"); } } /** * Given a Variable object, this will resolve the name into a column name * the database understands (substitutes aliases, etc). */ public Variable resolveVariableName(Variable v) { return resolveColumn(v); } /** * Given an Expression, this will run through the expression and resolve * any variable names via the 'resolveVariableName' method here. */ void resolveExpression(Expression exp) { // NOTE: This gets variables in all function parameters. 
List vars = exp.allVariables(); for (int i = 0; i < vars.size(); ++i) { Variable v = (Variable) vars.get(i); Variable to_set = resolveVariableName(v); v.set(to_set); } } /** * Add an FromTableInterface that is used within this query. These tables * are used when we try to resolve a column name. */ protected void addTable(FromTableInterface table) { table_list.addElement(table); } /** * Sets up internal variables for this statement for derived classes to use. * This is called before 'prepare' and 'isExclusive' is called. *

* It is assumed that any ? style parameters in the StatementTree will have * been resolved previous to a call to this method. * * @param db the DatabaseConnection that will execute this statement. * @param stree the StatementTree that contains the parsed content of the * statement being executed. */ public final void init(DatabaseConnection db, StatementTree stree, SQLQuery query) { this.database = db; this.user = db.getUser(); this.cmd = stree; this.query = query; } /** * Prepares the statement with the given Database object. This is called * before the statement is evaluated. The prepare statement queries the * database and resolves information about the statement (for example, it * resolves column names and aliases and determines the tables that are * touched by this statement so we can lock the appropriate tables before * we evaluate). *

* NOTE: Care must be taken to ensure that all methods called here are safe * in as far as modifications to the data occuring. The rules for * safety should be as follows. If the database is in EXCLUSIVE mode, * then we need to wait until it's switched back to SHARED mode before * this method is called. * All collection of information done here should not involve any table * state info. except for column count, column names, column types, etc. * Queries such as obtaining the row count, selectable scheme information, * and certainly 'getCellContents' must never be called during prepare. * When prepare finishes, the affected tables are locked and the query is * safe to 'evaluate' at which time table state is safe to inspect. */ public abstract void prepare() throws DatabaseException; /** * Evaluates the statement and returns a table that represents the result * set. This is called after 'prepare'. */ public abstract Table evaluate() throws DatabaseException, TransactionException; } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/interpret/TableExpressionFromSet.java000066400000000000000000000364411330501023400312130ustar00rootroot00000000000000/** * com.mckoi.database.interpret.TableExpressionFromSet 01 Nov 2001 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package com.mckoi.database.interpret; import com.mckoi.database.Variable; import com.mckoi.database.TableName; import com.mckoi.database.Expression; import com.mckoi.database.DatabaseSystem; import com.mckoi.database.DatabaseException; import com.mckoi.database.StatementException; import com.mckoi.database.ExpressionPreparer; import com.mckoi.database.CorrelatedVariable; import com.mckoi.database.DatabaseConnection; import java.util.ArrayList; /** * A set of tables and function references that make up the resources made * available by a table expression. When a SelectQueriable is prepared this * object is created and is used to dereference names to sources. It also * has the ability to chain to another TableExpressionFromSet and resolve * references over a complex sub-query hierarchy. * * @author Tobias Downer */ class TableExpressionFromSet { /** * The list of table resources in this set. * (FromTableInterface). */ private ArrayList table_resources; /** * The list of function expression resources. For example, one table * expression may expose a function as 'SELECT (a + b) AS c, ....' in which * case we have a virtual assignment of c = (a + b) in this set. */ private ArrayList function_resources; /** * The list of Variable references in this set that are exposed to the * outside, including function aliases. For example, * SELECT a, b, c, (a + 1) d FROM ABCTable * Would be exposing variables 'a', 'b', 'c' and 'd'. */ private ArrayList exposed_variables; /** * Set to true if this should do case insensitive resolutions. */ private boolean case_insensitive = false; /** * The parent TableExpressionFromSet if one exists. This is used for * chaining a set of table sets together. When chained the * 'globalResolveVariable' method can be used to resolve a reference in the * chain. */ private TableExpressionFromSet parent; /** * Constructs the object. 
*/ public TableExpressionFromSet(DatabaseConnection connection) { table_resources = new ArrayList(); function_resources = new ArrayList(); exposed_variables = new ArrayList(); // Is the database case insensitive? this.case_insensitive = connection.isInCaseInsensitiveMode(); } /** * Sets the parent of this expression. parent can be set to null. */ public void setParent(TableExpressionFromSet parent) { this.parent = parent; } /** * Returns the parent of this set. If it has no parent it returns null. */ public TableExpressionFromSet getParent() { return parent; } /** * Toggle the case sensitivity flag. */ public void setCaseInsensitive(boolean status) { case_insensitive = status; } boolean stringCompare(String str1, String str2) { if (!case_insensitive) { return str1.equals(str2); } return str1.equalsIgnoreCase(str2); } /** * Adds a table resource to the set. */ public void addTable(FromTableInterface table_resource) { table_resources.add(table_resource); } /** * Adds a function resource to the set. Note that is possible for there to * be references in the 'expression' that do not reference resources in this * set. For example, a correlated reference. */ public void addFunctionRef(String name, Expression expression) { // System.out.println("addFunctionRef: " + name + ", " + expression); function_resources.add(name); function_resources.add(expression); } /** * Adds a variable in this from set that is exposed to the outside. This * list should contain all references from the SELECT ... part of the * query. For example, SELECT a, b, (a + 1) d exposes variables * a, b and d. */ public void exposeVariable(Variable v) { // System.out.println("exposeVariable: " + v); // new Error().printStackTrace(); exposed_variables.add(v); } /** * Exposes all the columns from the given FromTableInterface. 
*/ public void exposeAllColumnsFromSource(FromTableInterface table) { Variable[] v = table.allColumns(); for (int p = 0; p < v.length; ++p) { exposeVariable(v[p]); } } /** * Exposes all the columns in all the child tables. */ public void exposeAllColumns() { for (int i = 0; i < setCount(); ++i) { exposeAllColumnsFromSource(getTable(i)); } } /** * Exposes all the columns from the given table name. */ public void exposeAllColumnsFromSource(TableName tn) { FromTableInterface table_interface = findTable(tn.getSchema(), tn.getName()); if (table_interface == null) { throw new StatementException("Table name found: " + tn); } exposeAllColumnsFromSource(table_interface); } /** * Returns a Variable[] array for each variable that is exposed in this * from set. This is a list of fully qualified variables that are * referencable from the final result of the table expression. */ public Variable[] generateResolvedVariableList() { int sz = exposed_variables.size(); Variable[] list = new Variable[sz]; for (int i = 0; i < sz; ++i) { list[i] = new Variable((Variable) exposed_variables.get(i)); } return list; } /** * Returns the first FromTableInterface object that matches the given schema, * table reference. Returns null if no objects with the given schema/name * reference match. */ FromTableInterface findTable(String schema, String name) { for (int p = 0; p < setCount(); ++p) { FromTableInterface table = getTable(p); if (table.matchesReference(null, schema, name)) { return table; } } return null; } /** * Returns the number of FromTableInterface objects in this set. */ int setCount() { return table_resources.size(); } /** * Returns the FromTableInterface object at the given index position in this * set. */ FromTableInterface getTable(int i) { return (FromTableInterface) table_resources.get(i); } /** * Dereferences a fully qualified reference that is within this set. For * example, SELECT ( a + b ) AS z given 'z' would return the expression * (a + b). *

* Returns null if unable to dereference assignment because it does not * exist. */ Expression dereferenceAssignment(Variable v) { TableName tname = v.getTableName(); String var_name = v.getName(); // We are guarenteed not to match with a function if the table name part // of a Variable is present. if (tname != null) { return null; } // Search for the function with this name Expression last_found = null; int matches_found = 0; for (int i = 0; i < function_resources.size(); i += 2) { String fun_name = (String) function_resources.get(i); if (stringCompare(fun_name, var_name)) { if (matches_found > 0) { throw new StatementException("Ambiguous reference '" + v + "'"); } last_found = (Expression) function_resources.get(i + 1); ++matches_found; } } return last_found; } /** * Resolves the given Variable object to an assignment if it's possible to do * so within the context of this set. If the variable can not be * unambiguously resolved to a function or aliased column, a * StatementException is thrown. If the variable isn't assigned to any * function or aliased column, 'null' is returned. */ private Variable resolveAssignmentReference(Variable v) { TableName tname = v.getTableName(); String var_name = v.getName(); // We are guarenteed not to match with a function if the table name part // of a Variable is present. if (tname != null) { return null; } // Search for the function with this name Variable last_found = null; int matches_found = 0; for (int i = 0; i < function_resources.size(); i += 2) { String fun_name = (String) function_resources.get(i); if (stringCompare(fun_name, var_name)) { if (matches_found > 0) { throw new StatementException("Ambiguous reference '" + v + "'"); } last_found = new Variable(fun_name); ++matches_found; } } return last_found; } /** * Resolves the given Variable against the table columns in this from set. * If the variable does not resolve to anything 'null' is returned. If the * variable is ambiguous, a StatementException is thrown. *

* Note that the given variable does not have to be fully qualified but the * returned expressions are fully qualified. */ Variable resolveTableColumnReference(Variable v) { TableName tname = v.getTableName(); String sch_name = null; String tab_name = null; String col_name = v.getName(); if (tname != null) { sch_name = tname.getSchema(); tab_name = tname.getName(); } // Find matches in our list of tables sources, Variable matched_var = null; for (int i = 0; i < table_resources.size(); ++i) { FromTableInterface table = (FromTableInterface) table_resources.get(i); int rcc = table.resolveColumnCount(null, sch_name, tab_name, col_name); if (rcc == 0) { // do nothing if no matches } else if (rcc == 1 && matched_var == null) { // If 1 match and matched_var = null matched_var = table.resolveColumn(null, sch_name, tab_name, col_name); } else { // if (rcc >= 1 and matched_var != null) System.out.println(matched_var); System.out.println(rcc); throw new StatementException("Ambiguous reference '" + v + "'"); } } return matched_var; } /** * Resolves the given Variable object to a fully resolved Variable * within the context of this table expression. If the variable does not * resolve to anything 'null' is returned. If the variable is ambiguous, a * StatementException is thrown. *

* If the variable name references a table column, an expression with a * single Variable element is returned. If the variable name references a * function, an expression of the function is returned. *

* Note that the given variable does not have to be fully qualified but the * returned expressions are fully qualified. */ Variable resolveReference(Variable v) { // Try and resolve against alias names first, ArrayList list = new ArrayList(); // Expression exp = dereferenceAssignment(v); // // If this is an alias like 'a AS b' then add 'a' to the list instead of // // adding 'b'. This allows us to handle a number of ambiguous conditions. // if (exp != null) { // Variable v2 = exp.getVariable(); // if (v2 != null) { // list.add(resolveTableColumnReference(v2)); // } // else { // list.add(resolveAssignmentReference(v)); // } // } Variable function_var = resolveAssignmentReference(v); if (function_var != null) { list.add(function_var); } Variable tc_var = resolveTableColumnReference(v); if (tc_var != null) { list.add(tc_var); } // TableName tname = v.getTableName(); // String sch_name = null; // String tab_name = null; // String col_name = v.getName(); // if (tname != null) { // sch_name = tname.getSchema(); // tab_name = tname.getName(); // } // // // Find matches in our list of tables sources, // for (int i = 0; i < table_resources.size(); ++i) { // FromTableInterface table = (FromTableInterface) table_resources.get(i); // int rcc = table.resolveColumnCount(null, sch_name, tab_name, col_name); // if (rcc == 1) { // Variable matched = // table.resolveColumn(null, sch_name, tab_name, col_name); // list.add(matched); // } // else if (rcc > 1) { // throw new StatementException("Ambiguous reference '" + v + "'"); // } // } // Return the variable if we found one unambiguously. int list_size = list.size(); if (list_size == 0) { return null; } else if (list_size == 1) { return (Variable) list.get(0); } else { // // Check if the variables are the same? 
// Variable cv = (Variable) list.get(0); // for (int i = 1; i < list.size(); ++i) { // if (!cv.equals(list.get(i))) { throw new StatementException("Ambiguous reference '" + v + "'"); // } // } // // If they are all the same return the variable. // return v; } } /** * Resolves the given Variable reference within the chained list of * TableExpressionFromSet objects to a CorrelatedVariable. If the reference * is not found in this set the method recurses to the parent set. The first * unambiguous reference is returned. *

* If resolution is ambiguous within a set, a StatementException is thrown. *

* Returns null if the reference could not be resolved. */ CorrelatedVariable globalResolveReference(int level, Variable v) { Variable nv = resolveReference(v); if (nv == null && getParent() != null) { // If we need to descend to the parent, increment the level. return getParent().globalResolveReference(level + 1, v); } else if (nv != null) { return new CorrelatedVariable(nv, level); } return null; } /** * Attempts to qualify the given Variable object to a value found either * in this from set, or a value in the parent from set. A variable that * is qualified by the parent is called a correlated variable. Any * correlated variables that are successfully qualified are returned as * CorrelatedVariable objects. */ Object qualifyVariable(Variable v_in) { Variable v = resolveReference(v_in); if (v == null) { // If not found, try and resolve in parent set (correlated) if (getParent() != null) { CorrelatedVariable cv = getParent().globalResolveReference(1, v_in); if (cv == null) { throw new StatementException("Reference '" + v_in + "' not found."); } return cv; } if (v == null) { throw new StatementException("Reference '" + v_in + "' not found."); } } return v; } /** * Returns an ExpressionPreparer that qualifies all variables in an * expression to either a qualified Variable or a CorrelatedVariable object. */ ExpressionPreparer expressionQualifier() { return new ExpressionPreparer() { public boolean canPrepare(Object element) { return element instanceof Variable; } public Object prepare(Object element) throws DatabaseException { return qualifyVariable((Variable) element); } }; } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/interpret/TableExpressionRefResolver.java000066400000000000000000000017221330501023400320640ustar00rootroot00000000000000/** * com.mckoi.database.interpret.TableExpressionRefResolver 31 Oct 2001 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.interpret; /** * This class is used to resolve a reference name to a concrete table/column * in a table expression. * * @author Tobias Downer */ class TableExpressionRefResolver { } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/interpret/TableSelectExpression.java000066400000000000000000000121561330501023400310500ustar00rootroot00000000000000/** * com.mckoi.database.interpret.TableSelectExpression 30 Oct 2001 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.interpret; import com.mckoi.database.*; import java.util.*; /** * A container object for the a table select expression, eg. *

 *               SELECT [columns]
 *                 FROM [tables]
 *                WHERE [search_clause]
 *             GROUP BY [column]
 *               HAVING [search_clause]
 * [composite_function] [table_select_expression]
 * 

* Note that a TableSelectExpression can be nested in the various clauses of * this object. * * @author Tobias Downer */ public final class TableSelectExpression implements java.io.Serializable, StatementTreeObject, Cloneable { static final long serialVersionUID = 6946017316981412561L; /** * True if we only search for distinct elements. */ public boolean distinct = false; /** * The list of columns to select from. * (SelectColumn) */ public ArrayList columns = new ArrayList(); /** * The from clause. */ public FromClause from_clause = new FromClause(); /** * The where clause. */ public SearchExpression where_clause = new SearchExpression(); /** * The list of columns to group by. * (ByColumn) */ public ArrayList group_by = new ArrayList(); /** * The group max variable or null if no group max. */ public Variable group_max = null; /** * The having clause. */ public SearchExpression having_clause = new SearchExpression(); /** * If there is a composite function this is set to the composite enumeration * from CompositeTable. */ int composite_function = -1; // (None) /** * If this is an ALL composite (no removal of duplicate rows) it is true. */ boolean is_composite_all; /** * The composite table itself. */ TableSelectExpression next_composite; /** * Constructor. */ public TableSelectExpression() { } /** * Chains a new composite function to this expression. For example, if * this expression is a UNION ALL with another expression it would be * set through this method. 
*/ public void chainComposite(TableSelectExpression expression, String composite, boolean is_all) { this.next_composite = expression; composite = composite.toLowerCase(); if (composite.equals("union")) { composite_function = CompositeTable.UNION; } else if (composite.equals("intersect")) { composite_function = CompositeTable.INTERSECT; } else if (composite.equals("except")) { composite_function = CompositeTable.EXCEPT; } else { throw new Error("Don't understand composite function '" + composite + "'"); } is_composite_all = is_all; } // ---------- Implemented from StatementTreeObject ---------- /** * Prepares all the expressions in the list. */ private static void prepareAllInList( List list, ExpressionPreparer preparer) throws DatabaseException { for (int n = 0; n < list.size(); ++n) { StatementTreeObject ob = (StatementTreeObject) list.get(n); ob.prepareExpressions(preparer); } } public void prepareExpressions(ExpressionPreparer preparer) throws DatabaseException { prepareAllInList(columns, preparer); from_clause.prepareExpressions(preparer); where_clause.prepareExpressions(preparer); prepareAllInList(group_by, preparer); having_clause.prepareExpressions(preparer); // Go to the next chain if (next_composite != null) { next_composite.prepareExpressions(preparer); } } public Object clone() throws CloneNotSupportedException { TableSelectExpression v = (TableSelectExpression) super.clone(); if (columns != null) { v.columns = (ArrayList) StatementTree.cloneSingleObject(columns); } if (from_clause != null) { v.from_clause = (FromClause) from_clause.clone(); } if (where_clause != null) { v.where_clause = (SearchExpression) where_clause.clone(); } if (group_by != null) { v.group_by = (ArrayList) StatementTree.cloneSingleObject(group_by); } if (group_max != null) { v.group_max = (Variable) group_max.clone(); } if (having_clause != null) { v.having_clause = (SearchExpression) having_clause.clone(); } if (next_composite != null) { v.next_composite = (TableSelectExpression) 
next_composite.clone(); } return v; } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/interpret/UpdateTable.java000066400000000000000000000134741330501023400267770ustar00rootroot00000000000000/** * com.mckoi.database.interpret.UpdateTable 14 Sep 2001 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.interpret; import java.util.*; import com.mckoi.database.*; /** * The instance class that stores all the information about an update * statement for processing. * * @author Tobias Downer */ public class UpdateTable extends Statement { /** * The name the table that we are to update. */ String table_name; /** * An array of Assignment objects which represent what we are changing. */ ArrayList column_sets; /** * If the update statement has a 'where' clause, then this is set here. If * it has no 'where' clause then we apply to the entire table. */ SearchExpression where_condition; /** * The limit of the number of rows that are updated by this statement. A * limit of -1 means there is no limit. */ int limit = -1; /** * Tables that are relationally linked to the table being inserted into, set * after 'prepare'. This is used to determine the tables we need to read * lock because we need to validate relational constraints on the tables. */ private ArrayList relationally_linked_tables; // ----- /** * The DataTable we are updating. 
*/ private DataTable update_table; /** * The TableName object set during 'prepare'. */ private TableName tname; /** * The plan for the set of records we are updating in this query. */ private QueryPlanNode plan; // ---------- Implemented from Statement ---------- public void prepare() throws DatabaseException { table_name = (String) cmd.getObject("table_name"); column_sets = (ArrayList) cmd.getObject("assignments"); where_condition = (SearchExpression) cmd.getObject("where_clause"); limit = cmd.getInt("limit"); // --- // Resolve the TableName object. tname = resolveTableName(table_name, database); // Does the table exist? if (!database.tableExists(tname)) { throw new DatabaseException("Table '" + tname + "' does not exist."); } // Get the table we are updating update_table = database.getTable(tname); // Form a TableSelectExpression that represents the select on the table TableSelectExpression select_expression = new TableSelectExpression(); // Create the FROM clause select_expression.from_clause.addTable(table_name); // Set the WHERE clause select_expression.where_clause = where_condition; // Generate the TableExpressionFromSet hierarchy for the expression, TableExpressionFromSet from_set = Planner.generateFromSet(select_expression, database); // Form the plan plan = Planner.formQueryPlan(database, select_expression, from_set, null); // Resolve the variables in the assignments. 
for (int i = 0; i < column_sets.size(); ++i) { Assignment assignment = (Assignment) column_sets.get(i); Variable orig_var = assignment.getVariable(); Variable new_var = from_set.resolveReference(orig_var); if (new_var == null) { throw new StatementException("Reference not found: " + orig_var); } orig_var.set(new_var); assignment.prepareExpressions(from_set.expressionQualifier()); } // Resolve all tables linked to this TableName[] linked_tables = database.queryTablesRelationallyLinkedTo(tname); relationally_linked_tables = new ArrayList(linked_tables.length); for (int i = 0; i < linked_tables.length; ++i) { relationally_linked_tables.add(database.getTable(linked_tables[i])); } } public Table evaluate() throws DatabaseException { DatabaseQueryContext context = new DatabaseQueryContext(database); // Generate a list of Variable objects that represent the list of columns // being changed. Variable[] col_var_list = new Variable[column_sets.size()]; for (int i = 0; i < col_var_list.length; ++i) { Assignment assign = (Assignment) column_sets.get(i); col_var_list[i] = assign.getVariable(); } // Check that this user has privs to update the table. if (!database.getDatabase().canUserUpdateTableObject(context, user, tname, col_var_list)) { throw new UserAccessException( "User not permitted to update table: " + table_name); } // Check the user has select permissions on the tables in the plan. Select.checkUserSelectPermissions(context, user, plan); // Evaluate the plan to find the update set. Table update_set = plan.evaluate(context); // Make an array of assignments Assignment[] assign_list = new Assignment[column_sets.size()]; assign_list = (Assignment[]) column_sets.toArray(assign_list); // Update the data table. int update_count = update_table.update(context, update_set, assign_list, limit); // Notify TriggerManager that we've just done an update. 
if (update_count > 0) { database.notifyTriggerEvent(new TriggerEvent( TriggerEvent.UPDATE, tname.toString(), update_count)); } // Return the number of rows we updated. return FunctionTable.resultTable(context, update_count); } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/interpret/UserManager.java000066400000000000000000000153031330501023400270070ustar00rootroot00000000000000/** * com.mckoi.database.interpret.UserManager 16 Aug 2002 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.interpret; import com.mckoi.database.*; import java.util.List; /** * Handler for User commands for creating, altering and dropping user accounts * in the database. * * @author Tobias Downer */ public class UserManager extends Statement { /** * Private method that sets the user groups and lock status. 
*/ private void internalSetUserGroupsAndLock( DatabaseQueryContext context, String username, Expression[] groups_list, String lock_status) throws DatabaseException { Database db = context.getDatabase(); // Add the user to any groups if (groups_list != null) { // Delete all the groups the user currently belongs to db.deleteAllUserGroups(context, username); for (int i = 0; i < groups_list.length; ++i) { TObject group_tob = groups_list[i].evaluate(null, null, context); String group_str = group_tob.getObject().toString(); db.addUserToGroup(context, username, group_str); } } // Do we lock this user? if (lock_status != null) { if (lock_status.equals("LOCK")) { db.setUserLock(context, user, true); } else { db.setUserLock(context, user, false); } } } /** * Private method that creates a new user. */ private void internalCreateUser( DatabaseQueryContext context, String username, String password_str, Expression[] groups_list, String lock_status) throws DatabaseException { // Create the user Database db = context.getDatabase(); db.createUser(context, username, password_str); internalSetUserGroupsAndLock(context, username, groups_list, lock_status); // Allow all localhost TCP connections. // NOTE: Permissive initial security! db.grantHostAccessToUser(context, username, "TCP", "%"); // Allow all Local connections (from within JVM). db.grantHostAccessToUser(context, username, "Local", "%"); } // ---------- Implemented from Statement ---------- public void prepare() throws DatabaseException { // Nothing to do here } public Table evaluate() throws DatabaseException { DatabaseQueryContext context = new DatabaseQueryContext(database); String command_type = (String) cmd.getObject("type"); String username = (String) cmd.getObject("username"); // True if current user is altering their own user record. boolean modify_own_record = command_type.equals("ALTER USER") && user.getUserName().equals(username); // True if current user is allowed to create and drop users. 
boolean secure_access_privs = context.getDatabase().canUserCreateAndDropUsers(context, user); // Does the user have permissions to do this? They must be part of the // 'secure access' priv group or they are modifying there own record. if (!(modify_own_record || secure_access_privs)) { throw new DatabaseException( "User is not permitted to create, alter or drop user."); } if (username.equalsIgnoreCase("public")) { throw new DatabaseException("Username 'public' is reserved."); } // Are we creating a new user? if (command_type.equals("CREATE USER") || command_type.equals("ALTER USER")) { Expression password = (Expression) cmd.getObject("password_expression"); Expression[] groups_list = (Expression[]) cmd.getObject("groups_list"); String lock_status = (String) cmd.getObject("lock_status"); String password_str = null; if (password != null) { TObject passwd_tob = password.evaluate(null, null, context); password_str = passwd_tob.getObject().toString(); } if (command_type.equals("CREATE USER")) { // -- Creating a new user --- // First try and create the new user, Database db = context.getDatabase(); if (!db.userExists(context, username)) { internalCreateUser(context, username, password_str, groups_list, lock_status); } else { throw new DatabaseException( "User '" + username + "' already exists."); } } else if (command_type.equals("ALTER USER")) { // -- Altering a user -- // If we don't have secure access privs then we need to check that the // user is permitted to change the groups_list and lock_status. // Altering your own password is allowed, but you can't change the // groups you belong to, etc. 
if (!secure_access_privs) { if (groups_list != null) { throw new DatabaseException( "User is not permitted to alter user groups."); } if (lock_status != null) { throw new DatabaseException( "User is not permitted to alter user lock status."); } } Database db = context.getDatabase(); if (db.userExists(context, username)) { if (password_str != null) { db.alterUserPassword(context, username, password_str); } internalSetUserGroupsAndLock(context, username, groups_list, lock_status); } else { throw new DatabaseException("User '" + username + "' doesn't exist."); } } } else if (command_type.equals("DROP USER")) { Database db = context.getDatabase(); if (db.userExists(context, username)) { // Delete the user db.deleteUser(context, username); } else { throw new DatabaseException("User '" + username + "' doesn't exist."); } } else { throw new DatabaseException("Unknown user manager command: " + command_type); } return FunctionTable.resultTable(context, 0); } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/interpret/ViewManager.java000066400000000000000000000157631330501023400270150ustar00rootroot00000000000000/** * com.mckoi.database.interpret.ViewManager 24 Aug 2002 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package com.mckoi.database.interpret; import com.mckoi.database.*; import java.util.ArrayList; import java.util.List; /** * Handler for creating and dropping views in the database. * * @author Tobias Downer */ public class ViewManager extends Statement { /** * The type of command we are running through this ViewManager. */ private String type; /** * The view name to create/drop. */ private String view_name; /** * The view name as a TableName object. */ private TableName vname; /** * If this is a create command, the TableSelectExpression that forms the view. */ private TableSelectExpression select_expression; /** * If this is a create command, the QueryPlanNode that represents the view * plan. */ private QueryPlanNode plan; // ---------- Implemented from Statement ---------- public void prepare() throws DatabaseException { type = (String) cmd.getObject("type"); view_name = (String) cmd.getObject("view_name"); String schema_name = database.getCurrentSchema(); vname = TableName.resolve(schema_name, view_name); vname = database.tryResolveCase(vname); if (type.equals("create")) { // Get the select expression select_expression = (TableSelectExpression) cmd.getObject("select_expression"); // Get the column name list ArrayList col_list = (ArrayList) cmd.getObject("column_list"); // Generate the TableExpressionFromSet hierarchy for the expression, TableExpressionFromSet from_set = Planner.generateFromSet(select_expression, database); // Form the plan plan = Planner.formQueryPlan(database, select_expression, from_set, new ArrayList()); // Wrap the result around a SubsetNode to alias the columns in the // table correctly for this view. int sz = (col_list == null) ? 
0 : col_list.size(); Variable[] original_vars = from_set.generateResolvedVariableList(); Variable[] new_column_vars = new Variable[original_vars.length]; if (sz > 0) { if (sz != original_vars.length) { throw new StatementException( "Column list is not the same size as the columns selected."); } for (int i = 0; i < sz; ++i) { String col_name = (String) col_list.get(i); new_column_vars[i] = new Variable(vname, col_name); } } else { sz = original_vars.length; for (int i = 0; i < sz; ++i) { new_column_vars[i] = new Variable(vname, original_vars[i].getName()); } } // Check there are no repeat column names in the table. for (int i = 0; i < sz; ++i) { Variable cur_v = new_column_vars[i]; for (int n = i + 1; n < sz; ++n) { if (new_column_vars[n].equals(cur_v)) { throw new DatabaseException( "Duplicate column name '" + cur_v + "' in view. " + "A view may not contain duplicate column names."); } } } // Wrap the plan around a SubsetNode plan plan = new QueryPlan.SubsetNode(plan, original_vars, new_column_vars); } } public Table evaluate() throws DatabaseException { DatabaseQueryContext context = new DatabaseQueryContext(database); if (type.equals("create")) { // Does the user have privs to create this tables? if (!database.getDatabase().canUserCreateTableObject(context, user, vname)) { throw new UserAccessException( "User not permitted to create view: " + view_name); } // Does the schema exist? boolean ignore_case = database.isInCaseInsensitiveMode(); SchemaDef schema = database.resolveSchemaCase(vname.getSchema(), ignore_case); if (schema == null) { throw new DatabaseException("Schema '" + vname.getSchema() + "' doesn't exist."); } else { vname = new TableName(schema.getName(), vname.getName()); } // Check the permissions for this user to select from the tables in the // given plan. Select.checkUserSelectPermissions(context, user, plan); // Does the table already exist? 
if (database.tableExists(vname)) { throw new DatabaseException("View or table with name '" + vname + "' already exists."); } // Before evaluation, make a clone of the plan, QueryPlanNode plan_copy; try { plan_copy = (QueryPlanNode) plan.clone(); } catch (CloneNotSupportedException e) { Debug().writeException(e); throw new DatabaseException("Clone error: " + e.getMessage()); } // We have to execute the plan to get the DataTableDef that represents the // result of the view execution. Table t = plan.evaluate(context); DataTableDef data_table_def = new DataTableDef(t.getDataTableDef()); data_table_def.setTableName(vname); // Create a ViewDef object, ViewDef view_def = new ViewDef(data_table_def, plan_copy); // And create the view object, database.createView(query, view_def); // The initial grants for a view is to give the user who created it // full access. database.getGrantManager().addGrant( Privileges.TABLE_ALL_PRIVS, GrantManager.TABLE, vname.toString(), user.getUserName(), true, Database.INTERNAL_SECURE_USERNAME); } else if (type.equals("drop")) { // Does the user have privs to drop this tables? if (!database.getDatabase().canUserDropTableObject(context, user, vname)) { throw new UserAccessException( "User not permitted to drop view: " + view_name); } // Drop the view object database.dropView(vname); // Drop the grants for this object database.getGrantManager().revokeAllGrantsOnObject( GrantManager.TABLE, vname.toString()); } else { throw new Error("Unknown view command type: " + type); } return FunctionTable.resultTable(context, 0); } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/interpret/package.html000066400000000000000000000004011330501023400262050ustar00rootroot00000000000000 com.mckoi.database.interpret - Interpret specific SQL statements

These classes interpret specific SQL statements, such as Insert and Delete. mckoisqldb-1.0.6/src/main/java/com/mckoi/database/jdbc/000077500000000000000000000000001330501023400226175ustar00rootroot00000000000000mckoisqldb-1.0.6/src/main/java/com/mckoi/database/jdbc/AbstractStreamableObject.java000066400000000000000000000074611330501023400303640ustar00rootroot00000000000000/** * com.mckoi.database.jdbc.AbstractStreamableObject 31 Jan 2003 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.jdbc; import java.io.*; import java.sql.SQLException; import com.mckoi.util.PagedInputStream; /** * An abstract class that provides various convenience behaviour for * creating streamable java.sql.Blob and java.sql.Clob classes. A streamable * object is typically a large object that can be fetched in separate pieces * from the server. A streamable object only survives for as long as the * ResultSet that it is part of is open. * * @author Tobias Downer */ abstract class AbstractStreamableObject { /** * The MConnection object that this object was returned as part of the result * of. */ protected final MConnection connection; /** * The result_id of the ResultSet this clob is from. */ protected final int result_set_id; /** * The streamable object identifier. */ private final long streamable_object_id; /** * The type of encoding of the stream. 
*/ private final byte type; /** * The size of the streamable object. */ private final long size; /** * Constructor. */ AbstractStreamableObject(MConnection connection, int result_set_id, byte type, long streamable_object_id, long size) { this.connection = connection; this.result_set_id = result_set_id; this.type = type; this.streamable_object_id = streamable_object_id; this.size = size; } /** * Returns the streamable object identifier for referencing this streamable * object on the server. */ protected long getStreamableId() { return streamable_object_id; } /** * Returns the encoding type of this object. */ protected byte getType() { return type; } /** * Returns the number of bytes in this streamable object. Note that this * may not represent the actual size of the object when it is decoded. For * example, a Clob may be encoded as 2-byte per character (unicode) so the * actual length of the clob with be size / 2. */ protected long rawSize() { return size; } // ---------- Inner classes ---------- /** * An InputStream that is used to read the data from the streamable object as * a basic byte encoding. This maintains an internal buffer. */ class StreamableObjectInputStream extends PagedInputStream { /** * The default size of the buffer. */ private final static int B_SIZE = 64 * 1024; /** * Construct the input stream. 
*/ public StreamableObjectInputStream(long in_size) { super(B_SIZE, in_size); } protected void readPageContent(byte[] buf, long pos, int length) throws IOException { try { // Request a part of the blob from the server StreamableObjectPart part = connection.requestStreamableObjectPart( result_set_id, streamable_object_id, pos, length); System.arraycopy(part.getContents(), 0, buf, 0, length); } catch (SQLException e) { throw new IOException("SQL Error: " + e.getMessage()); } } } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/jdbc/AsciiInputStream.java000066400000000000000000000037221330501023400267120ustar00rootroot00000000000000/** * com.mckoi.database.jdbc.AsciiInputStream 21 Jul 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.jdbc; import java.io.*; /** * An InputStream that converts a Reader to a plain ascii stream. This * cuts out the top 8 bits of the unicode char. 
* * @author Tobias Downer */ class AsciiInputStream extends InputStream { // extends InputStreamFilter { private Reader reader; public AsciiInputStream(Reader reader) { this.reader = reader; } public AsciiInputStream(String s) { this(new StringReader(s)); } public int read() throws IOException { int i = reader.read(); if (i == -1) return i; else return (i & 0x0FF); } public int read(byte[] b, int off, int len) throws IOException { int end = off + len; int read_count = 0; for (int i = off; i < end; ++i) { int val = read(); if (val == -1) { if (read_count == 0) { return -1; } else { return read_count; } } b[i] = (byte) val; ++read_count; } return read_count; } public long skip(long n) throws IOException { return reader.skip(n); } public int available() throws IOException { // NOTE: This is valid according to JDBC spec. return 0; } public void reset() throws IOException { reader.reset(); } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/jdbc/AsciiReader.java000066400000000000000000000045401330501023400256400ustar00rootroot00000000000000/** * com.mckoi.database.jdbc.AsciiReader 01 Feb 2003 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.jdbc; import java.io.Reader; import java.io.InputStream; import java.io.IOException; /** * A java.io.Reader implementation that wraps around an ascii input stream * (8-bit per char stream). 
* * @author Tobias Downer */ public final class AsciiReader extends Reader { /** * The 8-bit per character Ascii input straem. */ private InputStream input; /** * Constructs the reader. */ public AsciiReader(InputStream input) { this.input = input; } // ---------- Implemented from Reader ---------- public int read() throws IOException { int v = input.read(); if (v == -1) { return -1; } else { return (char) v; } } public int read(char[] buf, int off, int len) throws IOException { if (len < 0) { throw new IOException("len < 0"); } if (off < 0 || off + len > buf.length) { throw new IOException("Out of bounds."); } if (len == 0) { return 0; } int read = 0; while (len > 0) { int v = input.read(); if (v == -1) { if (read == 0) { return -1; } else { return read; } } buf[off] = (char) v; ++off; ++read; --len; } return read; } public long skip(long n) throws IOException { return input.skip(n); } public boolean ready() throws IOException { return false; } public void mark(int readAheadLimit) throws IOException { input.mark(readAheadLimit); } public void reset() throws IOException { input.reset(); } public void close() throws IOException { input.close(); } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/jdbc/BinaryToUnicodeReader.java000066400000000000000000000052571330501023400276540ustar00rootroot00000000000000/** * com.mckoi.database.jdbc.BinaryToUnicodeReader 01 Feb 2003 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.jdbc; import java.io.IOException; import java.io.InputStream; import java.io.Reader; /** * A Reader implementation that wraps around a unicode encoded input stream * that encodes each unicode character as 2 bytes. See * UnicodeToBinaryStream for the InputStream version of this class. * * @author Tobias Downer */ public final class BinaryToUnicodeReader extends Reader { /** * The wrapped InputStream. */ private InputStream input; /** * Constructor. Note that we would typically assume that the given * InputStream employs some type of buffering and that calls to 'read' are * buffered and therefore work quickly. */ public BinaryToUnicodeReader(InputStream input) { this.input = input; } // ---------- Implemented from Reader ---------- public int read() throws IOException { int v1 = input.read(); if (v1 == -1) { return -1; } int v2 = input.read(); if (v2 == -1) { return -1; } return (v1 << 8) + v2; } public int read(char[] buf, int off, int len) throws IOException { if (len < 0) { throw new IOException("len < 0"); } if (off < 0 || off + len > buf.length) { throw new IOException("Out of bounds."); } if (len == 0) { return 0; } int read = 0; while (len > 0) { int v = read(); if (v == -1) { if (read == 0) { return -1; } else { return read; } } buf[off] = (char) v; ++off; ++read; --len; } return read; } public long skip(long n) throws IOException { return input.skip(n * 2) / 2; } public boolean ready() throws IOException { return false; } public void mark(int readAheadLimit) throws IOException { input.mark(readAheadLimit); } public void reset() throws IOException { input.reset(); } public void close() throws IOException { input.close(); } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/jdbc/DatabaseCallBack.java000066400000000000000000000027031330501023400265450ustar00rootroot00000000000000/** * com.mckoi.database.jdbc.DatabaseCallBack 02 
Oct 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.jdbc; /** * An interface that is input to the DatabaseInterface as a way to be * notified of event information from inside the database. * * @author Tobias Downer */ public interface DatabaseCallBack { /** * Called when the database has generated an event that this user is * listening for. *

* NOTE: The thread that calls back these events is always a volatile * thread that may not block. It is especially important that no queries * are executed when this calls back. To safely act on events, it is * advisable to dispatch onto another thread such as the * SwingEventDispatcher thread. */ void databaseEvent(int event_type, String event_message); } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/jdbc/DatabaseInterface.java000066400000000000000000000131311330501023400270060ustar00rootroot00000000000000/** * com.mckoi.database.jdbc.DatabaseInterface 15 Aug 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.jdbc; import java.sql.SQLException; /** * The interface with the Database whether it be remotely via TCP/IP or * locally within the current JVM. * * @author Tobias Downer */ public interface DatabaseInterface { /** * Attempts to log in to the database as the given username with the given * password. Only one user may be authenticated per connection. This must * be called before the other methods are used. *

* A DatabaseCallBack implementation must be given here that is notified * of all events from the database. Events are only received if the * login was successful. */ boolean login(String default_schema, String username, String password, DatabaseCallBack call_back) throws SQLException; /** * Pushes a part of a streamable object from the client onto the server. The * server stores the large object for use with a future query. For example, * a sequence of with a query with large objects may operate as follows; *

   * 1) Push 100 MB object (id = 104)
   * 2) execQuery with query that contains a streamable object with id 104
   * 

* Note that the client may push any part of a streamable object onto the * server, however the streamable object must have been completely pushed * for the query to execute correctly. For example, an 100 MB byte array may * be pushed onto the server in blocks of 64K (in 1,600 separate blocks). *

* @param type the StreamableObject type (1 = byte array, 2 = char array) * @param object_id the identifier of the StreamableObject for future queries. * @param object_length the total length of the StreamableObject. * @param buf the byte[] array representing the block of information being * sent. * @param offset the offset into of the object of this block. * @param length the length of the block being pushed. */ void pushStreamableObjectPart(byte type, long object_id, long object_length, byte[] buf, long offset, int length) throws SQLException; /** * Executes the query and returns a QueryResponse object that describes the * result of the query. The QueryResponse object describes the number of * rows, describes the columns, etc. This method will block until the query * has completed. The QueryResponse can be used to obtain the 'result id' * variable that is used in subsequent queries to the engine to retrieve * the actual result of the query. */ QueryResponse execQuery(SQLQuery sql) throws SQLException; /** * Returns a part of a result set. The result set part is referenced via the * 'result id' found in the QueryResponse. This is used to read parts * of the query once it has been found via 'execQuery'. *

* The returned List object contains the result requested. *

* If the result contains any StreamableObject objects, then the server * allocates a channel to the object via the 'getStreamableObjectPart' and * the identifier of the StreamableObject. The channel may only be disposed * if the 'disposeStreamableObject' method is called. */ ResultPart getResultPart(int result_id, int row_number, int row_count) throws SQLException; /** * Disposes of a result of a query on the server. This frees up server side * resources allocated to a query. This should be called when the ResultSet * of a query closes. We should try and use this method as soon as possible * because it frees locks on tables and allows deleted rows to be * reclaimed. */ void disposeResult(int result_id) throws SQLException; /** * Returns a section of a large binary or character stream in a result set. * This is used to stream large values over the connection. For example, if * a row contained a multi megabyte object and the client is only interested * in the first few characters and the last few characters of the stream. * This would require only a few queries to the database and the multi- * megabyte object would not need to be downloaded to the client in its * entirety. */ StreamableObjectPart getStreamableObjectPart(int result_id, long streamable_object_id, long offset, int len) throws SQLException; /** * Disposes a streamable object channel with the given identifier. This * should be called to free any resources on the server associated with the * object. It should be called as soon as possible because it frees locks on * the tables and allows deleted rows to be reclaimed. */ void disposeStreamableObject(int result_id, long streamable_object_id) throws SQLException; /** * Called when the connection is disposed. This will terminate the * connection if there is any connection to terminate. 
*/ void dispose() throws SQLException; } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/jdbc/LocalBootable.java000066400000000000000000000046131330501023400261700ustar00rootroot00000000000000/** * com.mckoi.database.jdbc.LocalBootable 16 Aug 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.jdbc; import com.mckoi.database.control.DBConfig; import java.io.File; import java.sql.SQLException; /** * An interface that is implemented by an object that boots up the database. * This is provided as an interface so that we aren't dependant on the * entire database when compiling the JDBC code. * * @author Tobias Downer */ public interface LocalBootable { /** * Attempts to create a new database system with the given name, and the * given username/password as the admin user for the system. Once created, * the newly created database will be booted up. * * @param config the configuration variables. * @returns a DatabaseInterface for talking to the database. */ DatabaseInterface create(String username, String password, DBConfig config) throws SQLException; /** * Boots the database with the given configuration. * * @param config the configuration variables. * @returns a DatabaseInterface for talking to the database. */ DatabaseInterface boot(DBConfig config) throws SQLException; /** * Attempts to test if the database exists or not. 
Returns true if the * database exists. * * @param config the configuration variables. */ boolean checkExists(DBConfig config) throws SQLException; /** * Returns true if there is a database currently booted in the current * JVM. Otherwise returns false. */ boolean isBooted() throws SQLException; /** * Connects this interface to the database currently running in this JVM. * * @returns a DatabaseInterface for talking to the database. */ DatabaseInterface connectToJVM() throws SQLException; } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/jdbc/MBlob.java000066400000000000000000000104721330501023400244610ustar00rootroot00000000000000/** * com.mckoi.database.jdbc.MBlob 14 Oct 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.jdbc; import java.sql.*; import java.io.*; import com.mckoi.database.global.ByteLongObject; /** * An implementation of an sql.Blob object. This implementation keeps the * entire Blob in memory. *

* NOTE: java.sql.Blob is only available in JDBC 2.0 * * @author Tobias Downer */ class MBlob implements Blob { /** * The ByteLongObject that is a container for the data in this blob. */ private ByteLongObject blob; /** * Constructs the blob. */ MBlob(ByteLongObject blob) { this.blob = blob; } // ---------- Implemented from Blob ---------- public long length() throws SQLException { return blob.length(); } public byte[] getBytes(long pos, int length) throws SQLException { // First byte is at position 1 according to JDBC Spec. --pos; if (pos < 0 || pos + length > length()) { throw new SQLException("Out of bounds."); } byte[] buf = new byte[length]; System.arraycopy(blob.getByteArray(), (int) pos, buf, 0, length); return buf; } public InputStream getBinaryStream() throws SQLException { return new ByteArrayInputStream(blob.getByteArray(), 0, (int) length()); } public long position(byte[] pattern, long start) throws SQLException { byte[] buf = blob.getByteArray(); int len = (int) length(); int max = ((int) length()) - pattern.length; int i = (int) (start - 1); while (true) { // Look for first byte... while (i <= max && buf[i] != pattern[0]) { ++i; } // Reached end so exit.. if (i > max) { return -1; } // Found first character, so look for the rest... 
int search_from = i; int found_index = 1; while ( found_index < pattern.length && buf[search_from] == pattern[found_index] ) { ++search_from; ++found_index; } ++i; if (found_index >= pattern.length) { return (long) i; } } } public long position(Blob pattern, long start) throws SQLException { byte[] buf; // Optimize if MBlob, if (pattern instanceof MBlob) { buf = ((MBlob) pattern).blob.getByteArray(); } else { buf = pattern.getBytes(0, (int) pattern.length()); } return position(buf, start); } //#IFDEF(JDBC3.0) // -------------------------- JDBC 3.0 ----------------------------------- public int setBytes(long pos, byte[] bytes) throws SQLException { throw new SQLException("BLOB updating is not supported"); } public int setBytes(long pos, byte[] bytes, int offset, int len) throws SQLException { throw new SQLException("BLOB updating is not supported"); } public java.io.OutputStream setBinaryStream(long pos) throws SQLException { throw new SQLException("BLOB updating is not supported"); } public void truncate(long len) throws SQLException { throw new SQLException("BLOB updating is not supported"); } //#ENDIF //#IFDEF(JDBC4.0) // -------------------------- JDK 1.6 ----------------------------------- public void free() throws SQLException { } public InputStream getBinaryStream(long pos, long length) throws SQLException { long s = pos; long e = pos + length; if (s > Integer.MAX_VALUE || s < 0 || e > Integer.MAX_VALUE || e < 0 || s > e) { throw new java.lang.IndexOutOfBoundsException(); } return new ByteArrayInputStream(blob.getByteArray(), (int) pos, (int) length); } //#ENDIF } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/jdbc/MClob.java000066400000000000000000000061731330501023400244650ustar00rootroot00000000000000/** * com.mckoi.database.jdbc.MClob 31 Jan 2003 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.jdbc; import java.sql.SQLException; import java.sql.Clob; import java.io.StringReader; import java.io.Reader; /** * An implementation of java.sql.Clob over a java.util.String object. * * @author Tobias Downer */ class MClob implements Clob { /** * The string the Clob is based on. */ private String str; /** * Constructs the Clob implementation. */ public MClob(String str) { this.str = str; } // ---------- Implemented from Clob ---------- public long length() throws SQLException { return str.length(); } public String getSubString(long pos, int length) throws SQLException { int p = (int) (pos - 1); return str.substring(p, p + length); } public Reader getCharacterStream() throws SQLException { return new StringReader(str); } public java.io.InputStream getAsciiStream() throws SQLException { return new AsciiInputStream(getCharacterStream()); } public long position(String searchstr, long start) throws SQLException { throw MSQLException.unsupported(); } public long position(Clob searchstr, long start) throws SQLException { throw MSQLException.unsupported(); } //#IFDEF(JDBC3.0) //---------------------------- JDBC 3.0 ----------------------------------- public int setString(long pos, String str) throws SQLException { throw MSQLException.unsupported(); } public int setString(long pos, String str, int offset, int len) throws SQLException { throw MSQLException.unsupported(); } public java.io.OutputStream 
setAsciiStream(long pos) throws SQLException { throw MSQLException.unsupported(); } public java.io.Writer setCharacterStream(long pos) throws SQLException { throw MSQLException.unsupported(); } public void truncate(long len) throws SQLException { throw MSQLException.unsupported(); } //#ENDIF //#IFDEF(JDBC4.0) // -------------------------- JDK 1.6 ----------------------------------- public void free() throws SQLException { } public Reader getCharacterStream(long pos, long length) throws SQLException { long s = pos; long e = pos + length; if (s > Integer.MAX_VALUE || s < 0 || e > Integer.MAX_VALUE || e < 0 || s > e) { throw new java.lang.IndexOutOfBoundsException(); } return new StringReader(str.substring((int) s, (int) e)); } //#ENDIF } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/jdbc/MConnection.java000066400000000000000000000760361330501023400257120ustar00rootroot00000000000000/** * com.mckoi.database.jdbc.MConnection 20 Jul 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package com.mckoi.database.jdbc; import com.mckoi.database.global.ColumnDescription; import com.mckoi.database.global.StreamableObject; import java.io.*; import java.sql.*; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Properties; import java.util.StringTokenizer; import java.util.concurrent.Executor; /** * JDBC implementation of the connection object to a Mckoi database. The * implementation specifics for how the connection talks with the database * is left up to the implementation of DatabaseInterface. *

* This object is thread safe. It may be accessed safely from concurrent * threads. * * @author Tobias Downer */ public class MConnection implements Connection, DatabaseCallBack { /** * A cache of all rows retrieved from the server. This cuts down the * number of requests to the server by caching rows that are accessed * frequently. Note that cells are only cached within a ResultSet bounds. * Two different ResultSet's will not share cells in the cache. */ private RowCache row_cache; /** * The JDBC URL used to make this connection. */ private String url; /** * SQL warnings for this connection. */ private SQLWarning head_warning; /** * Set to true if the connection is closed. */ private boolean is_closed; /** * Set to true if the connection is in auto-commit mode. (By default, * auto_commit is enabled). */ private boolean auto_commit; /** * The interface to the database. */ private DatabaseInterface db_interface; /** * The list of trigger listeners registered with the connection. */ private final List trigger_list; /** * A Thread that handles all dispatching of trigger events to the JDBC * client. */ private TriggerDispatchThread trigger_thread; /** * If the ResultSet.getObject method should return the raw object type (eg. * BigDecimal for Integer, String for chars, etc) then this is set to false. * If this is true (the default) the 'getObject' methods return the * correct object types as specified by the JDBC specification. */ private boolean strict_get_object; /** * If the ResultSetMetaData.getColumnName method should return a succinct * form of the column name as most JDBC implementations do, this should * be set to false (the default). If old style verbose column names should * be returned for compatibility with older Mckoi applications, this is * set to true. */ private boolean verbose_column_names; /** * This is set to true if the MResultSet column lookup methods are case * insensitive. 
This should be set to true for any database that has * case insensitive identifiers. */ private boolean case_insensitive_identifiers; /** * A mapping from a streamable object id to InputStream used to represent * the object when being uploaded to the database engine. */ private final Map s_object_hold; /** * An unique id count given to streamable object being uploaded to the * server. */ private long s_object_id; // For synchronization in this object, private final Object lock = new Object(); /** * Constructor. */ public MConnection(String url, DatabaseInterface db_interface, int cache_size, int max_size) { this.url = url; this.db_interface = db_interface; is_closed = true; auto_commit = true; trigger_list = new ArrayList(); strict_get_object = true; verbose_column_names = false; case_insensitive_identifiers = false; row_cache = new RowCache(cache_size, max_size); s_object_hold = new HashMap(); s_object_id = 0; } /** * Toggles strict get object. *

* If the 'getObject' method should return the raw object type (eg. * BigDecimal for Integer, String for chars, etc) then this is set to false. * If this is true (the default) the 'getObject' methods return the * correct object types as specified by the JDBC specification. *

* The default is true. */ public void setStrictGetObject(boolean status) { strict_get_object = status; } /** * Returns true if strict get object is enabled (default). */ public boolean isStrictGetObject() { return strict_get_object; } /** * Toggles verbose column names from ResultSetMetaData. *

* If this is set to true, getColumnName will return 'APP.Part.id' for a * column name. If it is false getColumnName will return 'id'. This * property is for compatibility with older Mckoi applications. */ public void setVerboseColumnNames(boolean status) { verbose_column_names = status; } /** * Returns true if ResultSetMetaData should return verbose column names. */ public boolean verboseColumnNames() { return verbose_column_names; } /** * Toggles whether this connection is handling identifiers as case * insensitive or not. If this is true then 'getString("app.id")' will * match against 'APP.id', etc. */ public void setCaseInsensitiveIdentifiers(boolean status) { case_insensitive_identifiers = status; } /** * Returns true if the database has case insensitive identifiers. */ public boolean isCaseInsensitiveIdentifiers() { return case_insensitive_identifiers; } // private static void printByteArray(byte[] array) { // System.out.println("Length: " + array.length); // for (int i = 0; i < array.length; ++i) { // System.out.print(array[i]); // System.out.print(", "); // } // } /** * Returns the row Cache object for this connection. */ protected final RowCache getRowCache() { return row_cache; } /** * Adds a new SQLWarning to the chain. */ protected final void addSQLWarning(SQLWarning warning) { synchronized (lock) { if (head_warning == null) { head_warning = warning; } else { head_warning.setNextWarning(warning); } } } /** * Closes this connection by calling the 'dispose' method in the database * interface. */ public final void internalClose() throws SQLException { synchronized (lock) { if (!isClosed()) { try { db_interface.dispose(); } finally { is_closed = true; } } } } /** * Returns this MConnection wrapped in a MckoiConnection object. */ MckoiConnection getMckoiConnection() { return new MckoiConnection(this); } /** * Attempts to login to the database interface with the given default schema, * username and password. 
If the authentication fails an SQL exception is * generated. */ public void login(String default_schema, String username, String password) throws SQLException { synchronized (lock) { if (!is_closed) { throw new SQLException( "Unable to login to connection because it is open."); } } if (username == null || username.equals("") || password == null || password.equals("")) { throw new SQLException("username or password have not been set."); } // Set the default schema to username if it's null if (default_schema == null) { default_schema = username; } // Login with the username/password boolean li = db_interface.login(default_schema, username, password, this); synchronized (lock) { is_closed = !li; } if (!li) { throw new SQLException("User authentication failed for: " + username); } // Determine if this connection is case insensitive or not, setCaseInsensitiveIdentifiers(false); Statement stmt = createStatement(); ResultSet rs = stmt.executeQuery("SHOW CONNECTION_INFO"); while (rs.next()) { String key = rs.getString(1); if (key.equals("case_insensitive_identifiers")) { String val = rs.getString(2); setCaseInsensitiveIdentifiers(val.equals("true")); } else if (key.equals("auto_commit")) { String val = rs.getString(2); auto_commit = val.equals("true"); } } rs.close(); stmt.close(); } // ---------- Package Protected ---------- /** * Returns the url string used to make this connection. */ String getURL() { return url; } /** * Logs into the JDBC server running on a remote machine. Throws an * exception if user authentication fails. */ void login(Properties info, String default_schema) throws SQLException { String username = info.getProperty("user", ""); String password = info.getProperty("password", ""); login(default_schema, username, password); } // /** // * Cancels a result set that is downloading. 
// */ // void cancelResultSet(MResultSet result_set) throws SQLException { // disposeResult(result_set.getResultID()); // //// connection_thread.disposeResult(result_set.getResultID()); // } /** * Uploads any streamable objects found in an SQLQuery into the database. */ private void uploadStreamableObjects(SQLQuery sql) throws SQLException { // Push any streamable objects that are present in the query onto the // server. Object[] vars = sql.getVars(); try { for (int i = 0; i < vars.length; ++i) { // For each streamable object. if (vars[i] != null && vars[i] instanceof StreamableObject) { // Buffer size is fixed to 64 KB final int BUF_SIZE = 64 * 1024; StreamableObject s_object = (StreamableObject) vars[i]; long offset = 0; final byte type = s_object.getType(); final long total_len = s_object.getSize(); final long id = s_object.getIdentifier(); final byte[] buf = new byte[BUF_SIZE]; // Get the InputStream from the StreamableObject hold Object sob_id = new Long(id); InputStream i_stream = (InputStream) s_object_hold.get(sob_id); if (i_stream == null) { throw new RuntimeException( "Assertion failed: Streamable object InputStream is not available."); } while (offset < total_len) { // Fill the buffer int index = 0; final int block_read = (int) Math.min((long) BUF_SIZE, (total_len - offset)); int to_read = block_read; while (to_read > 0) { int count = i_stream.read(buf, index, to_read); if (count == -1) { throw new IOException("Premature end of stream."); } index += count; to_read -= count; } // Send the part of the streamable object to the database. db_interface.pushStreamableObjectPart(type, id, total_len, buf, offset, block_read); // Increment the offset and upload the next part of the object. offset += block_read; } // Remove the streamable object once it has been written s_object_hold.remove(sob_id); // [ Don't close the input stream - we may only want to put a part of // the stream into the database and keep the file open. 
] // // Close the input stream // i_stream.close(); } } } catch (IOException e) { e.printStackTrace(System.err); throw new SQLException("IO Error pushing large object to server: " + e.getMessage()); } } /** * Sends the batch of SQLQuery objects to the database to be executed. The * given array of MResultSet will be the consumer objects for the query * results. If a query succeeds then we are guarenteed to know that size of * the result set. *

* This method blocks until all of the queries have been processed by the * database. */ void executeQueries(SQLQuery[] queries, MResultSet[] results) throws SQLException { // For each query for (int i = 0; i < queries.length; ++i) { executeQuery(queries[i], results[i]); } } /** * Sends the SQL string to the database to be executed. The given MResultSet * is the consumer for the results from the database. We are guarenteed * that if the query succeeds that we know the size of the result set and * at least first first row of the set. *

* This method will block until we have received the result header * information. */ void executeQuery(SQLQuery sql, MResultSet result_set) throws SQLException { uploadStreamableObjects(sql); // Execute the query, QueryResponse resp = db_interface.execQuery(sql); // The format of the result ColumnDescription[] col_list = new ColumnDescription[resp.getColumnCount()]; for (int i = 0; i < col_list.length; ++i) { col_list[i] = resp.getColumnDescription(i); } // Set up the result set to the result format and update the time taken to // execute the query on the server. result_set.connSetup(resp.getResultID(), col_list, resp.getRowCount()); result_set.setQueryTime(resp.getQueryTimeMillis()); } /** * Called by MResultSet to query a part of a result from the server. Returns * a List that represents the result from the server. */ ResultPart requestResultPart(int result_id, int start_row, int count_rows) throws SQLException { return db_interface.getResultPart(result_id, start_row, count_rows); } /** * Requests a part of a streamable object from the server. */ StreamableObjectPart requestStreamableObjectPart(int result_id, long streamable_object_id, long offset, int len) throws SQLException { return db_interface.getStreamableObjectPart(result_id, streamable_object_id, offset, len); } /** * Disposes of the server-side resources associated with the result set with * result_id. This should be called either before we start the download of * a new result set, or when we have finished with the resources of a result * set. */ void disposeResult(int result_id) throws SQLException { // Clear the row cache. // It would be better if we only cleared row entries with this // table_id. We currently clear the entire cache which means there will // be traffic created for other open result sets. 
// System.out.println(result_id); // row_cache.clear(); // Only dispose if the connection is open if (!is_closed) { db_interface.disposeResult(result_id); } } /** * Adds a TriggerListener that listens for all triggers events with the name * given. Triggers are created with the 'CREATE TRIGGER' syntax. */ void addTriggerListener(String trigger_name, TriggerListener listener) { synchronized (trigger_list) { trigger_list.add(trigger_name); trigger_list.add(listener); } } /** * Removes the TriggerListener for the given trigger name. */ void removeTriggerListener(String trigger_name, TriggerListener listener) { synchronized (trigger_list) { for (int i = trigger_list.size() - 2; i >= 0; i -= 2) { if (trigger_list.get(i).equals(trigger_name) && trigger_list.get(i + 1).equals(listener)) { trigger_list.remove(i); trigger_list.remove(i); } } } } /** * Creates a StreamableObject on the JDBC client side given an InputStream, * and length and a type. When this method returns, a StreamableObject * entry will be added to the hold. */ StreamableObject createStreamableObject(InputStream x, int length, byte type) { long ob_id; synchronized (s_object_hold) { ob_id = s_object_id; ++s_object_id; // Add the stream to the hold and get the unique id s_object_hold.put(new Long(ob_id), x); } // Create and return the StreamableObject return new StreamableObject(type, length, ob_id); } /** * Removes the StreamableObject from the hold on the JDBC client. This should * be called when the MPreparedStatement closes. */ void removeStreamableObject(StreamableObject s_object) { s_object_hold.remove(new Long(s_object.getIdentifier())); } // ---------- Implemented from DatabaseCallBack ---------- // NOTE: For JDBC standalone apps, the thread that calls this will be a // WorkerThread. // For JDBC client/server apps, the thread that calls this will by the // connection thread that listens for data from the server. 
public void databaseEvent(int event_type, String event_message) { if (event_type == 99) { if (trigger_thread == null) { trigger_thread = new TriggerDispatchThread(); trigger_thread.start(); } trigger_thread.dispatchTrigger(event_message); } else { throw new Error("Unrecognised database event: " + event_type); } // System.out.println("[com.mckoi.jdbc.MConnection] Event received:"); // System.out.println(event_type); // System.out.println(event_message); } // ---------- Implemented from Connection ---------- public Statement createStatement() throws SQLException { return new MStatement(this); } public PreparedStatement prepareStatement(String sql) throws SQLException { return new MPreparedStatement(this, sql); } public CallableStatement prepareCall(String sql) throws SQLException { throw MSQLException.unsupported(); } public String nativeSQL(String sql) throws SQLException { // We don't do any client side parsing of the sql statement. return sql; } public void setAutoCommit(boolean autoCommit) throws SQLException { // The SQL to put into auto-commit mode. ResultSet result; if (autoCommit) { result = createStatement().executeQuery("SET AUTO COMMIT ON"); auto_commit = true; result.close(); } else { result = createStatement().executeQuery("SET AUTO COMMIT OFF"); auto_commit = false; result.close(); } } public boolean getAutoCommit() throws SQLException { return auto_commit; // // Query the database for this info. 
// ResultSet result; // result = createStatement().executeQuery( // "SHOW CONNECTION_INFO WHERE var = 'auto_commit'"); // boolean auto_commit_mode = false; // if (result.next()) { // auto_commit_mode = result.getString(2).equals("true"); // } // result.close(); // return auto_commit_mode; } public void commit() throws SQLException { ResultSet result; result = createStatement().executeQuery("COMMIT"); result.close(); } public void rollback() throws SQLException { ResultSet result; result = createStatement().executeQuery("ROLLBACK"); result.close(); } public void close() throws SQLException { if (!isClosed()) { internalClose(); } // if (!isClosed()) { // try { // internalClose(); // } // finally { // MDriver.connectionClosed(this); // } // } // synchronized (lock) { // if (!isClosed()) { // try { // db_interface.dispose(); // MDriver.connectionClosed(this); // } // finally { // is_closed = true; // } // } // } } public boolean isClosed() throws SQLException { synchronized (lock) { return is_closed; } } //====================================================================== // Advanced features: public DatabaseMetaData getMetaData() throws SQLException { return new MDatabaseMetaData(this); } public void setReadOnly(boolean readOnly) throws SQLException { // Hint ignored } public boolean isReadOnly() throws SQLException { // Currently we don't support read locked transactions. 
return false; } public void setCatalog(String catalog) throws SQLException { // Silently ignored ;-) } public String getCatalog() throws SQLException { // Catalog's not supported return null; } public void setTransactionIsolation(int level) throws SQLException { if (level != TRANSACTION_SERIALIZABLE) { throw new SQLException("Only 'TRANSACTION_SERIALIZABLE' supported."); } } public int getTransactionIsolation() throws SQLException { return TRANSACTION_SERIALIZABLE; } public SQLWarning getWarnings() throws SQLException { synchronized (lock) { return head_warning; } } public void clearWarnings() throws SQLException { synchronized (lock) { head_warning = null; } } //#IFDEF(JDBC2.0) //--------------------------JDBC 2.0----------------------------- public Statement createStatement(int resultSetType, int resultSetConcurrency) throws SQLException { Statement statement = createStatement(); // PENDING - set default result set type and result set concurrency for // statement return statement; } public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency) throws SQLException { PreparedStatement statement = prepareStatement(sql); // PENDING - set default result set type and result set concurrency for // statement return statement; } public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency) throws SQLException { throw MSQLException.unsupported(); } // ISSUE: I can see using 'Map' here is going to break compatibility with // Java 1.1. Even though testing with 1.1.8 on Linux and NT turned out // fine, I have a feeling some verifiers on web browsers aren't going to // like this. 
public Map getTypeMap() throws SQLException { throw MSQLException.unsupported(); } public void setTypeMap(Map map) throws SQLException { throw MSQLException.unsupported(); } //#ENDIF //#IFDEF(JDBC3.0) //--------------------------JDBC 3.0----------------------------- public void setHoldability(int holdability) throws SQLException { // Currently holdability can not be set to CLOSE_CURSORS_AT_COMMIT though // it could be implemented. if (holdability == ResultSet.CLOSE_CURSORS_AT_COMMIT) { throw new SQLException( "CLOSE_CURSORS_AT_COMMIT holdability is not supported."); } } public int getHoldability() throws SQLException { return ResultSet.HOLD_CURSORS_OVER_COMMIT; } public Savepoint setSavepoint() throws SQLException { throw MSQLException.unsupported(); } public Savepoint setSavepoint(String name) throws SQLException { throw MSQLException.unsupported(); } public void rollback(Savepoint savepoint) throws SQLException { throw MSQLException.unsupported(); } public void releaseSavepoint(Savepoint savepoint) throws SQLException { throw MSQLException.unsupported(); } public Statement createStatement(int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException { // Currently holdability can not be set to CLOSE_CURSORS_AT_COMMIT though // it could be implemented. if (resultSetHoldability == ResultSet.CLOSE_CURSORS_AT_COMMIT) { throw new SQLException( "CLOSE_CURSORS_AT_COMMIT holdability is not supported."); } return createStatement(resultSetType, resultSetConcurrency); } public PreparedStatement prepareStatement( String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException { // Currently holdability can not be set to CLOSE_CURSORS_AT_COMMIT though // it could be implemented. 
if (resultSetHoldability == ResultSet.CLOSE_CURSORS_AT_COMMIT) { throw new SQLException( "CLOSE_CURSORS_AT_COMMIT holdability is not supported."); } return prepareStatement(sql, resultSetType, resultSetConcurrency); } public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException { throw MSQLException.unsupported(); } public PreparedStatement prepareStatement(String sql, int autoGeneratedKeys) throws SQLException { throw MSQLException.unsupported(); } public PreparedStatement prepareStatement(String sql, int columnIndexes[]) throws SQLException { throw MSQLException.unsupported(); } public PreparedStatement prepareStatement(String sql, String columnNames[]) throws SQLException { throw MSQLException.unsupported(); } //#ENDIF //#IFDEF(JDBC4.0) // -------------------------- JDK 1.6 ----------------------------------- public Clob createClob() throws SQLException { throw MSQLException.unsupported16(); } public Blob createBlob() throws SQLException { throw MSQLException.unsupported16(); } public NClob createNClob() throws SQLException { throw MSQLException.unsupported16(); } public SQLXML createSQLXML() throws SQLException { throw MSQLException.unsupported16(); } public boolean isValid(int timeout) throws SQLException { // Execute a query on the DB. // If it generates an exception, return false, otherwise true. 
try { Statement stmt = createStatement(); ResultSet rs = stmt.executeQuery("SHOW CONNECTION_INFO"); rs.close(); stmt.close(); return true; } catch (SQLException e) { return false; } } public void setClientInfo(String name, String value) throws SQLClientInfoException { } public void setClientInfo(Properties properties) throws SQLClientInfoException { } public String getClientInfo(String name) throws SQLException { return null; } public Properties getClientInfo() throws SQLException { return new Properties(); } public Array createArrayOf(String typeName, Object[] elements) throws SQLException { throw MSQLException.unsupported16(); } public Struct createStruct(String typeName, Object[] attributes) throws SQLException { throw MSQLException.unsupported16(); } public Object unwrap(Class iface) throws SQLException { throw MSQLException.unsupported16(); } public boolean isWrapperFor(Class iface) throws SQLException { throw MSQLException.unsupported16(); } //#ENDIF //#IFDEF(JDBC5.0) // -------------------------- JDK 1.7 ----------------------------------- public void setSchema(String schema) throws SQLException { // Set the schema, if (schema == null) { throw new NullPointerException(); } PreparedStatement statement = prepareStatement("SET SCHEMA ?"); statement.setString(1, schema); ResultSet rs = statement.executeQuery(); rs.close(); statement.close(); } public String getSchema() throws SQLException { // Use the 'show connection_info' query to determine the current schema, String current_schema = null; Statement stmt = createStatement(); ResultSet rs = stmt.executeQuery("SHOW CONNECTION_INFO"); while (rs.next()) { String key = rs.getString(1); if (key.equals("current_schema")) { current_schema = rs.getString(2); } } rs.close(); stmt.close(); return current_schema; } public void abort(Executor executor) throws SQLException { throw MSQLException.unsupported16(); } public void setNetworkTimeout(Executor executor, int milliseconds) throws SQLException { throw 
MSQLException.unsupported16(); } public int getNetworkTimeout() throws SQLException { throw MSQLException.unsupported16(); } //#ENDIF // ---------- Inner classes ---------- /** * The thread that handles all dispatching of trigger events. */ private class TriggerDispatchThread extends Thread { private final List trigger_messages_queue = new ArrayList(); TriggerDispatchThread() { setDaemon(true); setName("Mckoi - Trigger Dispatcher"); } /** * Dispatches a trigger message to the listeners. */ private void dispatchTrigger(String event_message) { synchronized (trigger_messages_queue) { trigger_messages_queue.add(event_message); trigger_messages_queue.notifyAll(); } } // Thread run method public void run() { while (true) { try { String message; synchronized (trigger_messages_queue) { while (trigger_messages_queue.isEmpty()) { try { trigger_messages_queue.wait(); } catch (InterruptedException e) { /* ignore */ } } message = (String) trigger_messages_queue.get(0); trigger_messages_queue.remove(0); } // 'message' is a message to process... // The format of a trigger message is: // "[trigger_name] [trigger_source] [trigger_fire_count]" // System.out.println("TRIGGER EVENT: " + message); StringTokenizer tok = new StringTokenizer(message, " "); String trigger_name = (String) tok.nextElement(); String trigger_source = (String) tok.nextElement(); String trigger_fire_count = (String) tok.nextElement(); List fired_triggers = new ArrayList(); // Create a list of Listener's that are listening for this trigger. synchronized (trigger_list) { for (int i = 0; i < trigger_list.size(); i += 2) { String to_listen_for = (String) trigger_list.get(i); if (to_listen_for.equals(trigger_name)) { TriggerListener listener = (TriggerListener) trigger_list.get(i + 1); // NOTE, we can't call 'listener.triggerFired' here because // it's not a good idea to call user code when we are // synchronized over 'trigger_list' (deadlock concerns). fired_triggers.add(listener); } } } // Fire them triggers. 
for (int i = 0; i < fired_triggers.size(); ++i) { TriggerListener listener = (TriggerListener) fired_triggers.get(i); listener.triggerFired(trigger_name); } } catch (Throwable t) { t.printStackTrace(System.err); } } } } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/jdbc/MDatabaseMetaData.java000066400000000000000000001001251330501023400267030ustar00rootroot00000000000000/** * com.mckoi.database.jdbc.MDatabaseMetaData 23 Jul 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.jdbc; import java.sql.*; /** * An implementation of JDBC's DatabaseMetaData. * * @author Tobias Downer */ public class MDatabaseMetaData implements DatabaseMetaData { /** * The Connection object associated with this meta data. */ private MConnection connection; /** * The name and version of the database we are connected to. */ private String database_name, database_version; /** * Constructor. */ MDatabaseMetaData(MConnection connection) { this.connection = connection; } /** * Queries product information about the database we are connected to. 
*/ private void queryProductInformation() throws SQLException { if (database_name == null || database_version == null) { Statement statement = connection.createStatement(); ResultSet result = statement.executeQuery("SHOW PRODUCT"); result.next(); database_name = result.getString("name"); database_version = result.getString("version"); statement.close(); result.close(); } } //---------------------------------------------------------------------- // First, a variety of minor information about the target database. public boolean allProceduresAreCallable() throws SQLException { // NOT SUPPORTED return false; } public boolean allTablesAreSelectable() throws SQLException { // No, only tables that the user has read access to, return false; } public String getURL() throws SQLException { return connection.getURL(); } public String getUserName() throws SQLException { Statement statement = connection.createStatement(); ResultSet result_set = statement.executeQuery("SELECT USER()"); result_set.next(); String username = result_set.getString(1); result_set.close(); statement.close(); return username; } public boolean isReadOnly() throws SQLException { Statement statement = connection.createStatement(); ResultSet result_set = statement.executeQuery( " SELECT * FROM SYS_INFO.sUSRDatabaseStatistics " + " WHERE \"stat_name\" = 'DatabaseSystem.read_only' "); boolean read_only = result_set.next(); result_set.close(); statement.close(); return read_only; } public boolean nullsAreSortedHigh() throws SQLException { return false; } public boolean nullsAreSortedLow() throws SQLException { return true; } public boolean nullsAreSortedAtStart() throws SQLException { return false; } public boolean nullsAreSortedAtEnd() throws SQLException { return false; } public String getDatabaseProductName() throws SQLException { queryProductInformation(); return database_name; } public String getDatabaseProductVersion() throws SQLException { queryProductInformation(); return database_version; } public 
String getDriverName() throws SQLException { return MDriver.DRIVER_NAME; } public String getDriverVersion() throws SQLException { return MDriver.DRIVER_VERSION; } public int getDriverMajorVersion() { return MDriver.DRIVER_MAJOR_VERSION; } public int getDriverMinorVersion() { return MDriver.DRIVER_MINOR_VERSION; } public boolean usesLocalFiles() throws SQLException { // Depends if we are embedded or not, // ISSUE: We need to keep an eye on this for future enhancements to the // Mckoi URL spec. if (getURL().toLowerCase().startsWith(":jdbc:mckoi:local://")) { return true; } else { return false; } } public boolean usesLocalFilePerTable() throws SQLException { // Actually uses 3 files per table. Why would a developer need this info? // Returning false, return false; } public boolean supportsMixedCaseIdentifiers() throws SQLException { return true; } public boolean storesUpperCaseIdentifiers() throws SQLException { return false; } public boolean storesLowerCaseIdentifiers() throws SQLException { return false; } public boolean storesMixedCaseIdentifiers() throws SQLException { return true; } public boolean supportsMixedCaseQuotedIdentifiers() throws SQLException { return true; } public boolean storesUpperCaseQuotedIdentifiers() throws SQLException { return false; } public boolean storesLowerCaseQuotedIdentifiers() throws SQLException { return false; } public boolean storesMixedCaseQuotedIdentifiers() throws SQLException { return true; } public String getIdentifierQuoteString() throws SQLException { return "\""; } public String getSQLKeywords() throws SQLException { // not implemented, return "show"; } public String getNumericFunctions() throws SQLException { // not implemented, // We should put this as a query to the database. It will need to // inspect all user defined functions also. return ""; } public String getStringFunctions() throws SQLException { // not implemented, // We should put this as a query to the database. 
It will need to // inspect all user defined functions also. return ""; } public String getSystemFunctions() throws SQLException { // not implemented, return ""; } public String getTimeDateFunctions() throws SQLException { // not implemented, // We should put this as a query to the database. It will need to // inspect all user defined functions also. return ""; } public String getSearchStringEscape() throws SQLException { return "\\"; } public String getExtraNameCharacters() throws SQLException { return ""; } //-------------------------------------------------------------------- // Functions describing which features are supported. public boolean supportsAlterTableWithAddColumn() throws SQLException { return true; } public boolean supportsAlterTableWithDropColumn() throws SQLException { return true; } public boolean supportsColumnAliasing() throws SQLException { return true; } public boolean nullPlusNonNullIsNull() throws SQLException { return true; } public boolean supportsConvert() throws SQLException { return false; } public boolean supportsConvert(int fromType, int toType) throws SQLException { return false; } public boolean supportsTableCorrelationNames() throws SQLException { // Is this, for example, "select * from Part P, Customer as C where ... ' // If it is then yes. return true; } public boolean supportsDifferentTableCorrelationNames() throws SQLException { // This is easily tested as, // SELECT * FROM Test1 AS Test2; // where 'Test2' is a valid table in the database. 
return false; } public boolean supportsExpressionsInOrderBy() throws SQLException { return true; } public boolean supportsOrderByUnrelated() throws SQLException { return true; } public boolean supportsGroupBy() throws SQLException { return true; } public boolean supportsGroupByUnrelated() throws SQLException { return true; } public boolean supportsGroupByBeyondSelect() throws SQLException { // This doesn't make sense - returning false to be safe, return false; } public boolean supportsLikeEscapeClause() throws SQLException { return true; } public boolean supportsMultipleResultSets() throws SQLException { return false; } public boolean supportsMultipleTransactions() throws SQLException { // Of course... :-) return true; } public boolean supportsNonNullableColumns() throws SQLException { return true; } public boolean supportsMinimumSQLGrammar() throws SQLException { // I need to check this... // What's minimum SQL as defined in ODBC? return false; } public boolean supportsCoreSQLGrammar() throws SQLException { // What's core SQL as defined in ODBC? return false; } public boolean supportsExtendedSQLGrammar() throws SQLException { return false; } public boolean supportsANSI92EntryLevelSQL() throws SQLException { // Not yet... return false; } public boolean supportsANSI92IntermediateSQL() throws SQLException { return false; } public boolean supportsANSI92FullSQL() throws SQLException { return false; } public boolean supportsIntegrityEnhancementFacility() throws SQLException { // ? 
return false; } public boolean supportsOuterJoins() throws SQLException { return true; } public boolean supportsFullOuterJoins() throws SQLException { return false; } public boolean supportsLimitedOuterJoins() throws SQLException { return true; } public String getSchemaTerm() throws SQLException { return "Schema"; } public String getProcedureTerm() throws SQLException { return "Procedure"; } public String getCatalogTerm() throws SQLException { return "Catalog"; } public boolean isCatalogAtStart() throws SQLException { // Don't support catalogs return false; } public String getCatalogSeparator() throws SQLException { return ""; } public boolean supportsSchemasInDataManipulation() throws SQLException { return true; } public boolean supportsSchemasInProcedureCalls() throws SQLException { // When we support procedures then true... return true; } public boolean supportsSchemasInTableDefinitions() throws SQLException { return true; } public boolean supportsSchemasInIndexDefinitions() throws SQLException { return true; } public boolean supportsSchemasInPrivilegeDefinitions() throws SQLException { return true; } public boolean supportsCatalogsInDataManipulation() throws SQLException { return false; } public boolean supportsCatalogsInProcedureCalls() throws SQLException { return false; } public boolean supportsCatalogsInTableDefinitions() throws SQLException { return false; } public boolean supportsCatalogsInIndexDefinitions() throws SQLException { return false; } public boolean supportsCatalogsInPrivilegeDefinitions() throws SQLException { return false; } public boolean supportsPositionedDelete() throws SQLException { // I'm guessing this comes with updatable result sets. return false; } public boolean supportsPositionedUpdate() throws SQLException { // I'm guessing this comes with updatable result sets. 
return false; } public boolean supportsSelectForUpdate() throws SQLException { // I'm not sure what this is, return false; } public boolean supportsStoredProcedures() throws SQLException { return false; } public boolean supportsSubqueriesInComparisons() throws SQLException { // Not yet, return false; } public boolean supportsSubqueriesInExists() throws SQLException { // No 'exists' yet, return false; } public boolean supportsSubqueriesInIns() throws SQLException { return true; } public boolean supportsSubqueriesInQuantifieds() throws SQLException { // I don't think so, return false; } public boolean supportsCorrelatedSubqueries() throws SQLException { // Not yet, return false; } public boolean supportsUnion() throws SQLException { return false; } public boolean supportsUnionAll() throws SQLException { return false; } public boolean supportsOpenCursorsAcrossCommit() throws SQLException { // Sort of, a result set can remain open after a commit... return true; } public boolean supportsOpenCursorsAcrossRollback() throws SQLException { // Sort of, a result set can remain open after a commit... return true; } public boolean supportsOpenStatementsAcrossCommit() throws SQLException { return true; } public boolean supportsOpenStatementsAcrossRollback() throws SQLException { return true; } //---------------------------------------------------------------------- // The following group of methods exposes various limitations // based on the target database with the current driver. // Unless otherwise specified, a result of zero means there is no // limit, or the limit is not known. public int getMaxBinaryLiteralLength() throws SQLException { // No binary literals yet, return 0; } public int getMaxCharLiteralLength() throws SQLException { // This is an educated guess... return 32768; } public int getMaxColumnNameLength() throws SQLException { // Need to work out this limitation for real. There may be no limit. 
return 256; } public int getMaxColumnsInGroupBy() throws SQLException { // The limit is determined by number of columns in select. return getMaxColumnsInSelect(); } public int getMaxColumnsInIndex() throws SQLException { // No explicit indexing syntax, return 1; } public int getMaxColumnsInOrderBy() throws SQLException { // The limit is determined by number of columns in select. return getMaxColumnsInSelect(); } public int getMaxColumnsInSelect() throws SQLException { // Probably limited only by resources... return 4096; } public int getMaxColumnsInTable() throws SQLException { // Probably limited only by resources... return 4096; } public int getMaxConnections() throws SQLException { // Maybe we need to do some benchmarks for this. There's certainly no // limit with regard to licensing. return 8000; } public int getMaxCursorNameLength() throws SQLException { // Cursors not supported, return 0; } public int getMaxIndexLength() throws SQLException { // No explicit indexing syntax, return 0; } public int getMaxSchemaNameLength() throws SQLException { // Schema not supported, return 0; } public int getMaxProcedureNameLength() throws SQLException { // Procedures not supported, return 0; } public int getMaxCatalogNameLength() throws SQLException { // Catalog not supported, return 0; } public int getMaxRowSize() throws SQLException { // Only limited by resources, // Returning 16MB here. return 16 * 1024 * 1024; } public boolean doesMaxRowSizeIncludeBlobs() throws SQLException { return false; } public int getMaxStatementLength() throws SQLException { // The size of a UTF-8 string? return 60000; } public int getMaxStatements() throws SQLException { // No coded limit, return 1024; } public int getMaxTableNameLength() throws SQLException { // This is what's in DatabaseConstants. // However, this limitation should no longer be applicable! return 50; } public int getMaxTablesInSelect() throws SQLException { // Should be no limit but we'll put an arbitary limit anyway... 
return 512; } public int getMaxUserNameLength() throws SQLException { // This is what's in DatabaseConstants. // However, this limitation should no longer be applicable! return 50; } //---------------------------------------------------------------------- public int getDefaultTransactionIsolation() throws SQLException { // Currently the only supported isolation level return Connection.TRANSACTION_SERIALIZABLE; } public boolean supportsTransactions() throws SQLException { // As of version 0.88, yes! return true; } public boolean supportsTransactionIsolationLevel(int level) throws SQLException { return (level == Connection.TRANSACTION_SERIALIZABLE); } public boolean supportsDataDefinitionAndDataManipulationTransactions() throws SQLException { return true; } public boolean supportsDataManipulationTransactionsOnly() throws SQLException { return false; } public boolean dataDefinitionCausesTransactionCommit() throws SQLException { return false; } public boolean dataDefinitionIgnoredInTransactions() throws SQLException { return false; } public ResultSet getProcedures(String catalog, String schemaPattern, String procedureNamePattern) throws SQLException { PreparedStatement statement = connection.prepareStatement( "SHOW JDBC_PROCEDURES ( ?, ?, ? )"); statement.setString(1, catalog); statement.setString(2, schemaPattern); statement.setString(3, procedureNamePattern); return statement.executeQuery(); } public ResultSet getProcedureColumns(String catalog, String schemaPattern, String procedureNamePattern, String columnNamePattern) throws SQLException { PreparedStatement statement = connection.prepareStatement( "SHOW JDBC_PROCEDURE_COLUMNS ( ?, ?, ?, ? 
)"); statement.setString(1, catalog); statement.setString(2, schemaPattern); statement.setString(3, procedureNamePattern); statement.setString(4, columnNamePattern); return statement.executeQuery(); } public ResultSet getTables(String catalog, String schemaPattern, String tableNamePattern, String[] types) throws SQLException { if (tableNamePattern == null) { tableNamePattern = "%"; } if (schemaPattern == null) { schemaPattern = "%"; } // The 'types' argument String type_part = ""; int type_size = 0; if (types != null && types.length > 0) { StringBuffer buf = new StringBuffer(); buf.append(" AND \"TABLE_TYPE\" IN ( "); for (int i = 0; i < types.length - 1; ++i) { buf.append("?, "); } buf.append("? ) \n"); type_size = types.length; type_part = new String(buf); } // Create the statement PreparedStatement stmt = connection.prepareStatement( " SELECT * \n" + " FROM \"SYS_JDBC.Tables\" \n" + " WHERE \"TABLE_SCHEM\" LIKE ? \n" + " AND \"TABLE_NAME\" LIKE ? \n" + type_part + " ORDER BY \"TABLE_TYPE\", \"TABLE_SCHEM\", \"TABLE_NAME\" \n" ); stmt.setString(1, schemaPattern); stmt.setString(2, tableNamePattern); if (type_size > 0) { for (int i = 0; i < type_size; ++i) { stmt.setString(3 + i, types[i]); } } return stmt.executeQuery(); } public ResultSet getSchemas() throws SQLException { Statement statement = connection.createStatement(); return statement.executeQuery( " SELECT * \n" + " FROM SYS_JDBC.Schemas \n" + " ORDER BY \"TABLE_SCHEM\" " ); } public ResultSet getCatalogs() throws SQLException { Statement statement = connection.createStatement(); return statement.executeQuery("SHOW JDBC_CATALOGS"); } public ResultSet getTableTypes() throws SQLException { Statement statement = connection.createStatement(); return statement.executeQuery("SHOW JDBC_TABLE_TYPES"); } public ResultSet getColumns(String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern) throws SQLException { if (tableNamePattern == null) { tableNamePattern = "%"; } if 
(schemaPattern == null) { schemaPattern = "%"; } if (columnNamePattern == null) { columnNamePattern = "%"; } PreparedStatement statement = connection.prepareStatement( " SELECT * \n" + " FROM SYS_JDBC.Columns \n" + " WHERE \"TABLE_SCHEM\" LIKE ? \n" + " AND \"TABLE_NAME\" LIKE ? \n" + " AND \"COLUMN_NAME\" LIKE ? \n" + "ORDER BY \"TABLE_SCHEM\", \"TABLE_NAME\", \"ORDINAL_POSITION\"" ); statement.setString(1, schemaPattern); statement.setString(2, tableNamePattern); statement.setString(3, columnNamePattern); return statement.executeQuery(); } public ResultSet getColumnPrivileges(String catalog, String schema, String table, String columnNamePattern) throws SQLException { if (columnNamePattern == null) { columnNamePattern = "%"; } PreparedStatement statement = connection.prepareStatement( " SELECT * FROM SYS_JDBC.ColumnPrivileges \n" + " WHERE (? IS NOT NULL OR \"TABLE_SCHEM\" = ? ) \n" + " AND (? IS NOT NULL OR \"TABLE_NAME\" = ? ) \n" + " AND \"COLUMN_NAME\" LIKE ? \n" + " ORDER BY \"COLUMN_NAME\", \"PRIVILEGE\" " ); statement.setString(1, schema); statement.setString(2, schema); statement.setString(3, table); statement.setString(4, table); statement.setString(5, columnNamePattern); return statement.executeQuery(); } public ResultSet getTablePrivileges(String catalog, String schemaPattern, String tableNamePattern) throws SQLException { if (schemaPattern == null) { schemaPattern = "%"; } if (tableNamePattern == null) { tableNamePattern = "%"; } PreparedStatement statement = connection.prepareStatement( " SELECT * FROM SYS_JDBC.TablePrivileges \n" + " WHERE \"TABLE_SCHEM\" LIKE ? \n" + " AND \"TABLE_NAME\" LIKE ? 
\n" + " ORDER BY \"TABLE_SCHEM\", \"TABLE_NAME\", \"PRIVILEGE\" " ); statement.setString(1, schemaPattern); statement.setString(2, tableNamePattern); return statement.executeQuery(); } public ResultSet getBestRowIdentifier(String catalog, String schema, String table, int scope, boolean nullable) throws SQLException { PreparedStatement statement = connection.prepareStatement( "SHOW JDBC_BEST_ROW_IDENTIFIER ( ?, ?, ?, ?, ? )"); statement.setString(1, catalog); statement.setString(2, schema); statement.setString(3, table); statement.setInt(4, scope); statement.setBoolean(5, nullable); return statement.executeQuery(); } public ResultSet getVersionColumns(String catalog, String schema, String table) throws SQLException { PreparedStatement statement = connection.prepareStatement( "SHOW JDBC_VERSION_COLUMNS ( ?, ?, ? )"); statement.setString(1, catalog); statement.setString(2, schema); statement.setString(3, table); return statement.executeQuery(); } public ResultSet getPrimaryKeys(String catalog, String schema, String table) throws SQLException { PreparedStatement stmt = connection.prepareStatement( " SELECT * \n" + " FROM SYS_JDBC.PrimaryKeys \n" + " WHERE ( ? IS NULL OR \"TABLE_SCHEM\" = ? ) \n" + " AND \"TABLE_NAME\" = ? \n" + " ORDER BY \"COLUMN_NAME\"" ); stmt.setString(1, schema); stmt.setString(2, schema); stmt.setString(3, table); return stmt.executeQuery(); } public ResultSet getImportedKeys(String catalog, String schema, String table) throws SQLException { PreparedStatement stmt = connection.prepareStatement( " SELECT * FROM SYS_JDBC.ImportedKeys \n" + " WHERE ( ? IS NULL OR \"FKTABLE_SCHEM\" = ? )\n" + " AND \"FKTABLE_NAME\" = ? 
\n" + "ORDER BY \"FKTABLE_SCHEM\", \"FKTABLE_NAME\", \"KEY_SEQ\"" ); stmt.setString(1, schema); stmt.setString(2, schema); stmt.setString(3, table); return stmt.executeQuery(); } public ResultSet getExportedKeys(String catalog, String schema, String table) throws SQLException { PreparedStatement stmt = connection.prepareStatement( " SELECT * FROM SYS_JDBC.ImportedKeys \n" + " WHERE ( ? IS NULL OR \"PKTABLE_SCHEM\" = ? ) \n" + " AND \"PKTABLE_NAME\" = ? \n" + "ORDER BY \"FKTABLE_SCHEM\", \"FKTABLE_NAME\", \"KEY_SEQ\"" ); stmt.setString(1, schema); stmt.setString(2, schema); stmt.setString(3, table); return stmt.executeQuery(); } public ResultSet getCrossReference( String primaryCatalog, String primarySchema, String primaryTable, String foreignCatalog, String foreignSchema, String foreignTable ) throws SQLException { PreparedStatement stmt = connection.prepareStatement( " SELECT * FROM SYS_JDBC.ImportedKeys \n" + " WHERE ( ? IS NULL OR \"PKTABLE_SCHEM\" = ? )\n" + " AND \"PKTABLE_NAME\" = ?\n" + " AND ( ? IS NULL OR \"FKTABLE_SCHEM\" = ? )\n" + " AND \"FKTABLE_NAME\" = ?\n" + "ORDER BY \"FKTABLE_SCHEM\", \"FKTABLE_NAME\", \"KEY_SEQ\"\n" ); stmt.setString(1, primarySchema); stmt.setString(2, primarySchema); stmt.setString(3, primaryTable); stmt.setString(4, foreignSchema); stmt.setString(5, foreignSchema); stmt.setString(6, foreignTable); return stmt.executeQuery(); } public ResultSet getTypeInfo() throws SQLException { return connection.createStatement().executeQuery( "SELECT * FROM SYS_INFO.sUSRSQLTypeInfo"); } public ResultSet getIndexInfo(String catalog, String schema, String table, boolean unique, boolean approximate) throws SQLException { PreparedStatement statement = connection.prepareStatement( "SHOW JDBC_INDEX_INFO ( ?, ?, ?, ?, ? 
)"); statement.setString(1, catalog); statement.setString(2, schema); statement.setString(3, table); statement.setBoolean(4, unique); statement.setBoolean(5, approximate); return statement.executeQuery(); } //#IFDEF(JDBC2.0) //--------------------------JDBC 2.0----------------------------- public boolean supportsResultSetType(int type) throws SQLException { return (type == ResultSet.TYPE_FORWARD_ONLY || type == ResultSet.TYPE_SCROLL_INSENSITIVE); } public boolean supportsResultSetConcurrency(int type, int concurrency) throws SQLException { if (type == ResultSet.TYPE_SCROLL_INSENSITIVE && concurrency == ResultSet.CONCUR_READ_ONLY) { return true; } return false; } public boolean ownUpdatesAreVisible(int type) throws SQLException { return false; } public boolean ownDeletesAreVisible(int type) throws SQLException { return false; } public boolean ownInsertsAreVisible(int type) throws SQLException { return false; } public boolean othersUpdatesAreVisible(int type) throws SQLException { return false; } public boolean othersDeletesAreVisible(int type) throws SQLException { return false; } public boolean othersInsertsAreVisible(int type) throws SQLException { return false; } public boolean updatesAreDetected(int type) throws SQLException { return false; } public boolean deletesAreDetected(int type) throws SQLException { return false; } public boolean insertsAreDetected(int type) throws SQLException { return false; } public boolean supportsBatchUpdates() throws SQLException { return true; } public ResultSet getUDTs(String catalog, String schemaPattern, String typeNamePattern, int[] types) throws SQLException { String where_clause = "true"; if (types != null) { for (int i = 0; i < types.length; ++i) { int t = types[i]; String tstr = "JAVA_OBJECT"; if (t == Types.STRUCT) { tstr = "STRUCT"; } else if (t == Types.DISTINCT) { tstr = "DISTINCT"; } if (i != 0) { where_clause += " AND"; } where_clause += " DATA_TYPE = '" + MckoiConnection.quote(tstr) + "'"; } } PreparedStatement 
statement = connection.prepareStatement( "SHOW JDBC_UDTS ( ?, ?, ? ) WHERE " + where_clause); statement.setString(1, catalog); statement.setString(2, schemaPattern); statement.setString(3, typeNamePattern); return statement.executeQuery(); } public Connection getConnection() throws SQLException { return connection; } //#ENDIF //#IFDEF(JDBC3.0) // ------------------- JDBC 3.0 ------------------------- public boolean supportsSavepoints() throws SQLException { // Currently no return false; } public boolean supportsNamedParameters() throws SQLException { return false; } public boolean supportsMultipleOpenResults() throws SQLException { return false; } public boolean supportsGetGeneratedKeys() throws SQLException { return false; } public ResultSet getSuperTypes(String catalog, String schemaPattern, String typeNamePattern) throws SQLException { throw MSQLException.unsupported(); } public ResultSet getSuperTables(String catalog, String schemaPattern, String tableNamePattern) throws SQLException { throw MSQLException.unsupported(); } public ResultSet getAttributes(String catalog, String schemaPattern, String typeNamePattern, String attributeNamePattern) throws SQLException { throw MSQLException.unsupported(); } public boolean supportsResultSetHoldability(int holdability) throws SQLException { return holdability == ResultSet.HOLD_CURSORS_OVER_COMMIT; } public int getResultSetHoldability() throws SQLException { // Eh? This is in ResultSetMetaData also? An error in the spec or is // this the *default* holdability of a result set? return ResultSet.HOLD_CURSORS_OVER_COMMIT; } public int getDatabaseMajorVersion() throws SQLException { throw MSQLException.unsupported(); } public int getDatabaseMinorVersion() throws SQLException { throw MSQLException.unsupported(); } public int getJDBCMajorVersion() throws SQLException { return 3; } public int getJDBCMinorVersion() throws SQLException { return 0; } public int getSQLStateType() throws SQLException { // ? 
throw MSQLException.unsupported(); } public boolean locatorsUpdateCopy() throws SQLException { // Doesn't matter because this is not supported. throw MSQLException.unsupported(); } public boolean supportsStatementPooling() throws SQLException { return true; } //#ENDIF //#IFDEF(JDBC4.0) // -------------------------- JDK 1.6 ----------------------------------- public RowIdLifetime getRowIdLifetime() throws SQLException { return RowIdLifetime.ROWID_UNSUPPORTED; } public ResultSet getSchemas(String catalog, String schemaPattern) throws SQLException { String where_clause = "true"; if (catalog != null) { where_clause += " AND TABLE_CATALOG = '" + MckoiConnection.quote(catalog) + "'"; } if (schemaPattern != null) { where_clause += " AND TABLE_SCHEM = '" + MckoiConnection.quote(schemaPattern) + "'"; } Statement statement = connection.createStatement(); return statement.executeQuery( " SELECT * \n" + " FROM SYS_JDBC.Schemas \n" + " WHERE " + where_clause + "\n" + " ORDER BY \"TABLE_SCHEM\" " ); } public boolean supportsStoredFunctionsUsingCallSyntax() throws SQLException { return false; } public boolean autoCommitFailureClosesAllResultSets() throws SQLException { return false; } public ResultSet getClientInfoProperties() throws SQLException { throw MSQLException.unsupported16(); } public ResultSet getFunctions(String catalog, String schemaPattern, String functionNamePattern) throws SQLException { throw MSQLException.unsupported16(); } public ResultSet getFunctionColumns(String catalog, String schemaPattern, String functionNamePattern, String columnNamePattern) throws SQLException { throw MSQLException.unsupported16(); } public Object unwrap(Class iface) throws SQLException { throw MSQLException.unsupported16(); } public boolean isWrapperFor(Class iface) throws SQLException { throw MSQLException.unsupported16(); } //#ENDIF //#IFDEF(JDBC5.0) // -------------------------- JDK 1.7 ----------------------------------- public ResultSet getPseudoColumns( String catalog, String 
schemaPattern, String tableNamePattern, String columnNamePattern) throws SQLException { throw MSQLException.unsupported16(); } public boolean generatedKeyAlwaysReturned() throws SQLException { return false; } //#ENDIF } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/jdbc/MDriver.java000066400000000000000000000532141330501023400250370ustar00rootroot00000000000000/** * com.mckoi.database.jdbc.MDriver 19 Jul 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.jdbc; import com.mckoi.database.control.DefaultDBConfig; import java.io.*; import java.net.MalformedURLException; import java.net.URL; import java.sql.*; import java.util.Enumeration; import java.util.HashMap; import java.util.Map; import java.util.Properties; import java.util.StringTokenizer; import java.util.logging.Logger; /** * JDBC implementation of the driver for the Mckoi database. *

* The url protocol is as follows:

*

 *  For connecting to a remote database server:
 *    jdbc:mckoi:[//hostname[:portnum]/][schema_name/]
 *
 *  eg.  jdbc:mckoi://db.mckoi.com:7009/
 *
 *  If hostname is not provided then it defaults to localhost.
 *  If portnum is not provided it defaults to 9157.
 *  If schema_name is not provided it defaults to APP.
 *
 *  To start up a database in the local file system the protocol is:
 *    jdbc:mckoi:local://databaseconfiguration/[schema_name/]
 *
 *  eg.  jdbc:mckoi:local://D:/dbdata/db.conf
 *
 *  If schema_name is not provided it defaults to APP.
 *
 *  To create a database in the local file system then you need to supply a
 *  'create=true' assignment in the URL encoding.
 *
 *  eg.  jdbc:mckoi:local://D:/dbdata/db.conf?create=true
 * 
*

* A local database runs within the JVM of this JDBC driver. To boot a * local database, you must include the full database .jar release with * your application distribution. *

* For connecting to a remote database using the remote URL string, only the * JDBC driver need be included in the classpath. *

* NOTE: This needs to be a light-weight object, because a developer could * generate multiple instances of this class. Making an instance of * 'com.mckoi.JDBCDriver' will create at least two instances of this object. * * @author Tobias Downer */ public class MDriver implements Driver { // The major and minor version numbers of the driver. This only changes // when the JDBC communcation protocol changes. static final int DRIVER_MAJOR_VERSION = 1; static final int DRIVER_MINOR_VERSION = 0; // The name of the driver. static final String DRIVER_NAME = "Mckoi JDBC Driver"; // The version of the driver as a string. static final String DRIVER_VERSION = "" + DRIVER_MAJOR_VERSION + "." + DRIVER_MINOR_VERSION; // The protocol URL header string that signifies a Mckoi JDBC connection. private static final String mckoi_protocol_url = "jdbc:mckoi:"; /** * Set to true when this driver is registered. */ private static boolean registered = false; // ----- Static methods ----- /** * Static method that registers this driver with the JDBC driver manager. */ public synchronized static void register() { if (registered == false) { try { java.sql.DriverManager.registerDriver(new MDriver()); registered = true; } catch (SQLException e) { e.printStackTrace(System.err); } } } // ----- MDriver ----- /** * The timeout for a query in seconds. */ static int QUERY_TIMEOUT = Integer.MAX_VALUE; /** * The mapping of the database configuration URL string to the LocalBootable * object that manages the connection. This mapping is only used if the * driver makes local connections (eg. 'jdbc:mckoi:local://'). */ private Map local_session_map; /** * Constructor is public so that instances of the JDBC driver can be * created by developers. */ public MDriver() { local_session_map = new HashMap(); } /** * Given a URL encoded arguments string, this will extract the var=value * pairs and put them in the given Properties object. 
For example, * the string 'create=true&user=usr&password=passwd' will extract the three * values and put them in the Properties object. */ private static void parseEncodedVariables(String url_vars, Properties info) { // Parse the url variables. StringTokenizer tok = new StringTokenizer(url_vars, "&"); while (tok.hasMoreTokens()) { String token = tok.nextToken().trim(); int split_point = token.indexOf("="); if (split_point > 0) { String key = token.substring(0, split_point).toLowerCase(); String value = token.substring(split_point + 1); // Put the key/value pair in the 'info' object. info.put(key, value); } else { System.err.println("Ignoring url variable: '" + token + "'"); } } // while (tok.hasMoreTokens()) } /** * Creates a new LocalBootable object that is used to manage the connections * to a database running locally. This uses reflection to create a new * com.mckoi.database.jdbcserver.DefaultLocalBootable object. We use * reflection here because we don't want to make a source level dependency * link to the class. Throws an SQLException if the class was not found. */ private static LocalBootable createDefaultLocalBootable() throws SQLException { try { Class c = Class.forName( "com.mckoi.database.jdbcserver.DefaultLocalBootable"); return (LocalBootable) c.newInstance(); } catch (Throwable e) { // A lot of people ask us about this error so the message is verbose. throw new SQLException( "I was unable to find the class that manages local database " + "connections. This means you may not have included the correct " + "library in your classpath. Make sure that either mckoidb.jar " + "is in your classpath or your classpath references the complete " + "Mckoi SQL database class hierarchy."); } } /** * Makes a connection to a local database. If a local database connection * has not been made then it is created here. *

* Returns a list of two elements, (DatabaseInterface) db_interface and * (String) database_name. */ private synchronized Object[] connectToLocal(String url, String address_part, Properties info) throws SQLException { // If the LocalBootable object hasn't been created yet, do so now via // reflection. String schema_name = "APP"; DatabaseInterface db_interface; // Look for the name upto the URL encoded variables int url_start = address_part.indexOf("?"); if (url_start == -1) { url_start = address_part.length(); } // The path to the configuration String config_path = address_part.substring(8, url_start); // If no config_path, then assume it is ./db.conf if (config_path.length() == 0) { config_path = "./db.conf"; } // Substitute win32 '\' to unix style '/' config_path = config_path.replace('\\', '/'); // Is the config path encoded as a URL? if (config_path.startsWith("jar:") || config_path.startsWith("file:/") || config_path.startsWith("ftp:/") || config_path.startsWith("http:/") || config_path.startsWith("https:/")) { // Don't do anything - looks like a URL already. } else { // We don't care about anything after the ".conf/" String abs_path; String post_abs_path; int schem_del = config_path.indexOf(".conf/"); if (schem_del == -1) { abs_path = config_path; post_abs_path = ""; } else { abs_path = config_path.substring(0, schem_del + 5); post_abs_path = config_path.substring(schem_del + 5); } // If the config_path contains the string "!/" then assume this is a jar // file configuration reference. For example, // 'C:/my_db/my_jar.jar!/configs/db.conf' // If the config path is not encoded as a URL, add a 'file:/' preffix // to the path to make it a URL. 
For example 'C:/my_config.conf" becomes // 'file:/C:/my_config.conf', 'C:/my_libs/my_jar.jar!/configs/db.conf' // becomes 'jar:file:/C:/my_libs/my_jar.jar!/configs/db.conf' int jar_delim_i = abs_path.indexOf("!/"); String path_part = abs_path; String rest_part = ""; String pre = "file:/"; if (jar_delim_i != -1) { path_part = abs_path.substring(0, jar_delim_i); rest_part = abs_path.substring(jar_delim_i); pre = "jar:file:/"; } // Does the configuration file exist? Or does the resource that contains // the configuration exist? // We try the file with a preceeding '/' and without. File f = new File(path_part); if (!f.exists() && !path_part.startsWith("/")) { f = new File("/" + path_part); if (!f.exists()) { throw new SQLException("Unable to find file: " + path_part); } } // Construct the new qualified configuration path. config_path = pre + f.getAbsolutePath() + rest_part + post_abs_path; // Substitute win32 '\' to unix style '/' // We do this (again) because on win32 'f.getAbsolutePath()' returns win32 // style deliminators. config_path = config_path.replace('\\', '/'); } // Look for the string '.conf/' in the config_path which is used to // determine the initial schema name. For example, the connection URL, // 'jdbc:mckoi:local:///my_db/db.conf/TOBY' will start the database in the // TOBY schema of the database denoted by the configuration path // '/my_db/db.conf' int schema_del_i = config_path.toLowerCase().indexOf(".conf/"); if (schema_del_i > 0 && schema_del_i + 6 < config_path.length()) { schema_name = config_path.substring(schema_del_i + 6); config_path = config_path.substring(0, schema_del_i + 5); } // The url variables part String url_vars = ""; if (url_start < address_part.length()) { url_vars = address_part.substring(url_start + 1).trim(); } // Is there already a local connection to this database? 
String session_key = config_path.toLowerCase(); LocalBootable local_bootable = (LocalBootable) local_session_map.get(session_key); // No so create one and put it in the connection mapping if (local_bootable == null) { local_bootable = createDefaultLocalBootable(); local_session_map.put(session_key, local_bootable); } // Is the connection booted already? if (local_bootable.isBooted()) { // Yes, so simply login. db_interface = local_bootable.connectToJVM(); } else { // Otherwise we need to boot the local database. // This will be the configuration input file InputStream config_in; if (!config_path.startsWith("file:/")) { // Make the config_path into a URL and open an input stream to it. URL config_url; try { config_url = new URL(config_path); } catch (MalformedURLException e) { throw new SQLException("Malformed URL: " + config_path); } try { // Try and open an input stream to the given configuration. config_in = config_url.openConnection().getInputStream(); } catch (IOException e) { throw new SQLException("Unable to open configuration file. " + "I tried looking at '" + config_url.toString() + "'"); } } else { try { // Try and open an input stream to the given configuration. config_in = new FileInputStream(new File(config_path.substring(6))); } catch (IOException e) { throw new SQLException("Unable to open configuration file: " + config_path); } } // Work out the root path (the place in the local file system where the // configuration file is). File root_path; // If the URL is a file, we can work out what the root path is. if (config_path.startsWith("jar:file:/") || config_path.startsWith("file:/")) { int start_i = config_path.indexOf(":/"); // If the config_path is pointing inside a jar file, this denotes the // end of the file part. 
int file_end_i = config_path.indexOf("!"); String config_file_part; if (file_end_i == -1) { config_file_part = config_path.substring(start_i + 2); } else { config_file_part = config_path.substring(start_i + 2, file_end_i); } File absolute_config_file = new File( new File(config_file_part).getAbsolutePath()); root_path = new File(absolute_config_file.getParent()); } else { // This means the configuration file isn't sitting in the local file // system, so we assume root is the current directory. root_path = new File("."); } // Get the configuration bundle that was set as the path, DefaultDBConfig config = new DefaultDBConfig(root_path); try { config.loadFromStream(config_in); config_in.close(); } catch (IOException e) { throw new SQLException("Error reading configuration file: " + config_path + " Reason: " + e.getMessage()); } // Parse the url variables parseEncodedVariables(url_vars, info); boolean create_db = false; boolean create_db_if_not_exist = false; create_db = info.getProperty("create", "").equals("true"); create_db_if_not_exist = info.getProperty("boot_or_create", "").equals("true") || info.getProperty("create_or_boot", "").equals("true"); // Include any properties from the 'info' object Enumeration prop_keys = info.keys(); while (prop_keys.hasMoreElements()) { String key = prop_keys.nextElement().toString(); if (!key.equals("user") && !key.equals("password")) { config.setValue(key, (String) info.get(key)); } } // Check if the database exists boolean database_exists = local_bootable.checkExists(config); // If database doesn't exist and we've been told to create it if it // doesn't exist, then set the 'create_db' flag. if (create_db_if_not_exist && !database_exists) { create_db = true; } // Error conditions; // If we are creating but the database already exists. if (create_db && database_exists) { throw new SQLException( "Can not create database because a database already exists."); } // If we are booting but the database doesn't exist. 
if (!create_db && !database_exists) { throw new SQLException( "Can not find a database to start. Either the database needs to " + "be created or the 'database_path' property of the configuration " + "must be set to the location of the data files."); } // Are we creating a new database? if (create_db) { String username = info.getProperty("user", ""); String password = info.getProperty("password", ""); db_interface = local_bootable.create(username, password, config); } // Otherwise we must be logging onto a database, else { db_interface = local_bootable.boot(config); } } // Make up the return parameters. Object[] ret = new Object[2]; ret[0] = db_interface; ret[1] = schema_name; return ret; } // ---------- Implemented from Driver ---------- public Connection connect(String url, Properties info) throws SQLException { // We looking for url starting with this protocol if (!acceptsURL(url)) { // If the protocol not valid then return null as in the spec. return null; } DatabaseInterface db_interface; String default_schema = "APP"; int row_cache_size; int max_row_cache_size; String address_part = url.substring(url.indexOf(mckoi_protocol_url) + mckoi_protocol_url.length()); // If we are to connect this JDBC to a single user database running // within this JVM. if (address_part.startsWith("local://")) { // Returns a list of two Objects, db_interface and database_name. Object[] ret_list = connectToLocal(url, address_part, info); db_interface = (DatabaseInterface) ret_list[0]; default_schema = (String) ret_list[1]; // Internal row cache setting are set small. row_cache_size = 43; max_row_cache_size = 4092000; } else { int port = 9157; String host = "127.0.0.1"; // Otherwise we must be connecting remotely. 
if (address_part.startsWith("//")) { String args_string = ""; int arg_part = address_part.indexOf('?', 2); if (arg_part != -1) { args_string = address_part.substring(arg_part + 1); address_part = address_part.substring(0, arg_part); } // System.out.println("ADDRESS_PART: " + address_part); int end_address = address_part.indexOf("/", 2); if (end_address == -1) { end_address = address_part.length(); } String remote_address = address_part.substring(2, end_address); int delim = remote_address.indexOf(':'); if (delim == -1) { delim = remote_address.length(); } host = remote_address.substring(0, delim); if (delim < remote_address.length() - 1) { port = Integer.parseInt(remote_address.substring(delim + 1)); } // System.out.println("REMOTE_ADDRESS: '" + remote_address + "'"); // Schema name? String schema_part = ""; if (end_address < address_part.length()) { schema_part = address_part.substring(end_address + 1); } String schema_string = schema_part; int schema_end = schema_part.indexOf('/'); if (schema_end != -1) { schema_string = schema_part.substring(0, schema_end); } else { schema_end = schema_part.indexOf('?'); if (schema_end != -1) { schema_string = schema_part.substring(0, schema_end); } } // System.out.println("SCHEMA_STRING: '" + schema_string + "'"); // Argument part? if (!args_string.equals("")) { // System.out.println("ARGS: '" + args_string + "'"); parseEncodedVariables(args_string, info); } // Is there a schema or should we default? if (schema_string.length() > 0) { default_schema = schema_string; } } else { if (address_part.trim().length() > 0) { throw new SQLException("Malformed URL: " + address_part); } } // database_name = address_part; // if (database_name == null || database_name.trim().equals("")) { // database_name = "DefaultDatabase"; // } // BUG WORKAROUND: // There appears to be a bug in the socket code of some VM // implementations. 
With the IBM Linux JDK, if a socket is opened while // another is closed while blocking on a read, the socket that was just // opened breaks. This was causing the login code to block indefinitely // and the connection thread causing a null pointer exception. // The workaround is to put a short pause before the socket connection // is established. try { Thread.sleep(85); } catch (InterruptedException e) { /* ignore */ } // Make the connection TCPStreamDatabaseInterface tcp_db_interface = new TCPStreamDatabaseInterface(host, port); // Attempt to open a socket to the database. tcp_db_interface.connectToDatabase(); db_interface = tcp_db_interface; // For remote connection, row cache uses more memory. row_cache_size = 4111; max_row_cache_size = 8192000; } // System.out.println("DEFAULT SCHEMA TO CONNECT TO: " + default_schema); // Create the connection object on the given database, MConnection connection = new MConnection(url, db_interface, row_cache_size, max_row_cache_size); // Try and login (throws an SQLException if fails). connection.login(info, default_schema); return connection; } public boolean acceptsURL(String url) throws SQLException { return url.startsWith(mckoi_protocol_url) || url.startsWith(":" + mckoi_protocol_url); } public DriverPropertyInfo[] getPropertyInfo(String url, Properties info) throws SQLException { // Is this for asking for usernames and passwords if they are // required but not provided? // Return nothing for now, assume required info has been provided. return new DriverPropertyInfo[0]; } public int getMajorVersion() { return DRIVER_MAJOR_VERSION; } public int getMinorVersion() { return DRIVER_MINOR_VERSION; } public boolean jdbcCompliant() { // Certified compliant? - perhaps one day... 
return false; } //#IFDEF(JDBC5.0) // -------------------------- JDK 1.7 ----------------------------------- public Logger getParentLogger() throws SQLFeatureNotSupportedException { throw new SQLFeatureNotSupportedException(); } //#ENDIF } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/jdbc/MPreparedStatement.java000066400000000000000000000432251330501023400272340ustar00rootroot00000000000000/** * com.mckoi.database.jdbc.MPreparedStatement 22 Jul 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.jdbc; import com.mckoi.database.global.ByteLongObject; import com.mckoi.database.global.CastHelper; import com.mckoi.database.global.ObjectTranslator; import com.mckoi.database.global.StreamableObject; import com.mckoi.database.global.StringObject; import com.mckoi.util.BigNumber; import java.io.*; import java.math.BigDecimal; import java.sql.*; import java.util.Calendar; /** * An implementation of a JDBC prepared statement. * * Multi-threaded issue: This class is not designed to be multi-thread * safe. A PreparedStatement should not be accessed by concurrent threads. * * @author Tobias Downer */ class MPreparedStatement extends MStatement implements PreparedStatement { /** * The SQLQuery object constructed for this statement. */ private SQLQuery statement; /** * Constructs the PreparedStatement. 
*/ MPreparedStatement(MConnection connection, String sql) { super(connection); statement = new SQLQuery(sql); } // ---------- Utility ---------- /** * Converts the given Object to the given SQL type object. */ Object convertToType(Object ob, int sqlType) throws SQLException { // We use CastHelper to convert to the given SQL type. return CastHelper.castObjectToSQLType(ob, sqlType, -1, -1, "requested type"); } /** * Converts a Java Object using the JDBC type conversion rules. For example, * java.lang.Double is converted to a NUMERIC type * (com.mckoi.util.BigNumber). */ Object castToMckoiObject(Object ob) throws SQLException { if (ob == null) { return ob; } if (ob instanceof String) { return StringObject.fromString((String) ob); } if (ob instanceof Boolean) { return ob; } if (ob instanceof Number) { Number n = (Number) ob; if (ob instanceof Byte || ob instanceof Short || ob instanceof Integer) { return BigNumber.fromInt(n.intValue()); } else if (ob instanceof Long) { return BigNumber.fromLong(n.longValue()); } else if (ob instanceof Float) { return BigNumber.fromFloat(n.floatValue()); } else if (ob instanceof Double) { return BigNumber.fromDouble(n.doubleValue()); } else { return BigNumber.fromString(n.toString()); } } if (ob instanceof byte[]) { return new ByteLongObject((byte[]) ob); } try { return ObjectTranslator.translate(ob); } catch (Throwable e) { // Hacky - we need for ObjectTranslator to throw a better exception throw new SQLException(e.getMessage()); } } /** * Given an InputStream and a length of bytes to read from the stream, this * method will insert a correct type of parameter into the statement to handle * this size of object. If the object is too large it will mark it as a * streamable object. * * @param parameterIndex 1 for first parameter, 2 for second, etc. * @param x the input stream containing the binary data. * @param length the number of bytes to read. * @param type 2 = binary, 3 = 1 byte char, 4 = 2 byte unicode. 
*/ private void setVariableLengthStream(int parameterIndex, InputStream x, int length, byte type) throws IOException { // If we are going to transfer more than 8K bytes then the object becomes // a streamable object if (length > 8 * 1024) { int p_ind = parameterIndex - 1; // Generate a new StreamableObject and for this InputStream and store it // in the hold. StreamableObject s_object = createStreamableObject(x, length, type); // Put the streamable object in the statement variable list. statement.setVar(p_ind, s_object); } else { // If binary stream, if (type == 2) { // Less than 8K bytes so we transfer the object as a standard // ByteLongObject. ByteLongObject b = new ByteLongObject(x, length); statement.setVar(parameterIndex - 1, b); } // If ascii stream else if (type == 3) { // Convert to a String StringBuffer buf = new StringBuffer(); for (int i = 0; i < length; ++i) { int v = x.read(); if (v == -1) { throw new IOException("Premature EOF reached."); } buf.append((char) v); } statement.setVar(parameterIndex - 1, StringObject.fromString(new String(buf))); } // If unicode stream else if (type == 4) { // Convert to a String StringBuffer buf = new StringBuffer(); int half_len = length / 2; for (int i = 0; i < half_len; ++i) { int v1 = x.read(); int v2 = x.read(); if (v1 == -1 || v2 == -1) { throw new IOException("Premature EOF reached."); } buf.append((char) ((v1 << 8) + v2)); } statement.setVar(parameterIndex - 1, StringObject.fromString(new String(buf))); } else { throw new RuntimeException("Do not understand type."); } } } // ---------- Overridden from MStatement ---------- public void close() throws SQLException { super.close(); statement = null; } // ---------- Implemented from PreparedStatement ---------- public ResultSet executeQuery() throws SQLException { return executeQuery(statement); } public int executeUpdate() throws SQLException { MResultSet result_set = executeQuery(statement); return result_set.intValue(); } public void setNull(int parameterIndex, 
int sqlType) throws SQLException { statement.setVar(parameterIndex - 1, null); } public void setBoolean(int parameterIndex, boolean x) throws SQLException { statement.setVar(parameterIndex - 1, new Boolean(x)); } public void setByte(int parameterIndex, byte x) throws SQLException { setLong(parameterIndex, x); } public void setShort(int parameterIndex, short x) throws SQLException { setLong(parameterIndex, x); } public void setInt(int parameterIndex, int x) throws SQLException { setLong(parameterIndex, x); } public void setLong(int parameterIndex, long x) throws SQLException { statement.setVar(parameterIndex - 1, BigNumber.fromLong(x)); } public void setFloat(int parameterIndex, float x) throws SQLException { statement.setVar(parameterIndex - 1, BigNumber.fromFloat(x)); } public void setDouble(int parameterIndex, double x) throws SQLException { statement.setVar(parameterIndex - 1, BigNumber.fromDouble(x)); } public void setBigDecimal(int parameterIndex, BigDecimal x) throws SQLException { if (x == null) { setNull(parameterIndex, Types.NUMERIC); } else { statement.setVar(parameterIndex - 1, BigNumber.fromBigDecimal(x)); } } public void setString(int parameterIndex, String x) throws SQLException { if (x == null) { setNull(parameterIndex, Types.VARCHAR); } else { // If the string is less than 4K characters then transfer as a regular // string, otherwise treat the string as a large object. if (x.length() <= 4 * 1024) { statement.setVar(parameterIndex - 1, StringObject.fromString(x)); } else { setCharacterStream(parameterIndex, new StringReader(x), x.length()); } } } public void setBytes(int parameterIndex, byte[] x) throws SQLException { if (x == null) { setNull(parameterIndex, Types.BINARY); } else { // If the byte array is small then transfer as a regular ByteLongObject if (x.length <= 8 * 1024) { ByteLongObject b = new ByteLongObject(x); statement.setVar(parameterIndex - 1, b); } else { // Otherwise wrap around a ByteArrayInputStream and treat as a large // object. 
setBinaryStream(parameterIndex, new ByteArrayInputStream(x), x.length); } } } // JDBC Extension ... Use java.util.Date as parameter public void extSetDate(int parameterIndex, java.util.Date x) throws SQLException { statement.setVar(parameterIndex - 1, x); } public void setDate(int parameterIndex, java.sql.Date x) throws SQLException { if (x == null) { setNull(parameterIndex, Types.DATE); } else { extSetDate(parameterIndex, new java.util.Date(x.getTime())); } } public void setTime(int parameterIndex, java.sql.Time x) throws SQLException { if (x == null) { setNull(parameterIndex, Types.TIME); } else { extSetDate(parameterIndex, new java.util.Date(x.getTime())); } } /** * True if the timestamp value includes nanoseconds, which is the case * starting with Java 1.4.0 */ private static final boolean TIMESTAMP_VALUE_INCLUDES_NANOS = (new java.sql.Timestamp(5).getTime() == 5); public void setTimestamp(int parameterIndex, java.sql.Timestamp x) throws SQLException { if (x == null) { setNull(parameterIndex, Types.TIMESTAMP); } else { long time = x.getTime(); if (!TIMESTAMP_VALUE_INCLUDES_NANOS) { time += (x.getNanos() / 1000000); } extSetDate(parameterIndex, new java.util.Date(time)); } } public void setAsciiStream(int parameterIndex, java.io.InputStream x, int length) throws SQLException { if (x == null) { setNull(parameterIndex, Types.LONGVARCHAR); } else { try { // Process a potentially large object. setVariableLengthStream(parameterIndex, x, length, (byte) 3); } catch (IOException e) { throw new SQLException("IOException reading input stream: " + e.getMessage()); } // // Fudge implementation since we fudged the result set version of this. // // In an ideal world we'd open up a stream with the server and download // // the information without having to collect all the data to transfer it. 
// try { // StringBuffer buf = new StringBuffer(); // int i = 0; // while (i < length) { // int c = x.read(); // if (c == -1) { // throw new IOException( // "End of stream reached before length reached."); // } // buf.append((char) c); // ++i; // } // setString(parameterIndex, new String(buf)); // } // catch (IOException e) { // e.printStackTrace(); // throw new SQLException("IO Error: " + e.getMessage()); // } } } /** * @deprecated */ public void setUnicodeStream(int parameterIndex, java.io.InputStream x, int length) throws SQLException { throw new SQLException("Deprecated method not supported"); } public void setBinaryStream(int parameterIndex, java.io.InputStream x, int length) throws SQLException { if (x == null) { setNull(parameterIndex, Types.BINARY); } else { try { // Process a potentially large object. setVariableLengthStream(parameterIndex, x, length, (byte) 2); } catch (IOException e) { throw new SQLException("IOException reading input stream: " + e.getMessage()); } } } public void clearParameters() throws SQLException { statement.clear(); } //---------------------------------------------------------------------- // Advanced features: public void setObject(int parameterIndex, Object x, int targetSqlType, int scale) throws SQLException { if (x == null) { setNull(parameterIndex, targetSqlType); } else { x = convertToType(x, targetSqlType); if (x instanceof BigDecimal) { x = ((BigDecimal) x).setScale(scale, BigDecimal.ROUND_HALF_UP); } statement.setVar(parameterIndex - 1, x); } } public void setObject(int parameterIndex, Object x, int targetSqlType) throws SQLException { if (x == null) { setNull(parameterIndex, targetSqlType); } else { statement.setVar(parameterIndex - 1, convertToType(x, targetSqlType)); } } public void setObject(int parameterIndex, Object x) throws SQLException { statement.setVar(parameterIndex - 1, castToMckoiObject(x)); } public boolean execute() throws SQLException { MResultSet result_set = executeQuery(statement); return 
!result_set.isUpdate(); } //#IFDEF(JDBC2.0) //--------------------------JDBC 2.0----------------------------- public void addBatch() throws SQLException { addBatch(statement.copy()); } public void setCharacterStream(int parameterIndex, java.io.Reader reader, int length) throws SQLException { if (reader == null) { setNull(parameterIndex, Types.LONGVARCHAR); } else { try { // Process as a potentially large object. setVariableLengthStream(parameterIndex, new UnicodeToBinaryStream(reader), length * 2, (byte) 4); } catch (IOException e) { throw new SQLException("IOException reading input stream: " + e.getMessage()); } // // NOTE: The whole stream is read into a String and the 'setString' method // // is called. This is inappropriate for long streams but probably // // won't be an issue any time in the future. // StringBuffer buf = new StringBuffer(); // final int BUF_LENGTH = 1024; // char[] char_buf = new char[BUF_LENGTH]; // try { // while (length > 0) { // int read = reader.read(char_buf, 0, Math.min(BUF_LENGTH, length)); // if (read > 0) { // buf.append(char_buf, 0, read); // length = length - read; // } // else { // throw new SQLException("Premature end of Reader reached."); // } // } // } // catch (IOException e) { // throw new SQLException("IOError: " + e.getMessage()); // } // setString(parameterIndex, new String(buf)); } } public void setRef (int i, Ref x) throws SQLException { throw MSQLException.unsupported(); } public void setBlob (int i, Blob x) throws SQLException { if (x == null) { setNull(i, Types.BLOB); } else { // BLOBs are handled the same as a binary stream. If the length of the // blob exceeds a certain threshold the object is treated as a large // object and transferred to the server in separate chunks. 
long len = x.length(); if (len >= 32768L * 65536L) { throw new SQLException("BLOB > 2 gigabytes is too large."); } setBinaryStream(i, x.getBinaryStream(), (int) len); } } public void setClob (int i, Clob x) throws SQLException { if (x == null) { setNull(i, Types.CLOB); } else { // CLOBs are handled the same as a character stream. If the length of the // clob exceeds a certain threshold the object is treated as a large // object and transferred to the server in separate chunks. long len = x.length(); if (len >= 16384L * 65536L) { throw new SQLException("CLOB > 1 billion characters is too large."); } setCharacterStream(i, x.getCharacterStream(), (int) len); } } public void setArray (int i, Array x) throws SQLException { throw MSQLException.unsupported(); } public ResultSetMetaData getMetaData() throws SQLException { // TODO.... throw MSQLException.unsupported(); } public void setDate(int parameterIndex, java.sql.Date x, Calendar cal) throws SQLException { // Kludge... setDate(parameterIndex, x); } public void setTime(int parameterIndex, java.sql.Time x, Calendar cal) throws SQLException { // Kludge... setTime(parameterIndex, x); } public void setTimestamp(int parameterIndex, java.sql.Timestamp x, Calendar cal) throws SQLException { // Kludge... setTimestamp(parameterIndex, x); } public void setNull (int paramIndex, int sqlType, String typeName) throws SQLException { // Kludge again... setNull(paramIndex, sqlType); } //#ENDIF //#IFDEF(JDBC3.0) // ---------- JDBC 3.0 ---------- public void setURL(int parameterIndex, java.net.URL x) throws SQLException { throw MSQLException.unsupported(); } public ParameterMetaData getParameterMetaData() throws SQLException { throw MSQLException.unsupported(); } //#ENDIF /** * For diagnostics. 
*/ public String toString() { return statement.toString(); } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/jdbc/MResultSet.java000066400000000000000000001607361330501023400255460ustar00rootroot00000000000000/** * com.mckoi.database.jdbc.MResultSet 19 Jul 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.jdbc; import java.io.*; import java.sql.*; import java.math.BigDecimal; import java.util.Calendar; import java.util.Hashtable; import java.util.Vector; import com.mckoi.database.global.ColumnDescription; import com.mckoi.database.global.ByteLongObject; import com.mckoi.database.global.CastHelper; import com.mckoi.database.global.ObjectTransfer; import com.mckoi.database.global.StreamableObject; import com.mckoi.database.global.ObjectTranslator; import com.mckoi.database.global.SQLTypes; import com.mckoi.database.global.StringObject; import com.mckoi.util.BigNumber; /** * Implementation of a ResultSet. *

* Multi-threaded issue: This class is not designed to be multi-thread * safe. A ResultSet should not be accessed by concurrent threads. * * @author Tobias Downer */ public final class MResultSet implements ResultSet { /** * The default fetch size. */ private static final int DEFAULT_FETCH_SIZE = 32; /** * The maximum fetch size. */ private static final int MAXIMUM_FETCH_SIZE = 512; /** * The current unique id key. */ private static int unique_id_key = 1; /** * BigNumber for 0. */ private static BigNumber BD_ZERO = BigNumber.fromInt(0); /** * A unique int that refers to this result set. */ private int unique_id; /** * The MConnection that this result set is in. */ private MConnection connection; /** * The MStatement that this result is from. */ private MStatement statement; /** * SQL warnings for this result set. Cleared each time a new row accessed. */ private SQLWarning head_warning; /** * The current result_id for the information in the current result set. */ private int result_id; /** * The array of ColumnDescription that describes each column in the result * set. */ private ColumnDescription[] col_list; /** * The length of time it took to execute this query in ms. */ private int query_time_ms; /** * The number of rows in the result set. */ private int result_row_count; /** * The maximum row count as set in the Statement by the 'setMaxRows' method * or 0 if the max row count is not important. */ private int max_row_count = Integer.MAX_VALUE; /** * The row number of the first row of the 'result_block' */ private int block_top_row; /** * The number of rows in 'result_block' */ private int block_row_count; /** * The number of rows to fetch each time we need to get rows from the * database. */ private int fetch_size; /** * The Vector that contains the Objects downloaded into this result set. * It only contains the objects from the last block of rows downloaded. */ private Vector result_block; /** * The real index of the result set we are currently at. 
*/ private int real_index = Integer.MAX_VALUE; /** * The offset into 'result_block' where 'real_index' is. This is set up * by 'ensureIndexLoaded'. */ private int real_index_offset = -1; /** * Set to true if the last 'getxxx' method was a null. Otherwise set to * false. */ private boolean last_was_null = false; /** * A Hashtable that acts as a cache for column name/column number look ups. */ private Hashtable column_hash; /** * Set to true if the result set is closed on the server. */ private boolean closed_on_server; /** * Constructor. */ MResultSet(MConnection connection, MStatement statement) { this.connection = connection; this.statement = statement; unique_id = unique_id_key++; result_id = -1; result_block = new Vector(); } /** * Adds a new SQLWarning to the chain. */ void addSQLWarning(SQLWarning warning) { if (head_warning == null) { head_warning = warning; } else { head_warning.setNextWarning(warning); } } /** * Returns true if verbose column names are enabled on the connection. * Returns false by default. */ boolean verboseColumnNames() { return connection.verboseColumnNames(); } // ---------- Connection callback methods ---------- // These methods are called back from the ConnectionThread running on the // connection. These methods require some synchronization thought. /** * Called by the ConnectionThread when we have received the initial bag of * the result set. This contains information about the columns in the * result, the number of rows in the entire set, etc. This sets up the * result set to handle the result. */ void connSetup(int result_id, ColumnDescription[] col_list, int total_row_count) { this.result_id = result_id; this.col_list = col_list; this.result_row_count = total_row_count; block_top_row = -1; result_block.removeAllElements(); real_index = -1; fetch_size = DEFAULT_FETCH_SIZE; closed_on_server = false; } /** * Sets the length of time in milliseconds (server-side) it took to execute * this query. 
Useful as feedback for the server-side optimisation systems. *

* VERY MINOR ISSUE: An int can 'only' contain 35 weeks worth of * milliseconds. So if a query takes longer than that this number will * overflow. */ void setQueryTime(int time_ms) { query_time_ms = time_ms; } /** * Sets the maximum number of rows that this ResultSet will return or 0 if * the max number of rows is not important. This is set by MStatement * when a query is evaluated. */ void setMaxRowCount(int row_count) { if (row_count == 0) { max_row_count = Integer.MAX_VALUE; } else { max_row_count = row_count; } } /** * Returns true if this ResultSet contains large objects. This looks at the * ColumnDescription object to determine this. */ boolean containsLargeObjects() { for (int i = 0; i < col_list.length; ++i) { ColumnDescription col = col_list[i]; int sql_type = col.getSQLType(); if (sql_type == com.mckoi.database.global.SQLTypes.BINARY || sql_type == com.mckoi.database.global.SQLTypes.VARBINARY || sql_type == com.mckoi.database.global.SQLTypes.LONGVARBINARY || sql_type == com.mckoi.database.global.SQLTypes.BLOB || sql_type == com.mckoi.database.global.SQLTypes.CHAR || sql_type == com.mckoi.database.global.SQLTypes.VARCHAR || sql_type == com.mckoi.database.global.SQLTypes.LONGVARCHAR || sql_type == com.mckoi.database.global.SQLTypes.CLOB) { return true; } } return false; } /** * Asks the server for all the rows in the result set and stores it * locally within this object. It then disposes all resources associated * with this result set on the server. */ void storeResultLocally() throws SQLException { // After this call, 'result_block' will contain the whole result set. updateResultPart(0, rowCount()); // Request to close the current result set on the server. connection.disposeResult(result_id); closed_on_server = true; } /** * Asks the server for more information about this result set to put * into the 'result_block'. This should be called when we need to request * more information from the server. *

* @param row_index the top row index from the block of the result set to * download. * @param row_count the maximum number of rows to download (may be less if * no more are available). */ void updateResultPart(int row_index, int row_count) throws SQLException { // If row_count is 0 then we don't need to do anything. if (row_count == 0) { return; } if (row_index + row_count < 0) { throw new SQLException( "ResultSet row index is before the start of the set."); } else if (row_index < 0) { row_index = 0; row_count = row_count + row_index; } if (row_index >= rowCount()) { throw new SQLException( "ResultSet row index is after the end of the set."); } else if (row_index + row_count > rowCount()) { row_count = rowCount() - row_index; } if (result_id == -1) { throw new SQLException("result_id == -1. No result to get from."); } try { // Request the result via the RowCache. If the information is not found // in the row cache then the request is forwarded onto the database. result_block = connection.getRowCache().getResultPart(result_block, connection, result_id, row_index, row_count, columnCount(), rowCount()); // Set the row that's at the top block_top_row = row_index; // Set the number of rows in the block. block_row_count = row_count; // // Request a part of a result from the server (blocks) // DataInputStream din = connection.requestResultPart(result_id, // row_index, row_count); // // // Clear the block. // result_block.removeAllElements(); // int num_entries = row_count * columnCount(); // for (int i = 0; i < num_entries; ++i) { // result_block.addElement(ObjectTransfer.readFrom(din)); // } } catch (IOException e) { e.printStackTrace(); throw new SQLException("IO Error: " + e.getMessage()); } } /** * Closes the current server side result for this result set ready for a * new one. This should be called before we execute a query. It sends a * command to the server to dispose of any resources associated with the * current result_id. *

* It's perfectly safe to call this method even if we haven't downloaded * a result set from the server and you may also safely call it multiple * times (it will only send one request to the server). */ void closeCurrentResult() throws SQLException { if (getResultID() != -1) { if (!closed_on_server) { // Request to close the current result set connection.disposeResult(result_id); closed_on_server = true; } result_id = -1; real_index = Integer.MAX_VALUE; // Clear the column name -> index mapping, if (column_hash != null) { column_hash.clear(); } } } /** * Returns the 'result_id' that is used as a key to refer to the result set * on the server that is the result of the query. A 'resultID' of -1 means * there is no server side result set associated with this object. */ int getResultID() { return result_id; } /** * The total number of rows in the result set. */ int rowCount() { // The row count is whatever is the least between max_row_count (the // maximum the user has set) and result_row_count (the actual number of // rows in the result. return Math.min(result_row_count, max_row_count); } /** * The column count of columns in the result set. */ int columnCount() { return col_list.length; } /** * Returns the ColumnDescription of the given column (first column is 0, * etc). */ ColumnDescription getColumn(int column) { return col_list[column]; } /** * Returns true if this result set contains 1 column and 1 row and the name * of the column is 'result'. This indicates the result set is a DDL * command ( UPDATE, INSERT, CREATE, ALTER, etc ). *

* NOTE: This is a minor hack because there is no real * indication that this is a DML statement. Theoretically a DQL query could * be constructed that meets all these requirements and is processed * incorrectly. */ boolean isUpdate() { // Must have 1 col and 1 row and the title of the column must be // 'result' aliased. return (columnCount() == 1 && rowCount() == 1 && getColumn(0).getName().equals("@aresult")); } /** * Returns this ResultSet as an 'int' value. This is only valid if the * result set has a single column and a single row of type 'BigNumber'. */ int intValue() throws SQLException { if (isUpdate()) { Object ob = result_block.elementAt(0); if (ob instanceof BigNumber) { return ((BigNumber) ob).intValue(); } else { return 0; } } throw new SQLException("Unable to format query result as an update value."); } /** * Disposes of all resources associated with this result set. This could * either be called from the MStatement finalize or close method. Calls to * this object are undefined after this method has finished. */ void dispose() { try { close(); } catch (SQLException e) { // Ignore // We ignore exceptions because handling cases where the server // connection has broken for many ResultSets would be annoying. } connection = null; statement = null; col_list = null; result_block = null; } /** * Ensures that the row index pointed to by 'real_index' is actually loaded * into the 'result_block'. If not, we send a request to the database to * get it. */ void ensureIndexLoaded() throws SQLException { if (real_index == -1) { throw new SQLException("Row index out of bounds."); } // Offset into our block int row_offset = real_index - block_top_row; if (row_offset >= block_row_count) { // Need to download the next block from the server. updateResultPart(real_index, fetch_size); // Set up the index into the downloaded block. 
row_offset = real_index - block_top_row; real_index_offset = row_offset * columnCount(); } else if (row_offset < 0) { int fs_dif = Math.min(fetch_size, 8); // Need to download the next block from the server. updateResultPart(real_index - fetch_size + fs_dif, fetch_size); // Set up the index into the downloaded block. row_offset = real_index - block_top_row; real_index_offset = row_offset * columnCount(); } } /** * Searches for the index of the column with the given name. First column * index is 1, second is 2, etc. */ int findColumnIndex(String name) throws SQLException { // For speed, we keep column name -> column index mapping in the hashtable. // This makes column reference by string faster. if (column_hash == null) { column_hash = new Hashtable(); } boolean case_insensitive = connection.isCaseInsensitiveIdentifiers(); if (case_insensitive) { name = name.toUpperCase(); } Integer index = (Integer) column_hash.get(name); if (index == null) { int col_count = columnCount(); // First construct an unquoted list of all column names String[] cols = new String[col_count]; for (int i = 0; i < col_count; ++i) { String col_name = col_list[i].getName(); if (col_name.startsWith("\"")) { col_name = col_name.substring(1, col_name.length() - 1); } // Strip any codes from the name if (col_name.startsWith("@")) { col_name = col_name.substring(2); } if (case_insensitive) { col_name = col_name.toUpperCase(); } cols[i] = col_name; } for (int i = 0; i < col_count; ++i) { String col_name = cols[i]; if (col_name.equals(name)) { column_hash.put(name, new Integer(i + 1)); return i + 1; } } // If not found then search for column name ending, String point_name = "." 
+ name; for (int i = 0; i < col_count; ++i) { String col_name = cols[i]; if (col_name.endsWith(point_name)) { column_hash.put(name, new Integer(i + 1)); return i + 1; } } // // DEBUG: output the list of columns, // for (int i = 0; i < col_count; ++i) { // System.out.println(cols[i]); // } throw new SQLException("Couldn't find column with name: " + name); } else { return index.intValue(); } } /** * Returns the column Object of the current index. The first column is 1, * the second is 2, etc. */ Object getRawColumn(int column) throws SQLException { // ASSERTION - // Is the given column in bounds? if (column < 1 || column > columnCount()) { throw new SQLException( "Column index out of bounds: 1 > " + column + " > " + columnCount()); } // Ensure the current indexed row is fetched from the server. ensureIndexLoaded(); // Return the raw cell object. Object ob = result_block.elementAt(real_index_offset + (column - 1)); // Null check of the returned object, if (ob != null) { last_was_null = false; // If this is a java object then deserialize it, // ISSUE: Cache deserialized objects? if (getColumn(column - 1).getSQLType() == com.mckoi.database.global.SQLTypes.JAVA_OBJECT) { ob = ObjectTranslator.deserialize((ByteLongObject) ob); } return ob; } last_was_null = true; return null; } /** * Returns the column Object of the name of the current index. */ Object getRawColumn(String name) throws SQLException { return getRawColumn(findColumnIndex(name)); } /** * This should be called when the 'real_index' variable changes. It updates * internal variables. */ private void realIndexUpdate() throws SQLException { // Set up the index into the downloaded block. int row_offset = real_index - block_top_row; real_index_offset = row_offset * columnCount(); // Clear any warnings as in the spec. clearWarnings(); } /** * Returns true if the given object is either an instanceof StringObject or * is an instanceof StreamableObject, and therefore can be made into a * string. 
*/ private boolean canMakeString(Object ob) { return (ob instanceof StringObject || ob instanceof StreamableObject); } /** * If the object represents a String or is a form that can be readily * translated to a String (such as a Clob, String, BigNumber, Boolean, etc) * the string representation of the given Object is returned. This method is * a convenient way to convert objects such as Clobs into java.util.String * objects. This will cause a ClassCastException if the given object * represents a BLOB or ByteLongObject. */ private String makeString(Object ob) throws SQLException { if (ob instanceof StreamableObject) { Clob clob = asClob(ob); long clob_len = clob.length(); if (clob_len < 16384L * 65536L) { return clob.getSubString(1, (int) clob_len); } throw new SQLException("Clob too large to return as a string."); } else if (ob instanceof ByteLongObject) { throw new ClassCastException(); } else { return ob.toString(); } } /** * Returns the given object as a Blob instance. */ private Blob asBlob(Object ob) { if (ob instanceof StreamableObject) { StreamableObject s_ob = (StreamableObject) ob; byte type = (byte) (s_ob.getType() & 0x0F); if (type == 2) { return new MStreamableBlob(connection, result_id, type, s_ob.getIdentifier(), s_ob.getSize()); } } else if (ob instanceof ByteLongObject) { return new MBlob((ByteLongObject) ob); } throw new ClassCastException(); } /** * Returns the given object as a Clob instance. */ private Clob asClob(Object ob) { if (ob instanceof StreamableObject) { StreamableObject s_ob = (StreamableObject) ob; byte type = (byte) (s_ob.getType() & 0x0F); if (type == 3 || type == 4) { return new MStreamableClob(connection, result_id, type, s_ob.getIdentifier(), s_ob.getSize()); } } else if (ob instanceof StringObject) { return new MClob(ob.toString()); } throw new ClassCastException(); } /** * Casts an internal object to the sql_type given for return by methods * such as 'getObject'. 
*/ private Object jdbcObjectCast(Object ob, int sql_type) throws SQLException { switch (sql_type) { case(SQLTypes.BIT): return ob; case(SQLTypes.TINYINT): return new Byte(((BigNumber) ob).byteValue()); case(SQLTypes.SMALLINT): return new Short(((BigNumber) ob).shortValue()); case(SQLTypes.INTEGER): return new Integer(((BigNumber) ob).intValue()); case(SQLTypes.BIGINT): return new Long(((BigNumber) ob).longValue()); case(SQLTypes.FLOAT): return new Double(((BigNumber) ob).doubleValue()); case(SQLTypes.REAL): return new Float(((BigNumber) ob).floatValue()); case(SQLTypes.DOUBLE): return new Double(((BigNumber) ob).doubleValue()); case(SQLTypes.NUMERIC): return ((BigNumber) ob).asBigDecimal(); case(SQLTypes.DECIMAL): return ((BigNumber) ob).asBigDecimal(); case(SQLTypes.CHAR): return makeString(ob); case(SQLTypes.VARCHAR): return makeString(ob); case(SQLTypes.LONGVARCHAR): return makeString(ob); case(SQLTypes.DATE): return new java.sql.Date(((java.util.Date) ob).getTime()); case(SQLTypes.TIME): return new java.sql.Time(((java.util.Date) ob).getTime()); case(SQLTypes.TIMESTAMP): return new java.sql.Timestamp(((java.util.Date) ob).getTime()); case(SQLTypes.BINARY): // fall through case(SQLTypes.VARBINARY): // fall through case(SQLTypes.LONGVARBINARY): Blob b = asBlob(ob); return b.getBytes(1, (int) b.length()); case(SQLTypes.NULL): return ob; case(SQLTypes.OTHER): return ob; case(SQLTypes.JAVA_OBJECT): return ob; case(SQLTypes.DISTINCT): // (Not supported) return ob; case(SQLTypes.STRUCT): // (Not supported) return ob; case(SQLTypes.ARRAY): // (Not supported) return ob; //#IFDEF(JDBC2.0) case(SQLTypes.BLOB): return asBlob(ob); case(SQLTypes.CLOB): return asClob(ob); case(SQLTypes.REF): // (Not supported) return ob; //#ENDIF default: return ob; } } // ---------- JDBC Extentions ---------- // All non-standard extentions to the JDBC API. This is rather ugly because // to use these we need to cast to a com.mckoi.database.jdbc.? class. 
/** * The number of milliseconds it took the server to execute this query. * This is set after the call to 'connSetup' so is available as soon as the * header information is received from the server. */ public int extQueryTimeMillis() { return query_time_ms; } /** * Access column as java.util.Date (which is the native object used in the * mckoi database to handle time). */ public java.util.Date extGetDate(int columnIndex) throws SQLException { return (java.util.Date) getRawColumn(columnIndex); } /** * Access column as java.util.Date (which is the native object used in the * mckoi database to handle time). */ public java.util.Date extGetDate(String columnName) throws SQLException { return extGetDate(findColumnIndex(columnName)); } // ---------- Implemented from ResultSet ---------- public boolean next() throws SQLException { int row_count = rowCount(); if (real_index < row_count) { ++real_index; if (real_index < row_count) { realIndexUpdate(); } } return (real_index < row_count); } public void close() throws SQLException { closeCurrentResult(); } public boolean wasNull() throws SQLException { // Note: we don't check that a previous value was read. return last_was_null; } //====================================================================== // Methods for accessing results by column index //====================================================================== public String getString(int columnIndex) throws SQLException { Object str = getRawColumn(columnIndex); if (str == null) { return null; } else { if (canMakeString(str)) { return makeString(str); } else { // For date, time and timestamp we must format as per the JDBC // specification. 
if (str instanceof java.util.Date) { int sql_type = getColumn(columnIndex - 1).getSQLType(); return jdbcObjectCast(str, sql_type).toString(); } return str.toString(); } } } public boolean getBoolean(int columnIndex) throws SQLException { Object ob = getRawColumn(columnIndex); if (ob == null) { return false; } else if (ob instanceof Boolean) { return ((Boolean) ob).booleanValue(); } else if (ob instanceof BigNumber) { return ((BigNumber) ob).compareTo(BD_ZERO) != 0; } else if (canMakeString(ob)) { return makeString(ob).equalsIgnoreCase("true"); } else { throw new SQLException("Unable to cast value in ResultSet to boolean"); } } public byte getByte(int columnIndex) throws SQLException { // Translates from BigNumber BigNumber num = getBigNumber(columnIndex); return num == null ? 0 : num.byteValue(); } public short getShort(int columnIndex) throws SQLException { // Translates from BigNumber BigNumber num = getBigNumber(columnIndex); return num == null ? 0 : num.shortValue(); } public int getInt(int columnIndex) throws SQLException { // Translates from BigNumber BigNumber num = getBigNumber(columnIndex); return num == null ? 0 : num.intValue(); } public long getLong(int columnIndex) throws SQLException { // Translates from BigNumber BigNumber num = getBigNumber(columnIndex); return num == null ? 0 : num.longValue(); } public float getFloat(int columnIndex) throws SQLException { // Translates from BigNumber BigNumber num = getBigNumber(columnIndex); return num == null ? 0 : num.floatValue(); } public double getDouble(int columnIndex) throws SQLException { // Translates from BigNumber BigNumber num = getBigNumber(columnIndex); return num == null ? 
0 : num.doubleValue(); } /** * @deprecated */ public BigDecimal getBigDecimal(int columnIndex, int scale) throws SQLException { return getBigDecimal(columnIndex); } public byte[] getBytes(int columnIndex) throws SQLException { Blob b = getBlob(columnIndex); if (b == null) { return null; } else { if (b.length() <= Integer.MAX_VALUE) { return b.getBytes(1, (int) b.length()); } else { throw new SQLException("Blob too large to return as byte[]"); } } // Object ob = getRawColumn(columnIndex); // if (ob == null) { // return null; // } // else if (ob instanceof ByteLongObject) { // // Return a safe copy of the byte[] array (BLOB). // ByteLongObject b = (ByteLongObject) ob; // byte[] barr = new byte[b.length()]; // System.arraycopy(b.getByteArray(), 0, barr, 0, b.length()); // return barr; // } // else { // throw new SQLException("Unable to cast value in ResultSet to byte[]"); // } } public Date getDate(int columnIndex) throws SQLException { // Wrap java.util.Date with java.sql.Date java.util.Date d = extGetDate(columnIndex); if (d != null) { return new Date(d.getTime()); } return null; } public java.sql.Time getTime(int columnIndex) throws SQLException { // Wrap java.util.Date with java.sql.Time java.util.Date d = extGetDate(columnIndex); if (d != null) { return new Time(d.getTime()); } return null; } public java.sql.Timestamp getTimestamp(int columnIndex) throws SQLException { // ISSUE: This may be incorrectly implemented.... 
// Wrap java.util.Date with java.sql.Timestamp java.util.Date d = extGetDate(columnIndex); if (d != null) { return new Timestamp(d.getTime()); } return null; } public java.io.InputStream getAsciiStream(int columnIndex) throws SQLException { Clob c = getClob(columnIndex); if (c == null) { return null; } else { return c.getAsciiStream(); } } /** * @deprecated */ public java.io.InputStream getUnicodeStream(int columnIndex) throws SQLException { throw new SQLException("Deprecated method not supported"); } public java.io.InputStream getBinaryStream(int columnIndex) throws SQLException { Blob blob = getBlob(columnIndex); if (blob == null) { return null; } else { return blob.getBinaryStream(); } // Object ob = getRawColumn(columnIndex); // if (ob == null) { // return null; // } // else if (ob instanceof ByteLongObject) { // ByteLongObject b = (ByteLongObject) ob; // return new ByteArrayInputStream(b.getByteArray()); // } // else { // throw new SQLException( // "Unable to cast value in ResultSet to binary stream"); // } } //====================================================================== // Methods for accessing results by column name //====================================================================== public String getString(String columnName) throws SQLException { return getString(findColumnIndex(columnName)); } public boolean getBoolean(String columnName) throws SQLException { return getBoolean(findColumnIndex(columnName)); } public byte getByte(String columnName) throws SQLException { return getByte(findColumnIndex(columnName)); } public short getShort(String columnName) throws SQLException { return getShort(findColumnIndex(columnName)); } public int getInt(String columnName) throws SQLException { return getInt(findColumnIndex(columnName)); } public long getLong(String columnName) throws SQLException { return getLong(findColumnIndex(columnName)); } public float getFloat(String columnName) throws SQLException { return getFloat(findColumnIndex(columnName)); } 
public double getDouble(String columnName) throws SQLException { return getDouble(findColumnIndex(columnName)); } /** * @deprecated */ public BigDecimal getBigDecimal(String columnName, int scale) throws SQLException { return getBigDecimal(findColumnIndex(columnName)); } public byte[] getBytes(String columnName) throws SQLException { return getBytes(findColumnIndex(columnName)); } public java.sql.Date getDate(String columnName) throws SQLException { return getDate(findColumnIndex(columnName)); } public java.sql.Time getTime(String columnName) throws SQLException { return getTime(findColumnIndex(columnName)); } public java.sql.Timestamp getTimestamp(String columnName) throws SQLException { return getTimestamp(findColumnIndex(columnName)); } public java.io.InputStream getAsciiStream(String columnName) throws SQLException { return getAsciiStream(findColumnIndex(columnName)); } /** * @deprecated */ public java.io.InputStream getUnicodeStream(String columnName) throws SQLException { return getUnicodeStream(findColumnIndex(columnName)); } public java.io.InputStream getBinaryStream(String columnName) throws SQLException { return getBinaryStream(findColumnIndex(columnName)); } //===================================================================== // Advanced features: //===================================================================== public SQLWarning getWarnings() throws SQLException { return head_warning; } public void clearWarnings() throws SQLException { head_warning = null; } public String getCursorName() throws SQLException { // Cursor not supported... 
throw MSQLException.unsupported(); } public ResultSetMetaData getMetaData() throws SQLException { return new MResultSetMetaData(this); } public Object getObject(int columnIndex) throws SQLException { Object ob = getRawColumn(columnIndex); if (ob == null) { return ob; } if (connection.isStrictGetObject()) { // Convert depending on the column type, ColumnDescription col_desc = getColumn(columnIndex - 1); int sql_type = col_desc.getSQLType(); return jdbcObjectCast(ob, sql_type); } //#IFDEF(JDBC2.0) else { // For blobs, return an instance of Blob. if (ob instanceof ByteLongObject || ob instanceof StreamableObject) { return asBlob(ob); } } //#ENDIF return ob; } public Object getObject(String columnName) throws SQLException { return getObject(findColumnIndex(columnName)); } //---------------------------------------------------------------- public int findColumn(String columnName) throws SQLException { return findColumnIndex(columnName); } //--------------------------JDBC 2.0----------------------------------- // NOTE: We allow 'getBigDecimal' methods as extensions to JDBC 1.0 // because they are a key object in the Mckoi world. public BigDecimal getBigDecimal(int columnIndex) throws SQLException { BigNumber bnum = getBigNumber(columnIndex); if (bnum != null) { return bnum.asBigDecimal(); } else { return null; } } private BigNumber getBigNumber(int columnIndex) throws SQLException { Object ob = getRawColumn(columnIndex); if (ob == null) { return null; } if (ob instanceof BigNumber) { return (BigNumber) ob; } else { return BigNumber.fromString(makeString(ob)); } } public BigDecimal getBigDecimal(String columnName) throws SQLException { return getBigDecimal(findColumnIndex(columnName)); } // NOTE: We allow 'setFetchSize' and 'getFetchSize' as extensions to // JDBC 1.0 also. 
public void setFetchSize(int rows) throws SQLException { if (rows > 0) { fetch_size = Math.min(rows, MAXIMUM_FETCH_SIZE); } else { fetch_size = DEFAULT_FETCH_SIZE; } } public int getFetchSize() throws SQLException { return fetch_size; } //#IFDEF(JDBC2.0) //--------------------------------------------------------------------- // Getters and Setters //--------------------------------------------------------------------- public java.io.Reader getCharacterStream(int columnIndex) throws SQLException { Clob c = getClob(columnIndex); if (c == null) { return null; } else { return c.getCharacterStream(); } } public java.io.Reader getCharacterStream(String columnName) throws SQLException { return getCharacterStream(findColumnIndex(columnName)); } //--------------------------------------------------------------------- // Traversal/Positioning //--------------------------------------------------------------------- public boolean isBeforeFirst() throws SQLException { return real_index < 0; } public boolean isAfterLast() throws SQLException { return real_index >= rowCount(); } public boolean isFirst() throws SQLException { return real_index == 0; } public boolean isLast() throws SQLException { return real_index == rowCount() - 1; } public void beforeFirst() throws SQLException { real_index = -1; } public void afterLast() throws SQLException { real_index = rowCount(); } public boolean first() throws SQLException { real_index = 0; realIndexUpdate(); return real_index < rowCount(); } public boolean last() throws SQLException { real_index = rowCount() - 1; realIndexUpdate(); return real_index >= 0; } public int getRow() throws SQLException { return real_index + 1; } public boolean absolute( int row ) throws SQLException { if (row > 0) { real_index = row - 1; } else if (row < 0) { real_index = rowCount() + row; } realIndexUpdate(); return (real_index >= 0 && real_index < rowCount()); } public boolean relative( int rows ) throws SQLException { real_index += rows; int row_count = 
rowCount(); if (real_index < -1) { real_index = -1; } if (real_index > row_count) { real_index = row_count; } realIndexUpdate(); return (real_index >= 0 && real_index < rowCount()); } public boolean previous() throws SQLException { if (real_index >= 0) { --real_index; realIndexUpdate(); } return real_index >= 0; } public void setFetchDirection(int direction) throws SQLException { // Currently ignored... // We could improve cache performance with this hint. } public int getFetchDirection() throws SQLException { // Return default... // We could improve cache performance with this hint. return FETCH_UNKNOWN; } public int getType() throws SQLException { // Supports scrolling but can't change return TYPE_SCROLL_INSENSITIVE; } public int getConcurrency() throws SQLException { // Only support read only result sets... return CONCUR_READ_ONLY; } //--------------------------------------------------------------------- // Updates //--------------------------------------------------------------------- public boolean rowUpdated() throws SQLException { throw MSQLException.unsupported(); } public boolean rowInserted() throws SQLException { throw MSQLException.unsupported(); } public boolean rowDeleted() throws SQLException { throw MSQLException.unsupported(); } public void updateNull(int columnIndex) throws SQLException { throw MSQLException.unsupported(); } public void updateBoolean(int columnIndex, boolean x) throws SQLException { throw MSQLException.unsupported(); } public void updateByte(int columnIndex, byte x) throws SQLException { throw MSQLException.unsupported(); } public void updateShort(int columnIndex, short x) throws SQLException { throw MSQLException.unsupported(); } public void updateInt(int columnIndex, int x) throws SQLException { throw MSQLException.unsupported(); } public void updateLong(int columnIndex, long x) throws SQLException { throw MSQLException.unsupported(); } public void updateFloat(int columnIndex, float x) throws SQLException { throw 
MSQLException.unsupported(); } public void updateDouble(int columnIndex, double x) throws SQLException { throw MSQLException.unsupported(); } public void updateBigDecimal(int columnIndex, BigDecimal x) throws SQLException { throw MSQLException.unsupported(); } public void updateString(int columnIndex, String x) throws SQLException { throw MSQLException.unsupported(); } public void updateBytes(int columnIndex, byte x[]) throws SQLException { throw MSQLException.unsupported(); } public void updateDate(int columnIndex, java.sql.Date x) throws SQLException { throw MSQLException.unsupported(); } public void updateTime(int columnIndex, java.sql.Time x) throws SQLException { throw MSQLException.unsupported(); } public void updateTimestamp(int columnIndex, java.sql.Timestamp x) throws SQLException { throw MSQLException.unsupported(); } public void updateAsciiStream(int columnIndex, java.io.InputStream x, int length) throws SQLException { throw MSQLException.unsupported(); } public void updateBinaryStream(int columnIndex, java.io.InputStream x, int length) throws SQLException { throw MSQLException.unsupported(); } public void updateCharacterStream(int columnIndex, java.io.Reader x, int length) throws SQLException { throw MSQLException.unsupported(); } public void updateObject(int columnIndex, Object x, int scale) throws SQLException { throw MSQLException.unsupported(); } public void updateObject(int columnIndex, Object x) throws SQLException { throw MSQLException.unsupported(); } public void updateNull(String columnName) throws SQLException { throw MSQLException.unsupported(); } public void updateBoolean(String columnName, boolean x) throws SQLException { throw MSQLException.unsupported(); } public void updateByte(String columnName, byte x) throws SQLException { throw MSQLException.unsupported(); } public void updateShort(String columnName, short x) throws SQLException { throw MSQLException.unsupported(); } public void updateInt(String columnName, int x) throws SQLException 
{ throw MSQLException.unsupported(); } public void updateLong(String columnName, long x) throws SQLException { throw MSQLException.unsupported(); } public void updateFloat(String columnName, float x) throws SQLException { throw MSQLException.unsupported(); } public void updateDouble(String columnName, double x) throws SQLException { throw MSQLException.unsupported(); } public void updateBigDecimal(String columnName, BigDecimal x) throws SQLException { throw MSQLException.unsupported(); } public void updateString(String columnName, String x) throws SQLException { throw MSQLException.unsupported(); } public void updateBytes(String columnName, byte x[]) throws SQLException { throw MSQLException.unsupported(); } public void updateDate(String columnName, java.sql.Date x) throws SQLException { throw MSQLException.unsupported(); } public void updateTime(String columnName, java.sql.Time x) throws SQLException { throw MSQLException.unsupported(); } public void updateTimestamp(String columnName, java.sql.Timestamp x) throws SQLException { throw MSQLException.unsupported(); } public void updateAsciiStream(String columnName, java.io.InputStream x, int length) throws SQLException { throw MSQLException.unsupported(); } public void updateBinaryStream(String columnName, java.io.InputStream x, int length) throws SQLException { throw MSQLException.unsupported(); } public void updateCharacterStream(String columnName, java.io.Reader reader, int length) throws SQLException { throw MSQLException.unsupported(); } public void updateObject(String columnName, Object x, int scale) throws SQLException { throw MSQLException.unsupported(); } public void updateObject(String columnName, Object x) throws SQLException { throw MSQLException.unsupported(); } public void insertRow() throws SQLException { throw MSQLException.unsupported(); } public void updateRow() throws SQLException { throw MSQLException.unsupported(); } public void deleteRow() throws SQLException { throw MSQLException.unsupported(); 
} public void refreshRow() throws SQLException { throw MSQLException.unsupported(); } public void cancelRowUpdates() throws SQLException { throw MSQLException.unsupported(); } public void moveToInsertRow() throws SQLException { throw MSQLException.unsupported(); } public void moveToCurrentRow() throws SQLException { throw MSQLException.unsupported(); } public Statement getStatement() throws SQLException { return statement; } public Object getObject(int i, java.util.Map map) throws SQLException { // Haven't had time to research what exactly needs to be stored in the // map, so I'm defaulting this to 'getObject' return getObject(i); } public Ref getRef(int i) throws SQLException { // Interesting idea this... Can't really see the applications of it // though unless you dealing with a really big cell and you just want to // pass around the reference rather than the actual cell contents. // Easy to fudge an implementation for this if an application needs it. throw MSQLException.unsupported(); } public Blob getBlob(int i) throws SQLException { // I'm assuming we must return 'null' for a null blob.... Object ob = getRawColumn(i); if (ob != null) { try { return asBlob(ob); } catch (ClassCastException e) { throw new SQLException("Column " + i + " is not a binary column."); } } return null; } public Clob getClob(int i) throws SQLException { // I'm assuming we must return 'null' for a null clob.... Object ob = getRawColumn(i); if (ob != null) { try { return asClob(ob); } catch (ClassCastException e) { throw new SQLException("Column " + i + " is not a character column."); } } return null; } public Array getArray(int i) throws SQLException { // Arrays not available in database... 
throw MSQLException.unsupported(); } public Object getObject(String colName, java.util.Map map) throws SQLException { // Haven't had time to research what exactly needs to be stored in the // map, so I'm defaulting this to 'getObject' return getObject(colName); } public Ref getRef(String colName) throws SQLException { throw MSQLException.unsupported(); } public Blob getBlob(String colName) throws SQLException { return getBlob(findColumnIndex(colName)); } public Clob getClob(String colName) throws SQLException { return getClob(findColumnIndex(colName)); } public Array getArray(String colName) throws SQLException { throw MSQLException.unsupported(); } public java.sql.Date getDate(int columnIndex, Calendar cal) throws SQLException { return getDate(columnIndex); } public java.sql.Date getDate(String columnName, Calendar cal) throws SQLException { return getDate(columnName); } public java.sql.Time getTime(int columnIndex, Calendar cal) throws SQLException { return getTime(columnIndex); } public java.sql.Time getTime(String columnName, Calendar cal) throws SQLException { return getTime(columnName); } public java.sql.Timestamp getTimestamp(int columnIndex, Calendar cal) throws SQLException { return getTimestamp(columnIndex); } public java.sql.Timestamp getTimestamp(String columnName, Calendar cal) throws SQLException { return getTimestamp(columnName); } //#ENDIF //#IFDEF(JDBC3.0) //-------------------------- JDBC 3.0 ---------------------------------------- public java.net.URL getURL(int columnIndex) throws SQLException { throw MSQLException.unsupported(); } public java.net.URL getURL(String columnName) throws SQLException { throw MSQLException.unsupported(); } public void updateRef(int columnIndex, java.sql.Ref x) throws SQLException { throw MSQLException.unsupported(); } public void updateRef(String columnName, java.sql.Ref x) throws SQLException { throw MSQLException.unsupported(); } public void updateBlob(int columnIndex, java.sql.Blob x) throws SQLException { throw 
MSQLException.unsupported(); } public void updateBlob(String columnName, java.sql.Blob x) throws SQLException { throw MSQLException.unsupported(); } public void updateClob(int columnIndex, java.sql.Clob x) throws SQLException { throw MSQLException.unsupported(); } public void updateClob(String columnName, java.sql.Clob x) throws SQLException { throw MSQLException.unsupported(); } public void updateArray(int columnIndex, java.sql.Array x) throws SQLException { throw MSQLException.unsupported(); } public void updateArray(String columnName, java.sql.Array x) throws SQLException { throw MSQLException.unsupported(); } //#ENDIF //#IFDEF(JDBC4.0) // -------------------------- JDK 1.6 ----------------------------------- public RowId getRowId(int columnIndex) throws SQLException { throw MSQLException.unsupported16(); } public RowId getRowId(String columnLabel) throws SQLException { throw MSQLException.unsupported16(); } public void updateRowId(int columnIndex, RowId x) throws SQLException { throw MSQLException.unsupported16(); } public void updateRowId(String columnLabel, RowId x) throws SQLException { throw MSQLException.unsupported16(); } public int getHoldability() throws SQLException { return ResultSet.HOLD_CURSORS_OVER_COMMIT; } public boolean isClosed() throws SQLException { return getResultID() == -1; } public void updateNString(int columnIndex, String nString) throws SQLException { throw MSQLException.unsupported16(); } public void updateNString(String columnLabel, String nString) throws SQLException { throw MSQLException.unsupported16(); } public void updateNClob(int columnIndex, NClob nClob) throws SQLException { throw MSQLException.unsupported16(); } public void updateNClob(String columnLabel, NClob nClob) throws SQLException { throw MSQLException.unsupported16(); } public NClob getNClob(int columnIndex) throws SQLException { throw MSQLException.unsupported16(); } public NClob getNClob(String columnLabel) throws SQLException { throw MSQLException.unsupported16(); 
} public SQLXML getSQLXML(int columnIndex) throws SQLException { throw MSQLException.unsupported16(); } public SQLXML getSQLXML(String columnLabel) throws SQLException { throw MSQLException.unsupported16(); } public void updateSQLXML(int columnIndex, SQLXML xmlObject) throws SQLException { throw MSQLException.unsupported16(); } public void updateSQLXML(String columnLabel, SQLXML xmlObject) throws SQLException { throw MSQLException.unsupported16(); } public String getNString(int columnIndex) throws SQLException { throw MSQLException.unsupported16(); } public String getNString(String columnLabel) throws SQLException { throw MSQLException.unsupported16(); } public Reader getNCharacterStream(int columnIndex) throws SQLException { throw MSQLException.unsupported16(); } public Reader getNCharacterStream(String columnLabel) throws SQLException { throw MSQLException.unsupported16(); } public void updateNCharacterStream(int columnIndex, Reader x, long length) throws SQLException { throw MSQLException.unsupported16(); } public void updateNCharacterStream(String columnLabel, Reader reader, long length) throws SQLException { throw MSQLException.unsupported16(); } public void updateAsciiStream(int columnIndex, InputStream x, long length) throws SQLException { throw MSQLException.unsupported16(); } public void updateBinaryStream(int columnIndex, InputStream x, long length) throws SQLException { throw MSQLException.unsupported16(); } public void updateCharacterStream(int columnIndex, Reader x, long length) throws SQLException { throw MSQLException.unsupported16(); } public void updateAsciiStream(String columnLabel, InputStream x, long length) throws SQLException { throw MSQLException.unsupported16(); } public void updateBinaryStream(String columnLabel, InputStream x, long length) throws SQLException { throw MSQLException.unsupported16(); } public void updateCharacterStream(String columnLabel, Reader reader, long length) throws SQLException { throw MSQLException.unsupported16(); } 
public void updateBlob(int columnIndex, InputStream inputStream, long length) throws SQLException { throw MSQLException.unsupported16(); } public void updateBlob(String columnLabel, InputStream inputStream, long length) throws SQLException { throw MSQLException.unsupported16(); } public void updateClob(int columnIndex, Reader reader, long length) throws SQLException { throw MSQLException.unsupported16(); } public void updateClob(String columnLabel, Reader reader, long length) throws SQLException { throw MSQLException.unsupported16(); } public void updateNClob(int columnIndex, Reader reader, long length) throws SQLException { throw MSQLException.unsupported16(); } public void updateNClob(String columnLabel, Reader reader, long length) throws SQLException { throw MSQLException.unsupported16(); } public void updateNCharacterStream(int columnIndex, Reader x) throws SQLException { throw MSQLException.unsupported16(); } public void updateNCharacterStream(String columnLabel, Reader reader) throws SQLException { throw MSQLException.unsupported16(); } public void updateAsciiStream(int columnIndex, InputStream x) throws SQLException { throw MSQLException.unsupported16(); } public void updateBinaryStream(int columnIndex, InputStream x) throws SQLException { throw MSQLException.unsupported16(); } public void updateCharacterStream(int columnIndex, Reader x) throws SQLException { throw MSQLException.unsupported16(); } public void updateAsciiStream(String columnLabel, InputStream x) throws SQLException { throw MSQLException.unsupported16(); } public void updateBinaryStream(String columnLabel, InputStream x) throws SQLException { throw MSQLException.unsupported16(); } public void updateCharacterStream(String columnLabel, Reader reader) throws SQLException { throw MSQLException.unsupported16(); } public void updateBlob(int columnIndex, InputStream inputStream) throws SQLException { throw MSQLException.unsupported16(); } public void updateBlob(String columnLabel, InputStream 
inputStream) throws SQLException { throw MSQLException.unsupported16(); } public void updateClob(int columnIndex, Reader reader) throws SQLException { throw MSQLException.unsupported16(); } public void updateClob(String columnLabel, Reader reader) throws SQLException { throw MSQLException.unsupported16(); } public void updateNClob(int columnIndex, Reader reader) throws SQLException { throw MSQLException.unsupported16(); } public void updateNClob(String columnLabel, Reader reader) throws SQLException { throw MSQLException.unsupported16(); } public Object unwrap(Class iface) throws SQLException { throw MSQLException.unsupported16(); } public boolean isWrapperFor(Class iface) throws SQLException { throw MSQLException.unsupported16(); } //#ENDIF //#IFDEF(JDBC5.0) // -------------------------- JDK 1.7 ----------------------------------- public Object getObject(int columnIndex, Class type) throws SQLException { throw MSQLException.unsupported16(); } public Object getObject(String columnLabel, Class type) throws SQLException { throw MSQLException.unsupported16(); } //#ENDIF // ---------- Finalize ---------- public void finalize() { dispose(); } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/jdbc/MResultSetMetaData.java000066400000000000000000000243351330501023400271410ustar00rootroot00000000000000/** * com.mckoi.database.jdbc.MResultSetMetaData 23 Jul 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.jdbc; import com.mckoi.database.global.ColumnDescription; import com.mckoi.database.global.SQLTypes; import java.sql.*; import java.math.BigDecimal; /** * An implementation of JDBC's ResultSetmetaData. * * @author Tobias Downer */ public class MResultSetMetaData implements ResultSetMetaData { /** * The parent MResultSet object. */ private MResultSet result_set; /** * Constructs the ResultSetMetaData over the given result set. */ MResultSetMetaData(MResultSet result_set) { this.result_set = result_set; } /** * Returns the object class that a given sql_type will produce by the * 'getObject' call in ResultSet. */ private static Class jdbcSQLClass(int sql_type) { switch (sql_type) { case(SQLTypes.BIT): return Boolean.class; case(SQLTypes.TINYINT): return Byte.class; case(SQLTypes.SMALLINT): return Short.class; case(SQLTypes.INTEGER): return Integer.class; case(SQLTypes.BIGINT): return Long.class; case(SQLTypes.FLOAT): return Double.class; case(SQLTypes.REAL): return Float.class; case(SQLTypes.DOUBLE): return Double.class; case(SQLTypes.NUMERIC): return BigDecimal.class; case(SQLTypes.DECIMAL): return BigDecimal.class; case(SQLTypes.CHAR): return String.class; case(SQLTypes.VARCHAR): return String.class; case(SQLTypes.LONGVARCHAR): return String.class; case(SQLTypes.DATE): return java.sql.Date.class; case(SQLTypes.TIME): return java.sql.Time.class; case(SQLTypes.TIMESTAMP): return java.sql.Timestamp.class; case(SQLTypes.BINARY): return byte[].class; case(SQLTypes.VARBINARY): return byte[].class; case(SQLTypes.LONGVARBINARY): return byte[].class; case(SQLTypes.NULL): return Object.class; case(SQLTypes.OTHER): return Object.class; case(SQLTypes.JAVA_OBJECT): return Object.class; case(SQLTypes.DISTINCT): // (Not supported) return Object.class; case(SQLTypes.STRUCT): // (Not supported) return Object.class; case(SQLTypes.ARRAY): // (Not 
supported) return Object.class; //#IFDEF(JDBC2.0) case(SQLTypes.BLOB): return java.sql.Blob.class; case(SQLTypes.CLOB): return java.sql.Clob.class; case(SQLTypes.REF): // (Not supported) return Object.class; //#ENDIF default: return Object.class; } } // ---------- Implemented from ResultSetMetaData ---------- public int getColumnCount() throws SQLException { return result_set.columnCount(); } public boolean isAutoIncrement(int column) throws SQLException { // There are no hard-coded auto increment columns but you can make one // with the UNIQUEKEY function. return false; } public boolean isCaseSensitive(int column) throws SQLException { return true; } public boolean isSearchable(int column) throws SQLException { return result_set.getColumn(column -1).isQuantifiable(); } public boolean isCurrency(int column) throws SQLException { // Currency not supported by the driver or the database. return false; } public int isNullable(int column) throws SQLException { if (result_set.getColumn(column - 1).isNotNull()) { return columnNoNulls; } else { return columnNullable; } } public boolean isSigned(int column) throws SQLException { // There are no unsigned numbers.... if (result_set.getColumn(column - 1).isNumericType()) { return true; } else { // All other types aren't signed (strings, dates, etc) return false; } } public int getColumnDisplaySize(int column) throws SQLException { // How can we implement this when strings and numbers // can be any length? return 64; } public String getColumnLabel(int column) throws SQLException { // ISSUE: Should this process be cached? Could be a problem if this // method is used in an inner loop. 
(A string object is created) String encoded_name = result_set.getColumn(column - 1).getName(); if (encoded_name.startsWith("@a")) { // Strip any control characters and return return encoded_name.substring(2); } else if (encoded_name.startsWith("@f")) { // Return only the column name, not the schema.table part int p = encoded_name.lastIndexOf("."); if (p > -1) { return encoded_name.substring(p + 1); } else { return encoded_name.substring(2); } } // No encoding (must be an older version of the database engine). return encoded_name; } public String getColumnName(int column) throws SQLException { // If the JDBC driver is set to succinct column names (the default) then // return what 'getColumnLabel' tells us. if (!result_set.verboseColumnNames()) { return getColumnLabel(column); } else { // ISSUE: Should this process be cached? Could be a problem if this // method is used in an inner loop. (A string object is created) String encoded_name = result_set.getColumn(column - 1).getName(); if (encoded_name.startsWith("@")) { // Strip any control characters and return return encoded_name.substring(2); } // No encoding (must be an older version of the database engine). return encoded_name; } } public String getSchemaName(int column) throws SQLException { ColumnDescription col = result_set.getColumn(column - 1); String name = col.getName(); // Do we have a column code. 
If not default to 'f' char col_code = 'f'; int name_start = 0; if (name.startsWith("@")) { col_code = name.charAt(1); name_start = 2; } if (col_code == 'a') { // This is an alias so there is no table name return ""; } else if (col_code == 'f') { // Assume it is [schema_name].[table_name].[column_name] int delim = name.lastIndexOf("."); if (delim == -1) { return ""; } else { delim = name.lastIndexOf(".", delim - 1); if (delim == -1) { return ""; } else { int end_point = delim; delim = name.lastIndexOf(".", delim - 1); if (delim == -1) { return name.substring(name_start, end_point); } else { return name.substring(delim + 1, end_point); } } } } else { throw new SQLException("Unknown column code: '" + col_code + "'"); } } public int getPrecision(int column) throws SQLException { // HACK: Precision is not a property we define for columns yet.... // For *CHAR columns, we make this return the max size of the string int size = result_set.getColumn(column - 1).getSize(); if (size == -1) { size = 32; } return size; } public int getScale(int column) throws SQLException { int scale = result_set.getColumn(column - 1).getScale(); if (scale == -1) { scale = 0; } return scale; } public String getTableName(int column) throws SQLException { ColumnDescription col = result_set.getColumn(column - 1); String name = col.getName(); // Do we have a column code. 
If not default to 'f' char col_code = 'f'; int name_start = 0; if (name.startsWith("@")) { col_code = name.charAt(1); name_start = 2; } if (col_code == 'a') { // This is an alias so there is no table name return ""; } else if (col_code == 'f') { // Assume it is [schema_name].[table_name].[column_name] int delim = name.lastIndexOf("."); if (delim == -1) { return ""; } else { int end_point = delim; delim = name.lastIndexOf(".", end_point - 1); if (delim == -1) { return name.substring(name_start, end_point); } else { return name.substring(delim + 1, end_point); } } } else { throw new SQLException("Unknown column code: '" + col_code + "'"); } } public String getCatalogName(int column) throws SQLException { // No support for catalogs return ""; } public int getColumnType(int column) throws SQLException { return result_set.getColumn(column - 1).getSQLType(); } public String getColumnTypeName(int column) throws SQLException { return result_set.getColumn(column - 1).getSQLTypeName(); } public boolean isReadOnly(int column) throws SQLException { return false; } public boolean isWritable(int column) throws SQLException { return true; } public boolean isDefinitelyWritable(int column) throws SQLException { return false; } //#IFDEF(JDBC2.0) //--------------------------JDBC 2.0----------------------------------- public String getColumnClassName(int column) throws SQLException { // PENDING: This should return the instance class name set as the // constraint for a JAVA_OBJECT column. 
return jdbcSQLClass(result_set.getColumn(column - 1).getSQLType()).getName(); // return result_set.getColumn(column - 1).classType().getName(); } //#ENDIF //#IFDEF(JDBC4.0) // -------------------------- JDK 1.6 ----------------------------------- public Object unwrap(Class iface) throws SQLException { throw MSQLException.unsupported(); } public boolean isWrapperFor(Class iface) throws SQLException { throw MSQLException.unsupported16(); } //#ENDIF } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/jdbc/MSQLException.java000066400000000000000000000077161330501023400261300ustar00rootroot00000000000000/** * com.mckoi.database.jdbc.MSQLException 16 Aug 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.jdbc; import java.sql.SQLException; import java.io.*; /** * SQLException used by the McKoi database engine. * * @author Tobias Downer */ public class MSQLException extends SQLException { private String server_error_msg; private String server_stack_trace; public MSQLException(String reason, String SQLState, int vendorCode) { super(reason, SQLState, vendorCode); } public MSQLException(String reason, String SQLState) { super(reason, SQLState); } public MSQLException(String reason) { super(reason); } public MSQLException() { super(); } /** * MSQL Specific. This stores the reason, the server exception message, and * the server stack trace. 
*/ public MSQLException(String reason, String server_error_msg, int vendor_code, Throwable server_error) { super(reason, null, vendor_code); this.server_error_msg = server_error_msg; if (server_error != null) { StringWriter writer = new StringWriter(); server_error.printStackTrace(new PrintWriter(writer)); this.server_stack_trace = writer.toString(); } else { this.server_stack_trace = "<< NO SERVER STACK TRACE >>"; } } /** * MSQL Specific. This stores the reason, the server exception message, and * the server stack trace as a string. */ public MSQLException(String reason, String server_error_msg, int vendor_code, String server_error_trace) { super(reason, null, vendor_code); this.server_error_msg = server_error_msg; this.server_stack_trace = server_error_trace; } /** * Returns the error message that generated this exception. */ public String getServerErrorMsg() { return server_error_msg; } /** * Returns the server side stack trace for this error. */ public String getServerErrorStackTrace() { return server_stack_trace; } /** * Overwrites the print stack trace information with some more detailed * information about the error. */ public void printStackTrace() { printStackTrace(System.err); } /** * Overwrites the print stack trace information with some more detailed * information about the error. */ public void printStackTrace(PrintStream s) { synchronized(s) { super.printStackTrace(s); if (server_stack_trace != null) { s.print("CAUSE: "); s.println(server_stack_trace); } } } /** * Overwrites the print stack trace information with some more detailed * information about the error. */ public void printStackTrace(PrintWriter s) { synchronized(s) { super.printStackTrace(s); if (server_stack_trace != null) { s.print("CAUSE: "); s.println(server_stack_trace); } } } /** * Returns an SQLException that is used for all unsupported features of the * JDBC driver. 
*/ public static SQLException unsupported() { return new MSQLException("Not Supported"); } //#IFDEF(JDBC4.0) // -------------------------- JDK 1.6 ----------------------------------- /** * Generates the feature not supported exception. */ public static SQLException unsupported16() { return new java.sql.SQLFeatureNotSupportedException(); } //#ENDIF } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/jdbc/MStatement.java000066400000000000000000000434121330501023400255470ustar00rootroot00000000000000/** * com.mckoi.database.jdbc.MStatement 20 Jul 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.jdbc; import com.mckoi.database.global.StreamableObject; import java.io.*; import java.sql.*; import java.util.ArrayList; import java.util.List; /** * An implementation of JDBC Statement. *

* Multi-threaded issue: This class is not designed to be multi-thread * safe. A Statement should not be accessed by concurrent threads. * * @author Tobias Downer */ class MStatement implements Statement { /** * The MConnection object for this statement. */ private MConnection connection; /** * The list of all MResultSet objects that represents the results of a query. */ private MResultSet[] result_set_list; private int max_field_size; private int max_row_count; private int query_timeout; private int fetch_size; private SQLWarning head_warning; private boolean escape_processing; /** * The list of queries to execute in a batch. */ private List batch_list; /** * The list of streamable objects created via the 'createStreamableObject' * method. */ private List streamable_object_list; /** * For multiple result sets, the index of the result set we are currently on. */ private int multi_result_set_index; /** * Constructs the statement. */ MStatement(MConnection connection) { this.connection = connection; this.escape_processing = true; } /** * Adds a new SQLWarning to the chain. */ final void addSQLWarning(SQLWarning warning) { if (head_warning == null) { head_warning = warning; } else { head_warning.setNextWarning(warning); } } /** * Returns an array of ResultSet objects of the give length for this * statement. This is intended for multiple result queries (such as batch * statements). */ final MResultSet[] internalResultSetList(int count) { if (count <= 0) { throw new Error("'count' must be > 0"); } if (result_set_list != null && result_set_list.length != count) { // Dispose all the ResultSet objects currently open. for (int i = 0; i < result_set_list.length; ++i) { result_set_list[i].dispose(); } result_set_list = null; } if (result_set_list == null) { result_set_list = new MResultSet[count]; for (int i = 0; i < count; ++i) { result_set_list[i] = new MResultSet(connection, this); } } return result_set_list; } /** * Returns the single ResultSet object for this statement. 
This should only * be used for single result queries. */ final MResultSet internalResultSet() { return internalResultSetList(1)[0]; } /** * Generates a new StreamableObject and stores it in the hold for future * access by the server. */ protected StreamableObject createStreamableObject(InputStream x, int length, byte type) { StreamableObject s_ob = connection.createStreamableObject(x, length, type); if (streamable_object_list == null) { streamable_object_list = new ArrayList(); } streamable_object_list.add(s_ob); return s_ob; } /** * Adds a query to the batch of queries executed by this statement. */ protected void addBatch(SQLQuery query) { if (batch_list == null) { batch_list = new ArrayList(); } batch_list.add(query); } /** * Executes the given SQLQuery object and fill's in at most the top 10 * entries of the result set. */ protected MResultSet executeQuery(SQLQuery query) throws SQLException { // Get the local result set MResultSet result_set = internalResultSet(); // Execute the query executeQueries(new SQLQuery[] { query }); // Return the result set return result_set; } /** * Executes a batch of SQL queries as listed as an array. */ protected MResultSet[] executeQueries(SQLQuery[] queries) throws SQLException { // Allocate the result set for this batch MResultSet[] results = internalResultSetList(queries.length); // Reset the result set index multi_result_set_index = 0; // For each query, for (int i = 0; i < queries.length; ++i) { // Prepare the query queries[i].prepare(escape_processing); // Make sure the result set is closed results[i].closeCurrentResult(); } // Execute each query connection.executeQueries(queries, results); // Post processing on the ResultSet objects for (int i = 0; i < queries.length; ++i) { MResultSet result_set = results[i]; // Set the fetch size result_set.setFetchSize(fetch_size); // Set the max row count result_set.setMaxRowCount(max_row_count); // Does the result set contain large objects? 
We can't cache a // result that contains binary data. boolean contains_large_objects = result_set.containsLargeObjects(); // If the result row count < 40 then download and store locally in the // result set and dispose the resources on the server. if (!contains_large_objects && result_set.rowCount() < 40) { result_set.storeResultLocally(); } else { result_set.updateResultPart(0, Math.min(10, result_set.rowCount())); } } return results; } // ---------- Implemented from Statement ---------- public ResultSet executeQuery(String sql) throws SQLException { return executeQuery(new SQLQuery(sql)); } public int executeUpdate(String sql) throws SQLException { MResultSet result_set = executeQuery(new SQLQuery(sql)); return result_set.intValue(); // Throws SQL error if not 1 col 1 row } public void close() throws SQLException { // Behaviour of calls to Statement undefined after this method finishes. if (result_set_list != null) { for (int i = 0; i < result_set_list.length; ++i) { result_set_list[i].dispose(); } result_set_list = null; } // Remove any streamable objects that have been created on the client // side. if (streamable_object_list != null) { int sz = streamable_object_list.size(); for (int i = 0; i < sz; ++i) { StreamableObject s_object = (StreamableObject) streamable_object_list.get(i); connection.removeStreamableObject(s_object); } streamable_object_list = null; } } //---------------------------------------------------------------------- public int getMaxFieldSize() throws SQLException { // Are there limitations here? Strings can be any size... 
return max_field_size; } public void setMaxFieldSize(int max) throws SQLException { if (max >= 0) { max_field_size = max; } else { throw new SQLException("MaxFieldSize negative."); } } public int getMaxRows() throws SQLException { return max_row_count; } public void setMaxRows(int max) throws SQLException { if (max >= 0) { max_row_count = max; } else { throw new SQLException("MaxRows negative."); } } public void setEscapeProcessing(boolean enable) throws SQLException { escape_processing = enable; } public int getQueryTimeout() throws SQLException { return query_timeout; } public void setQueryTimeout(int seconds) throws SQLException { if (seconds >= 0) { query_timeout = seconds; // Hack: We set the global query timeout for the driver in this VM. // This global value is used in RemoteDatabaseInterface. // // This is a nasty 'global change' hack. A developer may wish to // set a long timeout for one statement and a short for a different // one however the timeout for all queries will be the very last time // out set by any statement. Unfortunately to fix this problem, we'll // need to make a revision to the DatabaseInterface interface. I // don't think this is worth doing because I don't see this as being a // major limitation of the driver. MDriver.QUERY_TIMEOUT = seconds; } else { throw new SQLException("Negative query timout."); } } public void cancel() throws SQLException { if (result_set_list != null) { for (int i = 0; i < result_set_list.length; ++i) { connection.disposeResult(result_set_list[i].getResultID()); } } } public SQLWarning getWarnings() throws SQLException { return head_warning; } public void clearWarnings() throws SQLException { head_warning = null; } public void setCursorName(String name) throws SQLException { // Cursors not supported... } //----------------------- Multiple Results -------------------------- // NOTE: Mckoi database doesn't support multiple result sets. 
I think multi- // result sets are pretty nasty anyway - are they really necessary? // We do support the 'Multiple Results' interface for 1 result set. public boolean execute(String sql) throws SQLException { MResultSet result_set = executeQuery(new SQLQuery(sql)); return !result_set.isUpdate(); } public ResultSet getResultSet() throws SQLException { if (result_set_list != null) { if (multi_result_set_index < result_set_list.length) { return result_set_list[multi_result_set_index]; } } return null; } public int getUpdateCount() throws SQLException { if (result_set_list != null) { if (multi_result_set_index < result_set_list.length) { MResultSet rs = result_set_list[multi_result_set_index]; if (rs.isUpdate()) { return rs.intValue(); } } } return -1; } public boolean getMoreResults() throws SQLException { // If we are at the end then return false if (result_set_list == null || multi_result_set_index >= result_set_list.length) { return false; } // Move to the next result set. ++multi_result_set_index; // We successfully moved to the next result return true; } //--------------------------JDBC 2.0----------------------------- // NOTE: These methods are provided as extensions for the JDBC 1.0 driver. public void setFetchSize(int rows) throws SQLException { if (rows >= 0) { fetch_size = rows; } else { throw new SQLException("Negative fetch size."); } } public int getFetchSize() throws SQLException { return fetch_size; } //#IFDEF(JDBC2.0) public void setFetchDirection(int direction) throws SQLException { // We could use this hint to improve cache hits..... } public int getFetchDirection() throws SQLException { return ResultSet.FETCH_UNKNOWN; } public int getResultSetConcurrency() throws SQLException { // Read only I'm afraid... return ResultSet.CONCUR_READ_ONLY; } public int getResultSetType() throws SQLException { // Scroll insensitive operation only... 
return ResultSet.TYPE_SCROLL_INSENSITIVE; } public void addBatch(String sql) throws SQLException { addBatch(new SQLQuery(sql)); } public void clearBatch() throws SQLException { batch_list = null; } public int[] executeBatch() throws SQLException { // Execute the batch, if (batch_list == null) { // Batch list is empty - nothing to do throw new SQLException("Batch list is empty - nothing to do."); } int sz = batch_list.size(); SQLQuery[] batch_query_list = new SQLQuery[sz]; for (int i = 0; i < sz; ++i) { batch_query_list[i] = (SQLQuery) batch_list.get(i); } try { // Execute the batch and find the results in the resultant array MResultSet[] batch_results = executeQueries(batch_query_list); // Put the result into an update array int[] update_result = new int[sz]; for (int i = 0; i < sz; ++i) { update_result[i] = batch_results[i].intValue(); batch_results[i].closeCurrentResult(); } return update_result; } finally { // Make sure we clear the batch list. clearBatch(); } } public Connection getConnection() throws SQLException { return connection; } //#ENDIF //#IFDEF(JDBC3.0) //--------------------------JDBC 3.0----------------------------- public boolean getMoreResults(int current) throws SQLException { return getMoreResults(); } public ResultSet getGeneratedKeys() throws SQLException { throw MSQLException.unsupported(); } public int executeUpdate(String sql, int autoGeneratedKeys) throws SQLException { throw MSQLException.unsupported(); } public int executeUpdate(String sql, int columnIndexes[]) throws SQLException { throw MSQLException.unsupported(); } public int executeUpdate(String sql, String columnNames[]) throws SQLException { throw MSQLException.unsupported(); } public boolean execute(String sql, int autoGeneratedKeys) throws SQLException { throw MSQLException.unsupported(); } public boolean execute(String sql, int columnIndexes[]) throws SQLException { throw MSQLException.unsupported(); } public boolean execute(String sql, String columnNames[]) throws SQLException 
{ throw MSQLException.unsupported(); } public int getResultSetHoldability() throws SQLException { // In Mckoi, all cursors may be held over commit. return ResultSet.HOLD_CURSORS_OVER_COMMIT; } //#ENDIF //#IFDEF(JDK1.6) // -------------------------- JDK 1.6 ----------------------------------- public boolean isClosed() throws SQLException { return result_set_list == null; } public void setPoolable(boolean poolable) throws SQLException { } public boolean isPoolable() throws SQLException { return true; } public Object unwrap(Class iface) throws SQLException { throw MSQLException.unsupported16(); } public boolean isWrapperFor(Class iface) throws SQLException { throw MSQLException.unsupported16(); } public void setRowId(int parameterIndex, RowId x) throws SQLException { throw MSQLException.unsupported16(); } public void setNString(int parameterIndex, String value) throws SQLException { throw MSQLException.unsupported16(); } public void setNCharacterStream(int parameterIndex, Reader value, long length) throws SQLException { throw MSQLException.unsupported16(); } public void setNClob(int parameterIndex, NClob value) throws SQLException { throw MSQLException.unsupported16(); } public void setClob(int parameterIndex, Reader reader, long length) throws SQLException { throw MSQLException.unsupported16(); } public void setBlob(int parameterIndex, InputStream inputStream, long length) throws SQLException { throw MSQLException.unsupported16(); } public void setNClob(int parameterIndex, Reader reader, long length) throws SQLException { throw MSQLException.unsupported16(); } public void setSQLXML(int parameterIndex, SQLXML xmlObject) throws SQLException { throw MSQLException.unsupported16(); } public void setAsciiStream(int parameterIndex, InputStream x, long length) throws SQLException { throw MSQLException.unsupported16(); } public void setBinaryStream(int parameterIndex, InputStream x, long length) throws SQLException { throw MSQLException.unsupported16(); } public void 
setCharacterStream(int parameterIndex, Reader reader, long length) throws SQLException { throw MSQLException.unsupported16(); } public void setAsciiStream(int parameterIndex, InputStream x) throws SQLException { throw MSQLException.unsupported16(); } public void setBinaryStream(int parameterIndex, InputStream x) throws SQLException { throw MSQLException.unsupported16(); } public void setCharacterStream(int parameterIndex, Reader reader) throws SQLException { throw MSQLException.unsupported16(); } public void setNCharacterStream(int parameterIndex, Reader value) throws SQLException { throw MSQLException.unsupported16(); } public void setClob(int parameterIndex, Reader reader) throws SQLException { throw MSQLException.unsupported16(); } public void setBlob(int parameterIndex, InputStream inputStream) throws SQLException { throw MSQLException.unsupported16(); } public void setNClob(int parameterIndex, Reader reader) throws SQLException { throw MSQLException.unsupported16(); } //#ENDIF //#IFDEF(JDBC5.0) // -------------------------- JDK 1.7 ----------------------------------- public void closeOnCompletion() throws SQLException { // This is a no-op in Mckoi. // The reason being that no resources are consumed when all the result // sets created by a statement are closed. Therefore it's safe to have // this be a 'no-op'. } public boolean isCloseOnCompletion() throws SQLException { return false; } //#ENDIF // ---------- Finalize ---------- /** * The statement will close when it is garbage collected. */ public void finalize() { try { close(); } catch (SQLException e) { /* ignore */ } } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/jdbc/MStreamableBlob.java000066400000000000000000000073471330501023400264700ustar00rootroot00000000000000/** * com.mckoi.database.jdbc.MStreamableBlob 22 Jan 2003 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.jdbc; import java.io.*; import java.sql.Blob; import java.sql.SQLException; /** * A Blob that is a large object that may be streamed from the server directly * to this object. A blob that is streamable is only alive for the lifetime of * the result set it is part of. If the underlying result set that contains * this streamable blob is closed then this blob is no longer valid. * * @author Tobias Downer */ class MStreamableBlob extends AbstractStreamableObject implements Blob { /** * Constructs the blob. */ MStreamableBlob(MConnection connection, int result_set_id, byte type, long streamable_object_id, long size) { super(connection, result_set_id, type, streamable_object_id, size); } // ---------- Implemented from Blob ---------- public long length() throws SQLException { return rawSize(); } public byte[] getBytes(long pos, int length) throws SQLException { // First byte is at position 1 according to JDBC Spec. 
--pos; if (pos < 0 || pos + length > length()) { throw new SQLException("Out of bounds."); } // The buffer we are reading into byte[] buf = new byte[length]; InputStream i_stream = getBinaryStream(); try { i_stream.skip(pos); for (int i = 0; i < length; ++i) { buf[i] = (byte) i_stream.read(); } } catch (IOException e) { e.printStackTrace(System.err); throw new SQLException("IO Error: " + e.getMessage()); } return buf; } public InputStream getBinaryStream() throws SQLException { return new StreamableObjectInputStream(rawSize()); } public long position(byte[] pattern, long start) throws SQLException { throw MSQLException.unsupported(); } public long position(Blob pattern, long start) throws SQLException { throw MSQLException.unsupported(); } //#IFDEF(JDBC3.0) // -------------------------- JDBC 3.0 ----------------------------------- public int setBytes(long pos, byte[] bytes) throws SQLException { throw MSQLException.unsupported(); } public int setBytes(long pos, byte[] bytes, int offset, int len) throws SQLException { throw MSQLException.unsupported(); } public java.io.OutputStream setBinaryStream(long pos) throws SQLException { throw MSQLException.unsupported(); } public void truncate(long len) throws SQLException { throw MSQLException.unsupported(); } //#ENDIF //#IFDEF(JDBC4.0) // -------------------------- JDK 1.6 ----------------------------------- public void free() throws SQLException { } public InputStream getBinaryStream(long pos, long length) throws SQLException { long end = pos + length; if (end > rawSize() || end < 0) { throw new java.lang.IndexOutOfBoundsException(); } InputStream is = new StreamableObjectInputStream(end); try { is.skip(pos); } catch (IOException e) { throw new SQLException(e); } return is; } //#ENDIF } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/jdbc/MStreamableClob.java000066400000000000000000000112311330501023400264540ustar00rootroot00000000000000/** * com.mckoi.database.jdbc.MStreamableClob 31 Jan 2003 * * Mckoi SQL Database ( 
http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.jdbc; import java.io.*; import java.sql.Clob; import java.sql.SQLException; /** * A Clob that is a large object that may be streamed from the server directly * to this object. A clob that is streamable is only alive for the lifetime of * the result set it is part of. If the underlying result set that contains * this streamable clob is closed then this clob is no longer valid. * * @author Tobias Downer */ class MStreamableClob extends AbstractStreamableObject implements Clob { /** * Constructs the Clob. 
*/ MStreamableClob(MConnection connection, int result_set_id, byte type, long streamable_object_id, long size) { super(connection, result_set_id, type, streamable_object_id, size); } // ---------- Implemented from Blob ---------- public long length() throws SQLException { if (getType() == 4) { return rawSize() / 2; } return rawSize(); } public String getSubString(long pos, int length) throws SQLException { int p = (int) (pos - 1); Reader reader = getCharacterStream(); try { reader.skip(p); StringBuffer buf = new StringBuffer(length); for (int i = 0; i < length; ++i) { int c = reader.read(); buf.append((char) c); } return new String(buf); } catch (IOException e) { e.printStackTrace(System.err); throw new SQLException("IO Error: " + e.getMessage()); } } public Reader getCharacterStream() throws SQLException { if (getType() == 3) { return new AsciiReader(new StreamableObjectInputStream(rawSize())); } else if (getType() == 4) { return new BinaryToUnicodeReader( new StreamableObjectInputStream(rawSize())); } else { throw new SQLException("Unknown type."); } } public java.io.InputStream getAsciiStream() throws SQLException { if (getType() == 3) { return new StreamableObjectInputStream(rawSize()); } else if (getType() == 4) { return new AsciiInputStream(getCharacterStream()); } else { throw new SQLException("Unknown type."); } } public long position(String searchstr, long start) throws SQLException { throw MSQLException.unsupported(); } public long position(Clob searchstr, long start) throws SQLException { throw MSQLException.unsupported(); } //#IFDEF(JDBC3.0) //---------------------------- JDBC 3.0 ----------------------------------- public int setString(long pos, String str) throws SQLException { throw MSQLException.unsupported(); } public int setString(long pos, String str, int offset, int len) throws SQLException { throw MSQLException.unsupported(); } public java.io.OutputStream setAsciiStream(long pos) throws SQLException { throw MSQLException.unsupported(); } public 
java.io.Writer setCharacterStream(long pos) throws SQLException { throw MSQLException.unsupported(); } public void truncate(long len) throws SQLException { throw MSQLException.unsupported(); } //#ENDIF //#IFDEF(JDBC4.0) // -------------------------- JDK 1.6 ----------------------------------- public void free() throws SQLException { } public Reader getCharacterStream(long pos, long length) throws SQLException { if (getType() == 3) { long raw_end = pos + length; if (raw_end > rawSize() || raw_end < 0) { throw new java.lang.IndexOutOfBoundsException(); } return new AsciiReader(new StreamableObjectInputStream(rawSize())); } else if (getType() == 4) { long raw_end = pos + (length * 2); if (raw_end > rawSize() || raw_end < 0) { throw new java.lang.IndexOutOfBoundsException(); } return new BinaryToUnicodeReader( new StreamableObjectInputStream(rawSize())); } else { throw new SQLException("Unknown type."); } } //#ENDIF } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/jdbc/MckoiConnection.java000066400000000000000000000122761330501023400265540ustar00rootroot00000000000000/** * com.mckoi.database.jdbc.MckoiConnection 04 Oct 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.jdbc; import java.sql.*; /** * Wraps a Connection and provides Mckoi specific extensions that are * outside the JDBC specification. *

* Example, *

 * Connection connection = java.sql.DriverManager.getConnection( .... );
 * MckoiConnection mckoi_connection = new MckoiConnection(connection);
 * // 'mckoi_connection' is used for mckoi specific comms.
 * 
* * @author Tobias Downer */ public final class MckoiConnection { /** * The wrapped MConnection. */ private MConnection connection; /** * Constructs the Mckoi specific extension access object. */ public MckoiConnection(Connection connection) { if (connection instanceof MConnection) { this.connection = (MConnection) connection; } else { throw new Error("Can only wrap a Mckoi Database JDBC connection."); } } /** * This method can be used to disable strict get object in ResultSet. If * strict get object is disabled then the 'getObject' method will return the * raw data type that the engine uses to represent the respective data * item. If it is enabled the 'getObject' method returns the correct type * as specified by the JDBC spec. *

* Strict get is enabled by default. */ public void setStrictGetObject(boolean status) { connection.setStrictGetObject(status); } /** * This method is used to enable verbose column names in ResultSetMetaData. * If verbose column names is enabled the getColumnName method returns * a string which includes the schema and table name. This property is * disabled by default and provided only for compatibility with older * Mckoi applications. */ public void setVerboseColumnNames(boolean status) { connection.setVerboseColumnNames(status); } /** * Registers a TriggerListener to listen for any triggers that are fired * with the given name. A TriggerListener may be registered to listen for * multiple database triggers. *

* NOTE: All trigger events are fired on a dedicated trigger thread. All * triggers are fired from this thread in sequence. * * @param trigger_name the name of the database trigger to listen for. * @param trigger_listener the listener to be notified when the trigger * event occurs. */ public void addTriggerListener(String trigger_name, TriggerListener trigger_listener) { connection.addTriggerListener(trigger_name, trigger_listener); } /** * Removes a TriggerListener that is listening for triggers with the given * name. * * @param trigger_name the name of the database trigger to stop listening * for. * @param trigger_listener the listener to stop being notified of trigger * events for this trigger name. */ public void removeTriggerListener(String trigger_name, TriggerListener trigger_listener) { connection.removeTriggerListener(trigger_name, trigger_listener); } // ---------- Static methods ---------- /** * Given a string, this will use escape codes to convert the Java string into * a Mckoi SQL string that can be parsed correctly by the database. * For example;

*

   *   String user_input = [some untrusted string]
   *   Statement statement = connection.createStatement();
   *   ResultSet result = statement.executeQuery(
   *         "SELECT number FROM Part WHERE number = " +
   *         MckoiConnection.quote(user_input));
   * 
* If the user supplies the string "Gr's\nut\'", this method will generate * the SQL query string;

*

   *   SELECT number FROM Part WHERE number = 'Gr\'s\\nut\\\''
   * 
* This is used for generating secure dynamic SQL commands. It is * particularly important if the quoted strings are coming from an untrusted * source. *

* This security precaution is not necessary if using PreparedStatement to * form the SQL parameters. */ public static String quote(String java_string) { StringBuffer buf = new StringBuffer(); int str_len = java_string.length(); for (int i = 0; i < str_len; ++i) { char c = java_string.charAt(i); if (c == '\'' || c == '\\') { buf.append('\\'); } if (c == '\n') { buf.append("\\n"); } else if (c == '\r') { buf.append("\\r"); } else if (c == '\t') { buf.append("\\t"); } else { buf.append(c); } } return new String(buf); } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/jdbc/ProtocolConstants.java000066400000000000000000000056531330501023400271710ustar00rootroot00000000000000/** * com.mckoi.database.jdbc.ProtocolConstants 20 Jul 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.jdbc; /** * Constants used in the JDBC database communication protocol. * * @author Tobias Downer */ public interface ProtocolConstants { /** * Sent as an acknowledgement to a command. */ public int ACKNOWLEDGEMENT = 5; /** * Sent if login passed. */ public int USER_AUTHENTICATION_PASSED = 10; /** * Sent if login failed because username or password were invalid. */ public int USER_AUTHENTICATION_FAILED = 15; /** * Operation was successful. */ public int SUCCESS = 20; /** * Operation failed (followed by a UTF String error message). 
*/ public int FAILED = 25; /** * Operation threw an exception. */ public int EXCEPTION = 30; /** * There was an authentication error. A query couldn't be executed because * the user does not have enough rights. */ public int AUTHENTICATION_ERROR = 35; // ---------- Commands ---------- /** * Query sent to the server for processing. */ public int QUERY = 50; /** * Disposes the server-side resources associated with a result. */ public int DISPOSE_RESULT = 55; /** * Requests a section of a result from the server. */ public int RESULT_SECTION = 60; /** * Requests a section of a streamable object from the server. */ public int STREAMABLE_OBJECT_SECTION = 61; /** * Disposes of the resources associated with a streamable object on the * server. */ public int DISPOSE_STREAMABLE_OBJECT = 62; /** * For pushing a part of a streamable object onto the server from the client. */ public int PUSH_STREAMABLE_OBJECT_PART = 63; /** * Ping command. */ public int PING = 65; /** * Closes the protocol stream. */ public int CLOSE = 70; /** * Denotes an event from the database (trigger, etc). */ public int DATABASE_EVENT = 75; /** * Denotes a server side request for information. For example, a request for * a part of a streamable object. */ public int SERVER_REQUEST = 80; } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/jdbc/QueryResponse.java000066400000000000000000000036771330501023400263230ustar00rootroot00000000000000/** * com.mckoi.database.jdbc.QueryResponse 16 Aug 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.jdbc; import com.mckoi.database.global.ColumnDescription; /** * The response to a query executed via the 'execQuery' method in the * DatabaseInterface interface. This contains general information about the * result of the query. * * @author Tobias Downer */ public interface QueryResponse { /** * Returns a number that identifies this query within the set of queries * executed on the connection. This is used for identifying this query * in subsequent operations. */ int getResultID(); /** * The time, in milliseconds, that the query took to execute. */ int getQueryTimeMillis(); /** * The total number of rows in the query result. This is known ahead of * time, even if no data in the query has been accessed. */ int getRowCount(); /** * The number of columns in the query result. */ int getColumnCount(); /** * The ColumnDescription object that describes column 'n' in the result. 0 * is the first column, 1 is the second column, etc. */ ColumnDescription getColumnDescription(int column); /** * Returns any warnings about the query. If there were no warnings then * this can return 'null'. */ String getWarnings(); } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/jdbc/RemoteDatabaseInterface.java000066400000000000000000000651771330501023400302030ustar00rootroot00000000000000/** * com.mckoi.database.jdbc.RemoteDatabaseInterface 16 Aug 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. 
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package com.mckoi.database.jdbc;

import java.io.*;
import java.sql.*;
import java.util.Properties;
import java.util.Vector;

import com.mckoi.database.global.ColumnDescription;
import com.mckoi.database.global.ObjectTransfer;
import com.mckoi.util.ByteArrayUtil;

/**
 * An abstract implementation of DatabaseInterface that retrieves information
 * from a remote server host.  The actual implementation of the communication
 * protocol is left to the derived classes (which must implement the three
 * abstract read/write/close methods below).
 *
 * @author Tobias Downer
 */
abstract class RemoteDatabaseInterface
                             implements DatabaseInterface, ProtocolConstants {

  /**
   * The thread that dispatches commands to the server.  This is created and
   * started after the 'login' method is called.  This can handle concurrent
   * queries through the protocol pipe (requests and responses are matched up
   * by a per-command dispatch id).
   */
  private ConnectionThread connection_thread;

  /**
   * A DatabaseCallBack implementation that is notified of all events that
   * are received from the database (trigger notifications, etc).
   */
  private DatabaseCallBack database_call_back;

  /**
   * Writes the exception to the JDBC log stream, if one is configured via
   * DriverManager.  The '//#IFDEF' / '//#ENDIF' markers are build-time
   * preprocessor directives for pre-JDBC-2 builds - do not remove them.
   */
  private static void logException(Throwable e) {
    PrintWriter out = null;
//#IFDEF(NO_1.1)
    out = DriverManager.getLogWriter();
//#ENDIF
    if (out != null) {
      e.printStackTrace(out);
    }
//    else {
//      e.printStackTrace(System.err);
//    }
  }


  // ---------- Abstract methods ----------

  /**
   * Writes the given command to the server.  The way the command is written
   * is totally network layer dependent.
   */
  abstract void writeCommandToServer(byte[] command, int offset, int length)
                                                          throws IOException;

  /**
   * Blocks until the next command is received from the server.  The way this
   * is implemented is network layer dependant.
   * NOTE(review): this class always passes 0 for 'timeout' - presumably
   * meaning "no timeout"; confirm against the concrete subclasses.
   */
  abstract byte[] nextCommandFromServer(int timeout) throws IOException;

  /**
   * Closes the connection.
   */
  abstract void closeConnection() throws IOException;


  // ---------- Implemented from DatabaseInterface ----------

  /**
   * Performs the handshake and authentication exchange with the server:
   * magic number + driver version out, acknowledgement (with optional server
   * version byte) back, then schema/user/password out, authentication status
   * back.  On success the background ConnectionThread is started.
   *
   * @return true if login succeeded.
   * @throws SQLLoginException if the username/password were rejected.
   * @throws SQLException on protocol or IO errors.
   */
  public boolean login(String default_schema, String user, String password,
                       DatabaseCallBack call_back) throws SQLException {

    try {

      // Do some handshaking,
      ByteArrayOutputStream bout = new ByteArrayOutputStream();
      DataOutputStream out = new DataOutputStream(bout);

      // Write out the magic number
      out.writeInt(0x0ced007);
      // Write out the JDBC driver version
      out.writeInt(MDriver.DRIVER_MAJOR_VERSION);
      out.writeInt(MDriver.DRIVER_MINOR_VERSION);
      byte[] arr = bout.toByteArray();
      writeCommandToServer(arr, 0, arr.length);

      byte[] response = nextCommandFromServer(0);

//      printByteArray(response);

      int ack = ByteArrayUtil.getInt(response, 0);
      if (ack == ACKNOWLEDGEMENT) {

        // History of server versions (inclusive)
        //    Engine version |  server_version
        //  -----------------|-------------------
        //    0.00 - 0.91    |  0
        //    0.92 -         |  1
        //  -----------------|-------------------

        // Server version defaults to 0
        // Server version 0 is for all versions of the engine previous to 0.92
        int server_version = 0;
        // Is there anything more to read?
        if (response.length > 4 && response[4] == 1) {
          // Yes so read the server version
          server_version = ByteArrayUtil.getInt(response, 5);
        }
        // NOTE(review): 'server_version' is read but not used in this method.

        // Send the username and password to the server
        // SECURITY: username/password sent as plain text.  This is okay
        //   if we are connecting to localhost, but not good if we connecting
        //   over the internet.  We could encrypt this, but it would probably
        //   be better if we put the entire stream through an encyption
        //   protocol.

        bout.reset();
        out.writeUTF(default_schema);
        out.writeUTF(user);
        out.writeUTF(password);
        arr = bout.toByteArray();
        writeCommandToServer(arr, 0, arr.length);

        response = nextCommandFromServer(0);
        int result = ByteArrayUtil.getInt(response, 0);
        if (result == USER_AUTHENTICATION_PASSED) {
          // Set the call_back,
          this.database_call_back = call_back;
          // User authentication passed so we successfully logged in now.
          connection_thread = new ConnectionThread();
          connection_thread.start();
          return true;
        }
        else if (result == USER_AUTHENTICATION_FAILED) {
          throw new SQLLoginException("User Authentication failed.");
        }
        else {
          throw new SQLException("Unexpected response.");
        }

      }
      else {
        throw new SQLException("No acknowledgement received from server.");
      }

    }
    catch (IOException e) {
      logException(e);
      throw new SQLException("IOException: " + e.getMessage());
    }

  }

  /**
   * Pushes one part of a streamable object (Blob/Clob data) up to the server
   * and waits for the server's status response.
   *
   * @throws SQLException on timeout, a FAILED status, or IO error.
   */
  public void pushStreamableObjectPart(byte type, long object_id,
               long object_length, byte[] buf, long offset, int length)
                                                        throws SQLException {
    try {
      // Push the object part
      int dispatch_id = connection_thread.pushStreamableObjectPart(
                     type, object_id, object_length, buf, offset, length);
      // Get the response
      ServerCommand command =
              connection_thread.getCommand(MDriver.QUERY_TIMEOUT, dispatch_id);
      // If command == null then we timed out
      if (command == null) {
        throw new SQLException("Query timed out after " +
                               MDriver.QUERY_TIMEOUT + " seconds.");
      }

      DataInputStream din = new DataInputStream(command.getInputStream());
      int status = din.readInt();

      // If failed report the error.
      if (status == FAILED) {
        throw new SQLException("Push object failed: " + din.readUTF());
      }

    }
    catch (IOException e) {
      logException(e);
      throw new SQLException("IO Error: " + e.getMessage());
    }

  }
  /**
   * Executes the given SQL query on the server and blocks (up to
   * MDriver.QUERY_TIMEOUT seconds) for the response header.  On SUCCESS the
   * header fields are read in protocol order: result id, query time, row
   * count, column count, then one ColumnDescription per column - do not
   * reorder these reads.
   *
   * @return a QueryResponse describing the result (row data is fetched
   *   separately via 'getResultPart').
   * @throws MSQLException if the server reports an exception.
   * @throws SQLException on timeout, authentication error, or IO error.
   */
  public QueryResponse execQuery(SQLQuery sql) throws SQLException {

    try {
      // Execute the query
      int dispatch_id = connection_thread.executeQuery(sql);

      // Get the response
      ServerCommand command =
              connection_thread.getCommand(MDriver.QUERY_TIMEOUT, dispatch_id);
      // If command == null then we timed out
      if (command == null) {
        throw new SQLException("Query timed out after " +
                               MDriver.QUERY_TIMEOUT + " seconds.");
      }

      DataInputStream in = new DataInputStream(command.getInputStream());

      // Query response protocol...
      int status = in.readInt();
      if (status == SUCCESS) {
        final int result_id = in.readInt();
        final int query_time = in.readInt();
        final int row_count = in.readInt();
        final int col_count = in.readInt();
        final ColumnDescription[] col_list = new ColumnDescription[col_count];
        for (int i = 0; i < col_count; ++i) {
          col_list[i] = ColumnDescription.readFrom(in);
        }

        // Immutable snapshot of the response header.  Note that
        // 'getWarnings' always returns "" in this implementation.
        return new QueryResponse() {
          public int getResultID() {
            return result_id;
          }
          public int getQueryTimeMillis() {
            return query_time;
          }
          public int getRowCount() {
            return row_count;
          }
          public int getColumnCount() {
            return col_count;
          }
          public ColumnDescription getColumnDescription(int n) {
            return col_list[n];
          }
          public String getWarnings() {
            return "";
          }
        };

      }
      else if (status == EXCEPTION) {
        int db_code = in.readInt();
        String message = in.readUTF();
        String stack_trace = in.readUTF();
        throw new MSQLException(message, null, db_code, stack_trace);
      }
      else if (status == AUTHENTICATION_ERROR) {
        // Means we could perform the query because user doesn't have enough
        // rights.
        String access_type = in.readUTF();
        String table_name = in.readUTF();
        throw new SQLException("User doesn't have enough privs to " +
                               access_type + " table " + table_name);
      }
      else {
        throw new SQLException("Illegal response code from server.");
      }

    }
    catch (IOException e) {
      logException(e);
      throw new SQLException("IO Error: " + e.getMessage());
    }

  }

  /**
   * Downloads a section of a previously executed result from the server:
   * 'count_rows' rows starting at 'start_row' of result 'result_id'.  The
   * returned ResultPart holds the cell values in row-major order
   * (count_rows * column_count elements).
   *
   * @throws SQLException on timeout, a server-reported exception, or IO
   *   error.
   */
  public ResultPart getResultPart(int result_id, int start_row,
                                  int count_rows) throws SQLException {

    try {
      // Get the first few rows of the result..
      int dispatch_id = connection_thread.getResultPart(result_id,
                                                      start_row, count_rows);

      // Get the response
      ServerCommand command =
              connection_thread.getCommand(MDriver.QUERY_TIMEOUT, dispatch_id);
      // If command == null then we timed out
      if (command == null) {
        throw new SQLException("Downloading result part timed out after " +
                               MDriver.QUERY_TIMEOUT + " seconds.");
      }

      // Wrap around a DataInputStream
      DataInputStream din = new DataInputStream(command.getInputStream());
      int status = din.readInt();

      if (status == SUCCESS) {
        // Return the contents of the response.
        int col_count = din.readInt();
        int size = count_rows * col_count;
        ResultPart list = new ResultPart(size);
        for (int i = 0; i < size; ++i) {
          list.addElement(ObjectTransfer.readFrom(din));
        }
        return list;
      }
      else if (status == EXCEPTION) {
        int db_code = din.readInt();
        String message = din.readUTF();
        String stack_trace = din.readUTF();
        throw new SQLException(message, null, db_code);
      }
      else {
        throw new SQLException("Illegal response code from server.");
      }

    }
    catch (IOException e) {
      logException(e);
      throw new SQLException("IO Error: " + e.getMessage());
    }

  }

  /**
   * Asks the server to release the resources for the given result id and
   * waits for the status response.
   *
   * @throws SQLException on timeout, a FAILED status, or IO error.
   */
  public void disposeResult(int result_id) throws SQLException {
    try {
      int dispatch_id = connection_thread.disposeResult(result_id);
      // Get the response
      ServerCommand command =
              connection_thread.getCommand(MDriver.QUERY_TIMEOUT, dispatch_id);
      // If command == null then we timed out
      if (command == null) {
        throw new SQLException("Dispose result timed out after " +
                               MDriver.QUERY_TIMEOUT + " seconds.");
      }

      // Check the dispose was successful.
      DataInputStream din = new DataInputStream(command.getInputStream());
      int status = din.readInt();

      // If failed report the error.
      if (status == FAILED) {
        throw new SQLException("Dispose failed: " + din.readUTF());
      }

    }
    catch (IOException e) {
      logException(e);
      throw new SQLException("IO Error: " + e.getMessage());
    }
  }
  /**
   * Downloads 'len' bytes of a streamable object (Blob/Clob) starting at
   * 'offset', for object 'streamable_object_id' belonging to result
   * 'result_id'.
   *
   * @return the downloaded bytes wrapped in a StreamableObjectPart.
   * @throws SQLException on timeout, a server-reported exception, or IO
   *   error.
   */
  public StreamableObjectPart getStreamableObjectPart(int result_id,
           long streamable_object_id, long offset, int len)
                                                       throws SQLException {
    try {
      int dispatch_id = connection_thread.getStreamableObjectPart(result_id,
                                          streamable_object_id, offset, len);
      ServerCommand command =
              connection_thread.getCommand(MDriver.QUERY_TIMEOUT, dispatch_id);
      // If command == null then we timed out
      if (command == null) {
        throw new SQLException("getStreamableObjectPart timed out after " +
                               MDriver.QUERY_TIMEOUT + " seconds.");
      }

      DataInputStream din = new DataInputStream(command.getInputStream());
      int status = din.readInt();

      if (status == SUCCESS) {
        // Return the contents of the response.
        int contents_size = din.readInt();
        byte[] buf = new byte[contents_size];
        din.readFully(buf, 0, contents_size);
        return new StreamableObjectPart(buf);
      }
      else if (status == EXCEPTION) {
        int db_code = din.readInt();
        String message = din.readUTF();
        String stack_trace = din.readUTF();
        throw new SQLException(message, null, db_code);
      }
      else {
        throw new SQLException("Illegal response code from server.");
      }

    }
    catch (IOException e) {
      logException(e);
      throw new SQLException("IO Error: " + e.getMessage());
    }
  }

  /**
   * Asks the server to release the resources of a streamable object and
   * waits for the status response.
   *
   * @throws SQLException on timeout, a FAILED status, or IO error.
   */
  public void disposeStreamableObject(int result_id,
                            long streamable_object_id) throws SQLException {
    try {
      int dispatch_id = connection_thread.disposeStreamableObject(
                                          result_id, streamable_object_id);
      ServerCommand command =
              connection_thread.getCommand(MDriver.QUERY_TIMEOUT, dispatch_id);
      // If command == null then we timed out
      if (command == null) {
        throw new SQLException("disposeStreamableObject timed out after " +
                               MDriver.QUERY_TIMEOUT + " seconds.");
      }

      DataInputStream din = new DataInputStream(command.getInputStream());
      int status = din.readInt();

      // If failed report the error.
      if (status == FAILED) {
        throw new SQLException("Dispose failed: " + din.readUTF());
      }

    }
    catch (IOException e) {
      logException(e);
      throw new SQLException("IO Error: " + e.getMessage());
    }
  }

  /**
   * Sends a CLOSE command to the server and closes the underlying network
   * connection.  Note that it does not wait for the server's response to the
   * close command.
   *
   * @throws SQLException on IO error.
   */
  public void dispose() throws SQLException {
    try {
      int dispatch_id = connection_thread.sendCloseCommand();
//      // Get the response
//      ServerCommand command =
//              connection_thread.getCommand(MDriver.QUERY_TIMEOUT, dispatch_id);
      closeConnection();
    }
    catch (IOException e) {
      logException(e);
      throw new SQLException("IO Error: " + e.getMessage());
    }
  }
  // ---------- Inner classes ----------

  /**
   * The connection thread that can dispatch commands concurrently through
   * the in/out pipe.  Outgoing requests are tagged with a unique dispatch
   * id; this thread reads server responses and files them in 'commands_list'
   * where 'getCommand' picks them up by dispatch id.
   */
  private class ConnectionThread extends Thread {

    // Buffer the next outgoing command is serialized into before being
    // written to the server in one piece.
    private MByteArrayOutputStream com_bytes;
    private DataOutputStream com_data;

    // Running dispatch id values which we use as a unique key.
    private int running_dispatch_id = 1;

    // Set to true when the thread is closed.
    // NOTE(review): nothing in the visible code ever sets this to true.
    private boolean thread_closed;

    // The list of commands received from the server that are pending to be
    // processed (ServerCommand).  Set to null when the reader loop exits.
    private Vector commands_list;

    /**
     * Constructs the connection thread (as a daemon; does not start it).
     */
    ConnectionThread() throws IOException {
      setDaemon(true);
      setName("Mckoi - Connection Thread");
      com_bytes = new MByteArrayOutputStream();
      com_data = new DataOutputStream(com_bytes);
      commands_list = new Vector();
      thread_closed = false;
    }

    // ---------- Utility ----------

    /**
     * Returns a unique dispatch id number for a command.
     */
    private int nextDispatchID() {
      return running_dispatch_id++;
    }

    /**
     * Blocks until a response from the server has been received with the
     * given dispatch id.  It waits for 'timeout' seconds and if the response
     * hasn't been received by then returns null.  A 'timeout' of 0 waits
     * indefinitely.
     *
     * NOTE(review): if the reader thread nulls 'commands_list' while a
     * caller is waiting in the loop below, the next iteration dereferences
     * the (now null) field and throws NullPointerException rather than the
     * intended "Connection to server closed" SQLException - the null check
     * only runs once on entry.  Confirm before changing.
     */
    ServerCommand getCommand(int timeout, int dispatch_id)
                                                        throws SQLException {
      final long time_in = System.currentTimeMillis();
      final long time_out_high = time_in + ((long) timeout * 1000);

      synchronized (commands_list) {

        if (commands_list == null) {
          throw new SQLException("Connection to server closed");
        }

        while (true) {

          for (int i = 0; i < commands_list.size(); ++i) {
            ServerCommand command =
                                 (ServerCommand) commands_list.elementAt(i);
            if (command.dispatchID() == dispatch_id) {
              commands_list.removeElementAt(i);
              return command;
            }
          }

          // Return null if we haven't received a response in the timeout
          // period.
          if (timeout != 0 &&
              System.currentTimeMillis() > time_out_high) {
            return null;
          }

          // Wait a second.
          try {
            commands_list.wait(1000);
          }
          catch (InterruptedException e) { /* ignore */ }

        } // while (true)

      } // synchronized

    }

    // ---------- Server request methods ----------

    /**
     * Flushes the command in 'com_bytes' to the server.
     */
    private synchronized void flushCommand() throws IOException {
      // We flush the size of the command string followed by the command
      // itself to the server.  This format allows us to implement a simple
      // non-blocking command parser on the server.
      writeCommandToServer(com_bytes.getBuffer(), 0, com_bytes.size());
      com_bytes.reset();
    }

    /**
     * Pushes a part of a streamable object onto the server.  Used in
     * preparation to executing queries containing large objects.  Note the
     * wire order: length, then the bytes, then the offset - keep as is.
     *
     * @return the dispatch id key for the response from the server.
     */
    synchronized int pushStreamableObjectPart(byte type, long object_id,
                   long object_length, byte[] buf, long offset, int length)
                                                          throws IOException {
      int dispatch_id = nextDispatchID();
      com_data.writeInt(PUSH_STREAMABLE_OBJECT_PART);
      com_data.writeInt(dispatch_id);
      com_data.writeByte(type);
      com_data.writeLong(object_id);
      com_data.writeLong(object_length);
      com_data.writeInt(length);
      com_data.write(buf, 0, length);
      com_data.writeLong(offset);
      flushCommand();
      return dispatch_id;
    }

    /**
     * Sends a command to the server to process a query.  The response from
     * the server will contain a 'result_id' that is a unique number for
     * refering to the result.  It also contains information about the
     * columns in the table, and the total number of rows in the result.
     *
     * @return the dispatch id key for the response from the server.
     */
    synchronized int executeQuery(SQLQuery sql) throws IOException {
      int dispatch_id = nextDispatchID();
      com_data.writeInt(QUERY);
      com_data.writeInt(dispatch_id);
      sql.writeTo(com_data);
      flushCommand();
      return dispatch_id;
    }

    /**
     * Releases the server side resources associated with a given query key
     * returned by the server.  This should be called when the ResultSet is
     * closed, or if we cancel in the middle of downloading a result.
     * <p>
     * It's very important that the server resources for a query is released.
     *
     * @return the dispatch id key for the response from the server.
     */
    synchronized int disposeResult(int result_id) throws IOException {
      int dispatch_id = nextDispatchID();
      com_data.writeInt(DISPOSE_RESULT);
      com_data.writeInt(dispatch_id);
      com_data.writeInt(result_id);
      flushCommand();
      return dispatch_id;
    }

    /**
     * Requests a part of a result of a query.  This is used to download a
     * part of a result set from the server.  The 'result_id' is generated
     * by the 'query' command.  Please note that this will generate an error
     * if the result_id is invalid or has previously been disposed.  The
     * 'row_number' refers to the row to download from.  The 'row_count'
     * refers to the number of rows to download.
     *
     * @return the dispatch id key for the response from the server.
     */
    synchronized int getResultPart(int result_id, int row_number,
                                   int row_count) throws IOException {
      int dispatch_id = nextDispatchID();
      com_data.writeInt(RESULT_SECTION);
      com_data.writeInt(dispatch_id);
      com_data.writeInt(result_id);
      com_data.writeInt(row_number);
      com_data.writeInt(row_count);
      flushCommand();
      return dispatch_id;
    }

    /**
     * Requests a part of an open StreamableObject channel.  This is used to
     * download a section of a large object, such as a Blob or a Clob.  The
     * 'streamable_object_id' is returned by the 'getIdentifier' method of
     * the StreamableObject in a ResultPart.
     *
     * @return the dispatch id key for the response from the server.
     */
    synchronized int getStreamableObjectPart(int result_id,
                 long streamable_object_id, long offset, int length)
                                                          throws IOException {
      int dispatch_id = nextDispatchID();
      com_data.writeInt(STREAMABLE_OBJECT_SECTION);
      com_data.writeInt(dispatch_id);
      com_data.writeInt(result_id);
      com_data.writeLong(streamable_object_id);
      com_data.writeLong(offset);
      com_data.writeInt(length);
      flushCommand();
      return dispatch_id;
    }

    /**
     * Disposes the resources associated with a streamable object on the
     * server.  This would typically be called when either of the following
     * situations occured - the Blob is closed/disposed/finalized, the
     * InputStream is closes/finalized.
     * <p>
     * It's very important that the server resources for a streamable object
     * is released.
     *
     * @return the dispatch id key for the response from the server.
     */
    synchronized int disposeStreamableObject(int result_id,
                            long streamable_object_id) throws IOException {
      int dispatch_id = nextDispatchID();
      com_data.writeInt(DISPOSE_STREAMABLE_OBJECT);
      com_data.writeInt(dispatch_id);
      com_data.writeInt(result_id);
      com_data.writeLong(streamable_object_id);
      flushCommand();
      return dispatch_id;
    }

    /**
     * Sends close command to server.
     */
    synchronized int sendCloseCommand() throws IOException {
      int dispatch_id = nextDispatchID();
      com_data.writeInt(CLOSE);
      com_data.writeInt(dispatch_id);
      flushCommand();
      return dispatch_id;
    }


    // ---------- Server read methods ----------

    /**
     * Listens for commands from the server.  When received puts the command
     * on the dispatch list.
     *
     * NOTE(review): packets with dispatch_id == -1 are handed to
     * 'processEvent' AND then still appended to 'commands_list'; nothing
     * ever removes them ('getCommand' never matches -1), so they accumulate
     * for the life of the connection.  A 'continue' after processEvent may
     * have been intended - confirm before changing.
     */
    public void run() {

      try {
        while (!thread_closed) {

          // Block until next command received from server.
          byte[] buf = nextCommandFromServer(0);
          int dispatch_id = ByteArrayUtil.getInt(buf, 0);

          if (dispatch_id == -1) {
            // This means a trigger or a ping or some other server side event.
            processEvent(buf);
          }

          synchronized (commands_list) {
            // Add this command to the commands list
            commands_list.addElement(new ServerCommand(dispatch_id, buf));
            // Notify any threads waiting on it.
            commands_list.notifyAll();
          }

        } // while(true)
      }
      catch (IOException e) {
//        System.err.println("Connection Thread closed because of IOException");
//        e.printStackTrace();
      }
      finally {
        // Invalidate this object when the thread finishes.
        Object old_commands_list = commands_list;
        synchronized (old_commands_list) {
          commands_list = null;
          old_commands_list.notifyAll();
        }
      }

    }

    /**
     * Processes a server side event.  The event code is at offset 4 of the
     * packet; DATABASE_EVENT payloads carry an int event type and a UTF
     * message which are forwarded to the DatabaseCallBack.
     */
    private void processEvent(byte[] buf) throws IOException {
      int event = ByteArrayUtil.getInt(buf, 4);
      if (event == PING) {
        // Ignore ping events, they only sent by server to see if we are
        // alive.  Ping back?
      }
      else if (event == DATABASE_EVENT) {
        // A database event that is passed to the DatabaseCallBack...
        ByteArrayInputStream bin =
                          new ByteArrayInputStream(buf, 8, buf.length - 8);
        DataInputStream din = new DataInputStream(bin);

        int event_type = din.readInt();
        String event_msg = din.readUTF();
        database_call_back.databaseEvent(event_type, event_msg);
      }
//      else if (event == SERVER_REQUEST) {
//        // A server request that is passed to the DatabaseCallBack...
//        ByteArrayInputStream bin =
//                          new ByteArrayInputStream(buf, 8, buf.length - 8);
//        DataInputStream din = new DataInputStream(bin);
//
//        int command = din.readInt();        // Currently ignored
//        long stream_id = din.readLong();
//        int length = din.readInt();
//        database_call_back.streamableObjectRequest(stream_id, length);
//      }
      else {
        System.err.println("[RemoteDatabaseInterface] " +
                         "Received unrecognised server side event: " + event);
      }
    }

  }
ByteArrayInputStream bin = new ByteArrayInputStream(buf, 8, buf.length - 8); DataInputStream din = new DataInputStream(bin); int event_type = din.readInt(); String event_msg = din.readUTF(); database_call_back.databaseEvent(event_type, event_msg); } // else if (event == SERVER_REQUEST) { // // A server request that is passed to the DatabaseCallBack... // ByteArrayInputStream bin = // new ByteArrayInputStream(buf, 8, buf.length - 8); // DataInputStream din = new DataInputStream(bin); // // int command = din.readInt(); // Currently ignored // long stream_id = din.readLong(); // int length = din.readInt(); // database_call_back.streamableObjectRequest(stream_id, length); // } else { System.err.println("[RemoteDatabaseInterface] " + "Received unrecognised server side event: " + event); } } } /** * A ByteArrayOutputStream that allows us access to the underlying byte[] * array. */ static class MByteArrayOutputStream extends ByteArrayOutputStream { MByteArrayOutputStream() { super(256); } public byte[] getBuffer() { return buf; } public int size() { return count; } } /** * Represents the data in a command from the server. */ static class ServerCommand { private int dispatch_id; private byte[] buf; ServerCommand(int dispatch_id, byte[] buf) { this.dispatch_id = dispatch_id; this.buf = buf; } public int dispatchID() { return dispatch_id; } public byte[] getBuf() { return buf; } public ByteArrayInputStream getInputStream() { return new ByteArrayInputStream(buf, 4, buf.length - 4); } } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/jdbc/ResultPart.java000066400000000000000000000020051330501023400255640ustar00rootroot00000000000000/** * com.mckoi.database.jdbc.ResultPart 02 Mar 2001 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.jdbc; /** * A container class that holds a part of a result set. * * @author Tobias Downer */ public class ResultPart extends java.util.Vector { public ResultPart() { super(); } public ResultPart(int initial_size) { super(initial_size); } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/jdbc/RowCache.java000066400000000000000000000146061330501023400251640ustar00rootroot00000000000000/** * com.mckoi.database.jdbc.RowCache 03 Aug 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.jdbc; import com.mckoi.util.Cache; import com.mckoi.database.global.ObjectTransfer; import java.util.Vector; import java.io.*; import java.sql.SQLException; /** * A Cache that stores rows retrieved from the server in result set's. This * provides various mechanisms for determining the best rows to pick out that * haven't been cached, etc. 
 *
 * @author Tobias Downer
 */

final class RowCache {

  /**
   * The actual cache that stores the rows (keyed by RowRef, values are
   * CachedRow).
   */
  private Cache row_cache;

  /**
   * Constructs the cache.
   *
   * @param cache_size the number of elements in the row cache.
   * @param max_size the maximum size of the combined total of all items in
   *   the cache.  NOTE(review): this parameter is currently ignored - the
   *   cache is sized from 'cache_size' alone.
   */
  RowCache(int cache_size, int max_size) {
    row_cache = new Cache(cache_size, cache_size, 20);
  }

  /**
   * Requests a block of parts.  If the block can be completely retrieved
   * from the cache then it is done so.  Otherwise, it forwards the request
   * for the rows onto the connection object.  The algorithm shrinks the
   * server request to just the contiguous middle span of rows not already
   * cached (scanning down from the top and up from the bottom), then merges
   * cached and freshly downloaded rows into 'result_block'.
   *
   * @param result_block cleared and refilled with cell values in row-major
   *   order; also returned.
   */
  synchronized Vector getResultPart(Vector result_block,
           MConnection connection, int result_id, int row_index,
           int row_count, int col_count, int total_row_count)
                                          throws IOException, SQLException {

    // What was requested....
    int orig_row_index = row_index;
    int orig_row_count = row_count;

    Vector rows = new Vector();

    // The top row that isn't found in the cache.
    boolean found_notcached = false;
    // Look for the top row in the block that hasn't been cached
    for (int r = 0; r < row_count && !found_notcached; ++r) {
      int da_row = row_index + r;
      // Is the row in the cache?
      RowRef row_ref = new RowRef(result_id, da_row);
      // Not in cache so mark this as top row not in cache...
      CachedRow row = (CachedRow) row_cache.get(row_ref);
      if (row == null) {
        row_index = da_row;
        // Clamp the request so it never runs past the end of the result.
        if (row_index + row_count > total_row_count) {
          row_count = total_row_count - row_index;
        }
        found_notcached = true;
      }
      else {
        rows.addElement(row);
      }
    }

    Vector rows2 = new Vector();
    if (found_notcached) {

      // Now work up from the bottom and find row that isn't in cache....
      found_notcached = false;
      // Look for the bottom row in the block that hasn't been cached
      for (int r = row_count - 1; r >= 0 && !found_notcached; --r) {
        int da_row = row_index + r;
        // Is the row in the cache?
        RowRef row_ref = new RowRef(result_id, da_row);
        // Not in cache so mark this as top row not in cache...
        CachedRow row = (CachedRow) row_cache.get(row_ref);
        if (row == null) {
          if (row_index == orig_row_index) {
            row_index = row_index - (row_count - (r + 1));
            if (row_index < 0) {
              row_count = row_count + row_index;
              row_index = 0;
            }
          }
          else {
            row_count = r + 1;
          }
          found_notcached = true;
        }
        else {
          rows2.insertElementAt(row, 0);
        }
      }

    }

    // Some of it not in the cache...
    if (found_notcached) {
//      System.out.println("REQUESTING: " + row_index + " - " + row_count);

      // Request a part of a result from the server (blocks)
      ResultPart block = connection.requestResultPart(result_id,
                                                      row_index, row_count);

      int block_index = 0;
      for (int r = 0; r < row_count; ++r) {
        Object[] arr = new Object[col_count];
        int da_row = (row_index + r);
        int col_size = 0;
        for (int c = 0; c < col_count; ++c) {
          Object ob = block.elementAt(block_index);
          ++block_index;
          arr[c] = ob;
          col_size += ObjectTransfer.size(ob);
        }
        CachedRow cached_row = new CachedRow();
        cached_row.row = da_row;
        cached_row.row_data = arr;
        // Don't cache if it's over a certain size,
        if (col_size <= 3200) {
          row_cache.put(new RowRef(result_id, da_row), cached_row);
        }
        rows.addElement(cached_row);
      }
    }

    // At this point, the cached rows should be completely in the cache so
    // retrieve it from the cache.
    result_block.removeAllElements();
    int low = orig_row_index;
    int high = orig_row_index + orig_row_count;
    // 'rows' holds the leading cached rows plus the downloaded span;
    // 'rows2' holds the trailing cached rows found by the bottom-up scan.
    for (int r = 0; r < rows.size(); ++r) {
      CachedRow row = (CachedRow) rows.elementAt(r);
      // Put into the result block
      if (row.row >= low && row.row < high) {
        for (int c = 0; c < col_count; ++c) {
          result_block.addElement(row.row_data[c]);
        }
      }
    }
    for (int r = 0; r < rows2.size(); ++r) {
      CachedRow row = (CachedRow) rows2.elementAt(r);
      // Put into the result block
      if (row.row >= low && row.row < high) {
        for (int c = 0; c < col_count; ++c) {
          result_block.addElement(row.row_data[c]);
        }
      }
    }

    // And return the result (phew!)
    return result_block;
  }

  /**
   * Flushes the complete contents of the cache.
   */
  synchronized void clear() {
    row_cache.removeAll();
  }

  // ---------- Inner classes ----------

  /**
   * Used for the hash key in the cache (identifies a row of a result).
   */
  private final static class RowRef {
    int table_id;
    int row;

    RowRef(int table_id, int row) {
      this.table_id = table_id;
      this.row = row;
    }

    public int hashCode() {
      return (int) table_id + (row * 35331);
    }

    public boolean equals(Object ob) {
      RowRef dest = (RowRef) ob;
      return (row == dest.row && table_id == dest.table_id);
    }
  }

  /**
   * A cached row: its index in the result and its cell values.
   */
  private final static class CachedRow {
    int row;
    Object[] row_data;
  }

}
* * @author Tobias Downer */ public class SQLLoginException extends SQLException { public SQLLoginException(String reason, String SQLState, int vendorCode) { super(reason, SQLState, vendorCode); } public SQLLoginException(String reason, String SQLState) { super(reason, SQLState); } public SQLLoginException(String reason) { super(reason); } public SQLLoginException() { super(); } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/jdbc/SQLQuery.java000066400000000000000000000254421330501023400251560ustar00rootroot00000000000000/** * com.mckoi.database.jdbc.SQLQuery 20 Jul 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.jdbc; import java.io.*; import java.sql.SQLException; import com.mckoi.database.global.ObjectTransfer; import com.mckoi.database.global.ObjectTranslator; import com.mckoi.database.global.ByteLongObject; /** * Represents an SQL Query to the database. This includes the query string * itself plus any data types that are part of the query. *

* FUTURE ENHANCEMENTS: This could do some preliminary parsing of the query * string for faster translation by the database. * * @author Tobias Downer */ public final class SQLQuery { /** * The SQL String. For example, "select * from Part". */ private String query; /** * Set to true when this query is prepared via the prepare method. */ private boolean prepared; /** * The list of all variable substitutions that are in the query. A * variable substitution is set up in a prepared statement. */ private Object[] parameters; private int parameters_index; private int parameter_count; /** * Empty constructor. */ private SQLQuery() { } /** * Constructs the query. */ public SQLQuery(String query) { this.query = query; parameters = new Object[8]; parameters_index = 0; parameter_count = 0; prepared = false; } /** * Grows the parameters list to the given size. */ private void growParametersList(int new_size) { // Make new list Object[] new_list = new Object[new_size]; // Copy everything to new list System.arraycopy(parameters, 0, new_list, 0, parameters.length); // Set the new list. parameters = new_list; } /** * Translates the given object to a type the object can process. */ private Object translateObjectType(Object ob) { return ObjectTranslator.translate(ob); } /** * Adds a variable to the query. If the object is not a type that is * a database 'primitive' type (BigDecimal, ByteLongObject, Boolean, * Date, String) then it is serialized and the serialized form is wrapped * in a ByteLongObject. */ public void addVar(Object ob) { ob = translateObjectType(ob); parameters[parameters_index] = ob; ++parameters_index; ++parameter_count; if (parameters_index >= parameters.length) { growParametersList(parameters_index + 8); } } /** * Sets a variable at the given index. Grows if necessary. If the object is * not a type that is a database 'primitive' type (BigDecimal, * ByteLongObject, Boolean, Date, String) then it is serialized and the * serialized form is wrapped in a ByteLongObject. 
*/ public void setVar(int i, Object ob) { ob = translateObjectType(ob); if (i >= parameters.length) { growParametersList(i + 8); } parameters[i] = ob; parameters_index = i + 1; parameter_count = Math.max(parameters_index, parameter_count); } /** * Clears all the parameters. */ public void clear() { parameters_index = 0; parameter_count = 0; for (int i = 0; i < parameters.length; ++i) { parameters[i] = null; } } /** * Returns the query string. */ public String getQuery() { return query; } /** * Returns the array of all objects that are to be used as substitutions * for '?' in the query. *

* NOTE: Array returned references internal Object[] here so don't change! */ public Object[] getVars() { return parameters; } /** * Given a JDBC escape code of the form {keyword ... parameters ...} this * will return the most optimal Mckoi SQL query for the code. */ private String escapeJDBCSubstitution(String jdbc_code) throws SQLException { String code = jdbc_code.substring(1, jdbc_code.length() - 1); int kp_delim = code.indexOf(' '); if (kp_delim != -1) { String keyword = code.substring(0, kp_delim); String body = code.substring(kp_delim).trim(); if (keyword.equals("d")) { // Process a date return "DATE " + body; } if (keyword.equals("t")) { // Process a time return "TIME " + body; } if (keyword.equals("ts")) { // Process a timestamp return "TIMESTAMP " + body; } if (keyword.equals("fn")) { // A function return body; } if (keyword.equals("call") || keyword.equals("?=")) { throw new MSQLException("Stored procedures not supported."); } if (keyword.equals("oj")) { // Outer join return body; } throw new MSQLException("Do not understand JDBC substitution keyword '" + keyword + "' of " + jdbc_code); } else { throw new MSQLException("Malformed JDBC escape code: " + jdbc_code); } } /** * Performs any JDBC escape processing on the query. For example, the * code {d 'yyyy-mm-dd'} is converted to 'DATE 'yyyy-mm-dd'. */ private void doEscapeSubstitutions() throws SQLException { // This is a fast but primitive parser that scans the SQL string and // substitutes any {[code] ... } type escape sequences to the Mckoi // equivalent. This will not make substitutions of anything inside a // quoted area of the query. 
// Exit early if no sign of an escape code if (query.indexOf('{') == -1) { return; } StringBuffer buf = new StringBuffer(); StringBuffer jdbc_escape = null; int i = 0; int sz = query.length(); int state = 0; boolean ignore_next = false; while (i < sz) { char c = query.charAt(i); if (state == 0) { // If currently processing SQL code if (c == '\'' || c == '\"') { state = c; // Set state to quote } else if (c == '{') { jdbc_escape = new StringBuffer(); state = '}'; } } else if (state != 0) { // If currently inside a quote or escape if (!ignore_next) { if (c == '\\') { ignore_next = true; } else { // If at the end of a quoted area if (c == (char) state) { state = 0; if (c == '}') { jdbc_escape.append('}'); buf.append(escapeJDBCSubstitution(new String(jdbc_escape))); jdbc_escape = null; c = ' '; } } } } else { ignore_next = false; } } if (state != '}') { // Copy the character buf.append(c); } else { jdbc_escape.append(c); } ++i; } if (state == '}') { throw new SQLException("Unterminated JDBC escape code in query: " + new String(jdbc_escape)); } query = new String(buf); } /** * Prepares the query by parsing the query string and performing any updates * that are required before being passed down to the lower layers of the * database engine for processing. For example, JDBC escape code processing. */ public void prepare(boolean do_escape_processing) throws SQLException { if (do_escape_processing) { doEscapeSubstitutions(); } prepared = true; } /** * Returns true if this query is equal to another. */ public boolean equals(Object ob) { SQLQuery q2 = (SQLQuery) ob; // NOTE: This could do syntax analysis on the query string to determine // if it's the same or not. if (query.equals(q2.query)) { if (parameter_count == q2.parameter_count) { for (int i = 0; i < parameter_count; ++i) { if (parameters[i] != q2.parameters[i]) { return false; } } return true; } } return false; } /** * Creates an exact copy of this object. 
*/ public SQLQuery copy() { SQLQuery q = new SQLQuery(); q.query = query; q.parameters = (Object[]) parameters.clone(); q.parameters_index = parameters_index; q.parameter_count = parameter_count; q.prepared = prepared; return q; } /** * Outputs the query as text (for debugging) */ public String toString() { StringBuffer buf = new StringBuffer(); buf.append("[ Query:\n[ "); buf.append(getQuery()); buf.append(" ]\n"); if (parameter_count > 0) { buf.append("\nParams:\n[ "); for (int i = 0; i < parameter_count; ++i) { Object ob = parameters[i]; if (ob == null) { buf.append("NULL"); } else { buf.append(parameters[i].toString()); } buf.append(", "); } buf.append(" ]"); } buf.append("\n]"); return new String(buf); } // ---------- Stream transfer methods ---------- /** * Writes the SQL query to the data output stream. */ public void writeTo(DataOutputStream out) throws IOException { out.writeUTF(query); out.writeInt(parameter_count); for (int i = 0; i < parameter_count; ++i) { ObjectTransfer.writeTo(out, parameters[i]); } } /** * Reads an SQLQuery object from the data input stream. */ public static SQLQuery readFrom(DataInputStream in) throws IOException { String query_string = in.readUTF(); SQLQuery query = new SQLQuery(query_string); int arg_length = in.readInt(); for (int i = 0; i < arg_length; ++i) { query.addVar(ObjectTransfer.readFrom(in)); } return query; } /** * Serializes an SQLQuery object to a ByteLongObject. */ public ByteLongObject serializeToBlob() { ByteArrayOutputStream bout = new ByteArrayOutputStream(); DataOutputStream out = new DataOutputStream(bout); try { writeTo(out); out.flush(); return new ByteLongObject(bout.toByteArray()); } catch (IOException e) { throw new Error("IO Error: " + e.getMessage()); } } /** * Deserializes an SQLQuery object from a ByteLongObject. 
*/ public static SQLQuery deserializeFromBlob(ByteLongObject ob) { DataInputStream in = new DataInputStream( new ByteArrayInputStream(ob.getByteArray())); try { return readFrom(in); } catch (IOException e) { throw new Error("IO Error: " + e.getMessage()); } } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/jdbc/StreamDatabaseInterface.java000066400000000000000000000062321330501023400301660ustar00rootroot00000000000000/** * com.mckoi.database.jdbc.StreamDatabaseInterface 16 Aug 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.jdbc; import java.sql.SQLException; import java.io.*; /** * An stream implementation of an interface to a McKoi database. This * is a stream based communication protocol. * * @author Tobias Downer */ class StreamDatabaseInterface extends RemoteDatabaseInterface { /** * The data output stream for the db protocol. */ protected DataOutputStream out; /** * The data input stream for the db protocol. */ protected DataInputStream in; private boolean closed = false; // /** // * Constructor. // */ // StreamDatabaseInterface(String db_name) { // super(db_name); // } /** * Sets up the stream connection with the given input/output stream. 
*/ void setup(InputStream rawin, OutputStream rawout) throws IOException { // System.out.println("rawin: " + rawin); // System.out.println("rawout: " + rawout); if (rawin == null || rawout == null) { throw new IOException("rawin or rawin is null"); } // Get the input and output and wrap around Data streams. in = new DataInputStream(new BufferedInputStream(rawin, 32768)); out = new DataOutputStream(new BufferedOutputStream(rawout, 32768)); } /** * Writes the given command to the server. The stream protocol flushes the * byte array onto the stream. */ void writeCommandToServer(byte[] command, int offset, int size) throws IOException { out.writeInt(size); out.write(command, 0, size); out.flush(); } /** * Blocks until the next command is received from the server. The stream * protocol waits until we receive something from the server. */ byte[] nextCommandFromServer(int timeout) throws IOException { if (closed) { throw new IOException("DatabaseInterface is closed!"); } try { // System.out.println("I'm waiting for a command: " + this); // new Error().printStackTrace(); int command_length = in.readInt(); byte[] buf = new byte[command_length]; in.readFully(buf, 0, command_length); return buf; } catch (NullPointerException e) { System.out.println("Throwable generated at: " + this); throw e; } } void closeConnection() throws IOException { // System.out.println("Closed: " + this); closed = true; try { out.close(); } catch (IOException e) { in.close(); throw e; } in.close(); } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/jdbc/StreamableObjectPart.java000066400000000000000000000027541330501023400275270ustar00rootroot00000000000000/** * com.mckoi.database.jdbc.StreamableObjectPart 07 Sep 2002 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.jdbc; /** * Represents a response from the server for a section of a streamable object. * A streamable object can always be represented as a byte[] array and is * limited to String (as 2-byte unicode) and binary data types. * * @author Tobias Downer */ public class StreamableObjectPart { /** * The byte[] array that is the contents of the cell from the server. */ private byte[] part_contents; /** * Constructs the ResultCellPart. Note that the 'contents' byte array must * be immutable. */ public StreamableObjectPart(byte[] contents) { this.part_contents = contents; } /** * Returns the contents of this ResultCellPart. */ public byte[] getContents() { return part_contents; } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/jdbc/TCPStreamDatabaseInterface.java000066400000000000000000000035051330501023400305350ustar00rootroot00000000000000/** * com.mckoi.database.jdbc.TCPStreamDatabaseInterface 16 Aug 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.jdbc; import java.io.*; import java.sql.*; import java.net.*; /** * Connection to the database via the TCP protocol. * * @author Tobias Downer */ class TCPStreamDatabaseInterface extends StreamDatabaseInterface { /** * The name of the host we are connected to. */ private String host; /** * The port we are connected to. */ private int port; /** * The Socket connection. */ private Socket socket; /** * Constructor. */ TCPStreamDatabaseInterface(String host, int port) { this.host = host; this.port = port; } /** * Connects to the database. */ void connectToDatabase() throws SQLException { if (socket != null) { throw new SQLException("Connection already established."); } try { // Open a socket connection to the server. socket = new Socket(host, port); // Setup the stream with the given input and output streams. setup(socket.getInputStream(), socket.getOutputStream()); } catch (IOException e) { throw new SQLException(e.getMessage()); } } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/jdbc/TriggerListener.java000066400000000000000000000023251330501023400265750ustar00rootroot00000000000000/** * com.mckoi.database.jdbc.TriggerListener 04 Oct 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package com.mckoi.database.jdbc; /** * A listener that is notified when the trigger being listened to is fired. * * @author Tobias Downer */ public interface TriggerListener { /** * Notifies this listener that the trigger with the name has been fired. * Trigger's are specified via the SQL syntax and a trigger listener can * be registered via MckoiConnection. * * @param trigger_name the name of the trigger that fired. */ void triggerFired(String trigger_name); } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/jdbc/UnicodeToBinaryStream.java000066400000000000000000000045351330501023400277030ustar00rootroot00000000000000/** * com.mckoi.database.jdbc.UnicodeToBinaryStream 29 Jan 2003 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.jdbc; import java.io.InputStream; import java.io.IOException; import java.io.BufferedReader; import java.io.Reader; /** * An object that wraps around a Reader and translates the unicode stream into * a stream of bytes that the database is able to transfer to the database. * This object simply converts each char from the Reader into two bytes. See * also BinaryToUnicodeReader for the Reader version of this class. * * @author Tobias Downer */ final class UnicodeToBinaryStream extends InputStream { /** * The Reader we are wrapping. 
*/ private Reader reader; /** * If this is 0 we are on the left byte of the character. If this is 1 we * are on the right byte of the current character. */ private int lr_byte; /** * The current character if 'lr_byte' is 1. */ private int current_c; /** * Constructs the stream. */ public UnicodeToBinaryStream(Reader reader) { // Note, we wrap the input Reader around a BufferedReader. // This is a bit lazy. Perhaps a better implementation of this would // implement 'read(byte[] buf, ...)' and provide its own buffering. this.reader = new BufferedReader(reader); lr_byte = 0; } /** * Reads the next character from the stream. */ public int read() throws IOException { if (lr_byte == 0) { current_c = reader.read(); if (current_c == -1) { return -1; } lr_byte = 1; return (current_c >> 8) & 0x0FF; } else { lr_byte = 0; return current_c & 0x0FF; } } public int available() throws IOException { return 0; } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/jdbc/package.html000066400000000000000000000067431330501023400251120ustar00rootroot00000000000000 com.mckoi.database.jdbc - JDBC interface to Mckoi

The JDBC interface to Mckoi.

Establishing a Connection

An application establishes a JDBC Connection by calling java.sql.DriverManager.getConnection(String url, Properties info). DriverManager.getConnection calls java.sql.Driver.connect, which figures out that the URL is a Mckoi URL, so calls com.mckoi.database.jdbc.MDriver, which was registered by a static initializer in com.mckoi.JDBCDriver.

If the URL is local (embedded mode), MDriver creates an instance of com.mckoi.database.jdbcserver.LocalDatabaseInterface, which in turns creates and wraps up an instance of com.mckoi.database.jdbcserver.JDBCDatabaseInterface, and calls its connectToJVM method to initialize it.

If the URL is remote (client/server mode), MDriver creates an instance of TCPStreamDatabaseInterface and calls its connectToDatabase method in order to establish a TCP connection to the Mckoi database server. For more information on how the server handles connections, see the package com.mckoi.database.jdbcserver.

In either case, the resulting DatabaseInterface is wrapped up in a MConnection, and returned to the application as an instance of java.sql.Connection.

Executing a Query

When an application calls java.sql.Connection.createStatement() on its MConnection, it gets back an instance of MStatement, which carries a pointer to the MConnection.

When the application calls java.sql.Statement.executeQuery(String) on its MStatement, Mckoi creates an SQLQuery from the String, creates an empty MResultSet, then calls MStatement.executeQuery with those two objects. MStatement.executeQuery turns around and calls Mconnection.executeQuery, which calls execQuery on its DatabaseInterface. Depending on whether the connection is local or remote, this call is either to com.mckoi.database.jdbcserver.LocalDatabaseInterface.execQuery for local connections, or to RemoteDatabaseInterface.execQuery for remote connections. These are described more fully in the package description for com.mckoi.database.jdbcserver under Local Queries and Remote Queries. mckoisqldb-1.0.6/src/main/java/com/mckoi/database/jdbcserver/000077500000000000000000000000001330501023400240465ustar00rootroot00000000000000mckoisqldb-1.0.6/src/main/java/com/mckoi/database/jdbcserver/AbstractJDBCDatabaseInterface.java000066400000000000000000000652131330501023400323540ustar00rootroot00000000000000/** * com.mckoi.database.jdbcserver.AbstractJDBCDatabaseInterface 16 Mar 2002 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package com.mckoi.database.jdbcserver; import com.mckoi.database.*; import com.mckoi.database.global.*; import com.mckoi.database.interpret.Statement; import com.mckoi.database.interpret.SQLQueryExecutor; import com.mckoi.database.sql.SQL; import com.mckoi.database.sql.ParseException; import com.mckoi.database.jdbc.*; import com.mckoi.util.IntegerVector; import com.mckoi.util.StringUtil; import com.mckoi.debug.*; import java.sql.SQLException; import java.io.*; import java.util.ArrayList; import java.util.Iterator; import java.util.HashMap; /** * An abstract implementation of JDBCDatabaseInterface that provides a * connection between a single DatabaseConnection and a DatabaseInterface * implementation. *

* This receives database commands from the JDBC layer and dispatches the * queries to the database system. It also manages ResultSet maps for query * results. *

* This implementation does not handle authentication (login) / construction * of the DatabaseConnection object, or disposing of the connection. *

* This implementation ignores the AUTO-COMMIT flag when a query is executed. * To implement AUTO-COMMIT, you should 'commit' after a command is executed. *

* SYNCHRONIZATION: This interface is NOT thread-safe. To make a thread-safe * implementation use the LockingMechanism. *

* See JDBCDatabaseInterface for a standard server-side implementation of this * class. * * @author Tobias Downer */ public abstract class AbstractJDBCDatabaseInterface implements DatabaseInterface { /** * The Databas object that represents the context of this * database interface. */ private Database database; /** * The mapping that maps from result id number to Table object that this * JDBC connection is currently maintaining. *

* NOTE: All Table objects are now valid over a database shutdown + init. */ private HashMap result_set_map; /** * This is incremented every time a result set is added to the map. This * way, we always have a unique key on hand. */ private int unique_result_id; /** * Access to information regarding the user logged in on this connection. * If no user is logged in, this is left as 'null'. We can also use this to * retreive the Database object the user is logged into. */ private User user = null; /** * The database connection transaction. */ private DatabaseConnection database_connection; /** * The SQL parser object for this interface. When a statement is being * parsed, this object is sychronized. */ private SQLQueryExecutor sql_executor; // private SQL sql_parser; /** * Mantains a mapping from streamable object id for a particular object that * is currently being uploaded to the server. This maps streamable_object_id * to blob id reference. */ private HashMap blob_id_map; /** * Set to true when this database interface is disposed. */ private boolean disposed; /** * Sets up the database interface. */ public AbstractJDBCDatabaseInterface(Database database) { this.database = database; result_set_map = new HashMap(); blob_id_map = new HashMap(); unique_result_id = 1; disposed = false; } // ---------- Utility methods ---------- /** * Initializes this database interface with a User and DatabaseConnection * object. This would typically be called from inside an authentication * method, or from 'login'. This must be set before the object can be * used. */ protected final void init(User user, DatabaseConnection connection) { this.user = user; this.database_connection = connection; // Set up the sql parser. sql_executor = new SQLQueryExecutor(); // sql_parser = new SQL(new StringReader("")); } /** * Returns the Database that is the context of this interface. */ protected final Database getDatabase() { return database; } /** * Returns the User object for this connection. 
*/ protected final User getUser() { return user; } /** * Returns a DebugLogger object that can be used to log debug messages * against. */ public final DebugLogger Debug() { return getDatabase().Debug(); } /** * Returns the DatabaseConnection objcet for this connection. */ protected final DatabaseConnection getDatabaseConnection() { return database_connection; } /** * Adds this result set to the list of result sets being handled through * this processor. Returns a number that unique identifies this result * set. */ private int addResultSet(ResultSetInfo result) { // Lock the roots of the result set. result.lockRoot(-1); // -1 because lock_key not implemented // Make a new result id int result_id; // This ensures this block can handle concurrent updates. synchronized (result_set_map) { result_id = ++unique_result_id; // Add the result to the map. result_set_map.put(new Integer(result_id), result); } return result_id; } /** * Gets the result set with the given result_id. */ private ResultSetInfo getResultSet(int result_id) { synchronized (result_set_map) { return (ResultSetInfo) result_set_map.get(new Integer(result_id)); } } /** * Disposes of the result set with the given result_id. After this has * been called, the GC should garbage the table. */ private void disposeResultSet(int result_id) { // Remove this entry. ResultSetInfo table; synchronized (result_set_map) { table = (ResultSetInfo) result_set_map.remove(new Integer(result_id)); } if (table != null) { table.dispose(); } else { Debug().write(Lvl.ERROR, this, "Attempt to dispose invalid 'result_id'."); } } /** * Clears the contents of the result set map. This removes all result_id * ResultSetInfo maps. 
*/ protected final void clearResultSetMap() { Iterator keys; ArrayList list; synchronized (result_set_map) { keys = result_set_map.keySet().iterator(); list = new ArrayList(); while (keys.hasNext()) { list.add(keys.next()); } } keys = list.iterator(); while (keys.hasNext()) { int result_id = ((Integer) keys.next()).intValue(); disposeResultSet(result_id); } } /** * Wraps a Throwable thrown by the execution of a query in DatabaseConnection * with an SQLException and puts the appropriate error messages to the debug * log. */ protected final SQLException handleExecuteThrowable(Throwable e, SQLQuery query) { if (e instanceof ParseException) { Debug().writeException(Lvl.WARNING, e); // Parse exception when parsing the SQL. String msg = e.getMessage(); msg = StringUtil.searchAndReplace(msg, "\r", ""); return new MSQLException(msg, msg, 35, e); } else if (e instanceof TransactionException) { TransactionException te = (TransactionException) e; // Output query that was in error to debug log. Debug().write(Lvl.INFORMATION, this, "Transaction error on: " + query); Debug().writeException(Lvl.INFORMATION, e); // Denotes a transaction exception. return new MSQLException(e.getMessage(), e.getMessage(), 200 + te.getType(), e); } else { // Output query that was in error to debug log. Debug().write(Lvl.WARNING, this, "Exception thrown during query processing on: " + query); Debug().writeException(Lvl.WARNING, e); // Error, we need to return exception to client. return new MSQLException(e.getMessage(), e.getMessage(), 1, e); } } /** * Returns a reference implementation object that handles an object that is * either currently being pushed onto the server from the client, or is being * used to reference a large object in an SQLQuery. */ private Ref getLargeObjectRefFor(long streamable_object_id, byte type, long object_length) { // Does this mapping already exist? 
Long s_ob_id = new Long(streamable_object_id); Object ob = blob_id_map.get(s_ob_id); if (ob == null) { // Doesn't exist so create a new blob handler. Ref ref = database_connection.createNewLargeObject(type, object_length); // Make the blob id mapping blob_id_map.put(s_ob_id, ref); // And return it return ref; } else { // Exists so use this blob ref. return (Ref) ob; } } /** * Returns a reference object that handles the given streamable object id * in this database interface. Unlike the other 'getLargeObjectRefFor * method, this will not create a new handle if it has not already been * formed before by this connection. If the large object ref is not found * an exception is generated. */ private Ref getLargeObjectRefFor(long streamable_object_id) throws SQLException { Long s_ob_id = new Long(streamable_object_id); Object ob = blob_id_map.get(s_ob_id); if (ob == null) { // This basically means the streamable object hasn't been pushed onto the // server. throw new SQLException("Invalid streamable object id in query."); } else { return (Ref) ob; } } /** * Removes the large object reference from the HashMap for the given * streamable object id from the HashMap. This allows the Ref to finalize if * the VM does not maintain any other pointers to it, and therefore clean up * the resources in the store. */ private Ref flushLargeObjectRefFromCache(long streamable_object_id) throws SQLException { try { Long s_ob_id = new Long(streamable_object_id); Object ob = blob_id_map.remove(s_ob_id); if (ob == null) { // This basically means the streamable object hasn't been pushed onto the // server. throw new SQLException("Invalid streamable object id in query."); } else { Ref ref = (Ref) ob; // Mark the blob as complete ref.complete(); // And return it. return ref; } } catch (IOException e) { Debug().writeException(e); throw new SQLException("IO Error: " + e.getMessage()); } } /** * Disposes all resources associated with this object. 
This clears the * ResultSet map, and NULLs all references to help the garbage collector. * This method would normally be called from implementations of the * 'dispose' method. */ protected final void internalDispose() { disposed = true; // Clear the result set mapping clearResultSetMap(); user = null; database_connection = null; sql_executor = null; } /** * Checks if the interface is disposed, and if it is generates a friendly * SQLException informing the user of this. */ protected final void checkNotDisposed() throws SQLException { if (disposed) { throw new SQLException( "Database interface was disposed (was the connection closed?)"); } } // ---------- Implemented from DatabaseInterface ---------- public void pushStreamableObjectPart(byte type, long object_id, long object_length, byte[] buf, long offset, int length) throws SQLException { checkNotDisposed(); try { // Create or retrieve the object managing this binary object_id in this // connection. Ref ref = getLargeObjectRefFor(object_id, type, object_length); // Push this part of the blob into the object. ref.write(offset, buf, length); } catch (IOException e) { Debug().writeException(e); throw new SQLException("IO Error: " + e.getMessage()); } } public QueryResponse execQuery(SQLQuery query) throws SQLException { checkNotDisposed(); // Record the query start time long start_time = System.currentTimeMillis(); // Where query result eventually resides. ResultSetInfo result_set_info; int result_id = -1; // For each StreamableObject in the SQLQuery object, translate it to a // Ref object that presumably has been pre-pushed onto the server from // the client. 
boolean blobs_were_flushed = false; Object[] vars = query.getVars(); if (vars != null) { for (int i = 0; i < vars.length; ++i) { Object ob = vars[i]; // This is a streamable object, so convert it to a *Ref if (ob != null && ob instanceof StreamableObject) { StreamableObject s_object = (StreamableObject) ob; // Flush the streamable object from the cache // Note that this also marks the blob as complete in the blob store. Ref ref = flushLargeObjectRefFromCache(s_object.getIdentifier()); // Set the Ref object in the query. vars[i] = ref; // There are blobs in this query that were written to the blob store. blobs_were_flushed = true; } } } // After the blobs have been flushed, we must tell the connection to // flush and synchronize any blobs that have been written to disk. This // is an important (if subtle) step. if (blobs_were_flushed) { database_connection.flushBlobStore(); } try { // Evaluate the sql query. Table result = sql_executor.execute(database_connection, query); // Put the result in the result cache... This will lock this object // until it is removed from the result set cache. Returns an id that // uniquely identifies this result set in future communication. // NOTE: This locks the roots of the table so that its contents // may not be altered. result_set_info = new ResultSetInfo(query, result); result_id = addResultSet(result_set_info); } catch (Throwable e) { // If result_id set, then dispose the result set. if (result_id != -1) { disposeResultSet(result_id); } // Handle the throwable during query execution throw handleExecuteThrowable(e, query); } // The time it took the query to execute. 
long taken = System.currentTimeMillis() - start_time; // Return the query response return new JDIQueryResponse(result_id, result_set_info, (int) taken, ""); } public ResultPart getResultPart(int result_id, int row_number, int row_count) throws SQLException { checkNotDisposed(); ResultSetInfo table = getResultSet(result_id); if (table == null) { throw new MSQLException("'result_id' invalid.", null, 4, (Throwable) null); } int row_end = row_number + row_count; if (row_number < 0 || row_number >= table.getRowCount() || row_end > table.getRowCount()) { throw new MSQLException("Result part out of range.", null, 4, (Throwable) null); } try { int col_count = table.getColumnCount(); ResultPart block = new ResultPart(row_count * col_count); for (int r = row_number; r < row_end; ++r) { for (int c = 0; c < col_count; ++c) { TObject t_object = table.getCellContents(c, r); // If this is a Ref, we must assign it a streamable object // id that the client can use to access the large object. Object client_ob; if (t_object.getObject() instanceof Ref) { Ref ref = (Ref) t_object.getObject(); client_ob = new StreamableObject(ref.getType(), ref.getRawSize(), ref.getID()); } else { client_ob = t_object.getObject(); } block.addElement(client_ob); } } return block; } catch (Throwable e) { Debug().writeException(Lvl.WARNING, e); // If an exception was generated while getting the cell contents, then // throw an SQLException. 
throw new MSQLException( "Exception while reading results: " + e.getMessage(), e.getMessage(), 4, e); } } public void disposeResult(int result_id) throws SQLException { // Check the DatabaseInterface is not dispoed checkNotDisposed(); // Dispose the result disposeResultSet(result_id); } public StreamableObjectPart getStreamableObjectPart(int result_id, long streamable_object_id, long offset, int len) throws SQLException { checkNotDisposed(); // NOTE: It's important we handle the 'result_id' here and don't just // treat the 'streamable_object_id' as a direct reference into the // blob store. If we don't authenticate a streamable object against its // originating result, we can't guarantee the user has permission to // access the data. This would mean a malicious client could access // BLOB data they may not be permitted to look at. // This also protects us from clients that might send a bogus // streamable_object_id and cause unpredictible results. ResultSetInfo table = getResultSet(result_id); if (table == null) { throw new MSQLException("'result_id' invalid.", null, 4, (Throwable) null); } // Get the large object ref that has been cached in the result set. Ref ref = table.getRef(streamable_object_id); if (ref == null) { throw new MSQLException("'streamable_object_id' invalid.", null, 4, (Throwable) null); } // Restrict the server so that a streamable object part can not exceed // 512 KB. if (len > 512 * 1024) { throw new MSQLException("Request length exceeds 512 KB", null, 4, (Throwable) null); } try { // Read the blob part into the byte array. byte[] blob_part = new byte[len]; ref.read(offset, blob_part, len); // And return as a StreamableObjectPart object. 
return new StreamableObjectPart(blob_part); } catch (IOException e) { throw new MSQLException( "Exception while reading blob: " + e.getMessage(), e.getMessage(), 4, e); } } public void disposeStreamableObject(int result_id, long streamable_object_id) throws SQLException { checkNotDisposed(); // This actually isn't as an important step as I had originally designed // for. To dispose we simply remove the blob ref from the cache in the // result. If this doesn't happen, nothing seriously bad will happen. ResultSetInfo table = getResultSet(result_id); if (table == null) { throw new MSQLException("'result_id' invalid.", null, 4, (Throwable) null); } // Remove this Ref from the table table.removeRef(streamable_object_id); } // ---------- Clean up ---------- /** * Clean up if this object is GC'd. */ public void finalize() throws Throwable { super.finalize(); try { if (!disposed) { dispose(); } } catch (Throwable e) { /* ignore this */ } } // ---------- Inner classes ---------- /** * The response to a query. */ private final static class JDIQueryResponse implements QueryResponse { int result_id; ResultSetInfo result_set_info; int query_time; String warnings; JDIQueryResponse(int result_id, ResultSetInfo result_set_info, int query_time, String warnings) { this.result_id = result_id; this.result_set_info = result_set_info; this.query_time = query_time; this.warnings = warnings; } public int getResultID() { return result_id; } public int getQueryTimeMillis() { return query_time; } public int getRowCount() { return result_set_info.getRowCount(); } public int getColumnCount() { return result_set_info.getColumnCount(); } public ColumnDescription getColumnDescription(int n) { return result_set_info.getFields()[n]; } public String getWarnings() { return warnings; } } /** * Whenever a ResultSet is generated, this object contains the result set. * This class only allows calls to safe methods in Table. *

* NOTE: This is safe provided, * a) The column topology doesn't change (NOTE: issues with ALTER command) * b) Root locking prevents modification to rows. */ private final static class ResultSetInfo { /** * The SQLQuery that was executed to produce this result. */ private SQLQuery query; /** * The table that is the result. */ private Table result; /** * A set of ColumnDescription that describes each column in the ResultSet. */ private ColumnDescription[] col_desc; /** * IntegerVector that contains the row index into the table for each * row of the result. For example, row.intAt(5) will return the row index * of 'result' of the 5th row item. */ private IntegerVector row_index_map; /** * Set to true if the result table has a SimpleRowEnumeration, therefore * guarenteeing we do not need to store a row lookup list. */ private boolean result_is_simple_enum; /** * The number of rows in the result. */ private int result_row_count; /** * Incremented when we lock roots. */ private int locked; /** * A HashMap of blob_reference_id values to Ref objects used to handle * and streamable objects in this result. */ private HashMap streamable_blob_map; /** * Constructs the result set. */ ResultSetInfo(SQLQuery query, Table table) { this.query = query; this.result = table; this.streamable_blob_map = new HashMap(); result_row_count = table.getRowCount(); // HACK: Read the contents of the first row so that we can pick up // any errors with reading, and also to fix the 'uniquekey' bug // that causes a new transaction to be started if 'uniquekey' is // a column and the value is resolved later. 
RowEnumeration row_enum = table.rowEnumeration(); if (row_enum.hasMoreRows()) { int row_index = row_enum.nextRowIndex(); for (int c = 0; c < table.getColumnCount(); ++c) { table.getCellContents(c, row_index); } } // If simple enum, note it here result_is_simple_enum = (row_enum instanceof SimpleRowEnumeration); row_enum = null; // Build 'row_index_map' if not a simple enum if (!result_is_simple_enum) { row_index_map = new IntegerVector(table.getRowCount()); RowEnumeration en = table.rowEnumeration(); while (en.hasMoreRows()) { row_index_map.addInt(en.nextRowIndex()); } } // This is a safe operation provides we are shared. // Copy all the TableField columns from the table to our own // ColumnDescription array, naming each column by what is returned from // the 'getResolvedVariable' method. final int col_count = table.getColumnCount(); col_desc = new ColumnDescription[col_count]; for (int i = 0; i < col_count; ++i) { Variable v = table.getResolvedVariable(i); String field_name; if (v.getTableName() == null) { // This means the column is an alias field_name = "@a" + v.getName(); } else { // This means the column is an schema/table/column reference field_name = "@f" + v.toString(); } col_desc[i] = table.getColumnDefAt(i).columnDescriptionValue(field_name); // col_desc[i] = new ColumnDescription(field_name, table.getFieldAt(i)); } locked = 0; } /** * Returns the SQLQuery that was used to produce this result. */ SQLQuery getSQLQuery() { return query; } /** * Returns a Ref that has been cached in this table object by its * identifier value. */ Ref getRef(long id) { return (Ref) streamable_blob_map.get(new Long(id)); } /** * Removes a Ref that has been cached in this table object by its * identifier value. */ void removeRef(long id) { streamable_blob_map.remove(new Long(id)); } /** * Disposes this object. 
*/ void dispose() { while (locked > 0) { unlockRoot(-1); } result = null; row_index_map = null; col_desc = null; } /** * Gets the cell contents of the cell at the given row/column. *

* Safe only if roots are locked. */ TObject getCellContents(int column, int row) { if (locked > 0) { int real_row; if (result_is_simple_enum) { real_row = row; } else { real_row = row_index_map.intAt(row); } TObject tob = result.getCellContents(column, real_row); // If this is a large object reference then cache it so a streamable // object can reference it via this result. if (tob.getObject() instanceof Ref) { Ref ref = (Ref) tob.getObject(); streamable_blob_map.put(new Long(ref.getID()), ref); } return tob; } else { throw new RuntimeException("Table roots not locked!"); } } /** * Returns the column count. */ int getColumnCount() { return result.getColumnCount(); } /** * Returns the row count. */ int getRowCount() { return result_row_count; } /** * Returns the ColumnDescription array of all the columns in the result. */ ColumnDescription[] getFields() { return col_desc; } /** * Locks the root of the result set. */ void lockRoot(int key) { result.lockRoot(key); ++locked; } /** * Unlocks the root of the result set. */ void unlockRoot(int key) { result.unlockRoot(key); --locked; } } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/jdbcserver/ConnectionPoolServer.java000066400000000000000000000030161330501023400310310ustar00rootroot00000000000000/** * com.mckoi.database.jdbcserver.ConnectionPoolServer 21 Jul 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package com.mckoi.database.jdbcserver; /** * An interface for the connection pool for a server. This is the API for * a service that accepts connections via 'addConnection', waits for the * connection to make a request, and dispatch the request as appropriate to * the database engine. *

* This interface is used to provide different implementations for command * dispatching mechanisms, such as a thread per TCP user, one thread per * TCP connection set, UDP, etc. * * @author Tobias Downer */ interface ConnectionPoolServer { /** * Connects a new ServerConnection into the pool of connections to clients * that this server maintains. */ void addConnection(ServerConnection connection); /** * Closes this connection pool server down. */ void close(); } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/jdbcserver/DefaultLocalBootable.java000066400000000000000000000154061330501023400307260ustar00rootroot00000000000000/** * com.mckoi.database.jdbcserver.DefaultLocalBootable 28 Mar 2002 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package com.mckoi.database.jdbcserver; //import com.mckoi.runtime.BootMain; import com.mckoi.database.control.DBConfig; import com.mckoi.database.control.DBSystem; import com.mckoi.database.control.DBController; import com.mckoi.database.control.DefaultDBConfig; import com.mckoi.database.DatabaseSystem; import com.mckoi.database.Database; import com.mckoi.database.DatabaseException; import com.mckoi.database.jdbc.LocalBootable; import com.mckoi.database.jdbc.DatabaseCallBack; import com.mckoi.database.jdbc.DatabaseInterface; import com.mckoi.database.jdbc.QueryResponse; import com.mckoi.database.jdbc.ResultPart; import com.mckoi.database.jdbc.SQLQuery; import java.io.File; import java.sql.SQLException; /** * A bootable object that filters through to a JDBCDatabaseInterface but * is thread-safe and multi-threaded. This is to be used when you have a * local JDBC Client accessing a stand-alone database. * * @author Tobias Downer */ public class DefaultLocalBootable implements LocalBootable { /** * Set to true if the database is booted. */ private boolean booted = false; /** * Set to true when this interface is active. */ private boolean active = false; /** * The local DBSystem database object. */ private DBSystem dbsys; /** * The connection id. This is incremented by 1 each time an * interface connects to the local JVM. */ private int connect_id = 0; /** * The number of connections that are current open. */ private int open_connections = 0; /** * The connection lock object. */ private Object connection_lock = new Object(); /** * Creates and boots a local database with the given configuration. This * is implemented from LocalBootable. * * @param config the configuration variables. 
*/ public DatabaseInterface create(String username, String password, DBConfig config) throws SQLException { if (username.equals("") || password.equals("")) { throw new SQLException("Username and Password must both be set."); } if (!booted) { // Local connections are formatted as; // 'Local/[type]/[connect_id]' String host_string = "Local/Create/"; // Create the DBSystem and bind it to a DatabaseInterface. DBController controller = DBController.getDefault(); dbsys = controller.createDatabase(config, username, password); DatabaseInterface db_interface = new LocalJDBCDatabaseInterface(dbsys.getDatabase(), host_string); booted = true; ++open_connections; active = true; return db_interface; } throw new SQLException("Database is already created."); } /** * Boots the local database with the given configuration. This is * implemented from LocalBootable. * * @param config the configuration variables. */ public DatabaseInterface boot(DBConfig config) throws SQLException { if (!booted) { // Local connections are formatted as; // 'Local/[type]/[connect_id]' String host_string = "Local/Boot/"; // Start the DBSystem and bind it to a DatabaseInterface. DBController controller = DBController.getDefault(); dbsys = controller.startDatabase(config); DatabaseInterface db_interface = new LocalJDBCDatabaseInterface(dbsys.getDatabase(), host_string); booted = true; ++open_connections; active = true; return db_interface; } else { throw new SQLException("Database was booted more than once."); } } /** * Attempts to test if the database exists or not. Returns true if the * database exists. * * @param config the configuration variables. */ public boolean checkExists(DBConfig config) throws SQLException { if (!booted) { DBController controller = DBController.getDefault(); return controller.databaseExists(config); } else { throw new SQLException("The database is already booted."); } } /** * Returns true if a database has successfully been booted in this JVM. 
If * a database hasn't been botted then it returns false. */ public boolean isBooted() throws SQLException { return booted; } /** * Creates a new LocalDatabaseInterface that is connected to the database * currently running in this VM. Calling this method must guarentee that * either 'boot' or 'create' has been called in this VM beforehand. */ public DatabaseInterface connectToJVM() throws SQLException { if (booted) { // Local connections are formatted as; // 'Local/[type]/[connect_id]' String host_string = "Local/Connection/" + connect_id; // Create a DatabaseInterface, DatabaseInterface db_interface = new LocalJDBCDatabaseInterface(dbsys.getDatabase(), host_string); ++connect_id; ++open_connections; active = true; return db_interface; } else { throw new SQLException("The database is not started."); } } // ---------- Inner classes ---------- /** * A local implementation of JDBCDatabaseInterface that will dispose the * parent LocalBootable object when the last open connection is disposed. */ private class LocalJDBCDatabaseInterface extends JDBCDatabaseInterface { boolean closed = false; public LocalJDBCDatabaseInterface(Database database, String host_string) { super(database, host_string); } // ---------- Overwritten from JDBCDatabaseInterface ---------- public void dispose() throws SQLException { if (!closed) { super.dispose(); --open_connections; // When all connections are closed, shut down... if (open_connections <= 0) { // When the local database interface is disposed, we must shut down // the database system. 
dbsys.close(); active = false; booted = false; dbsys = null; } closed = true; } } // ---------- Clean up ---------- public void finalize() throws Throwable { super.finalize(); dispose(); } } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/jdbcserver/JDBCDatabaseInterface.java000066400000000000000000000255151330501023400306710ustar00rootroot00000000000000/** * com.mckoi.database.jdbcserver.JDBCDatabaseInterface 16 Aug 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.jdbcserver; import com.mckoi.database.*; import com.mckoi.database.jdbc.*; import com.mckoi.util.StringUtil; import com.mckoi.debug.*; import java.sql.SQLException; /** * An implementation of jdbc.DatabaseInterface on the server-side. *

* This receives database commands and dispatches them to the database * system. This assumes that all calls to the methods here are in a * UserWorkerThread thread. *

* NOTE: Currently, the client/server use of this object isn't multi-threaded, * however the local connection could be. Therefore, this object has been * made multi-thread safe. * * @author Tobias Downer */ public class JDBCDatabaseInterface extends AbstractJDBCDatabaseInterface { /** * Set this to true if command logging is enabled. */ private static final boolean COMMAND_LOGGING = true; /** * The unique host name denoting the client that's connected. */ private String host_name; /** * Sets up the processor. */ public JDBCDatabaseInterface(Database database, String host_name) { super(database); this.host_name = host_name; } /** * Tries to authenticate the username and password against the given * database. Returns true if we are successful. If successful, alters the * state of this object to reflect the fact the user has logged in. */ private boolean authenticate(Database database, String default_schema, String username, String password, final DatabaseCallBack database_call_back) { // If the 'user' variable is null, no one is currently logged in to this // connection. if (getUser() == null) { if (COMMAND_LOGGING && database.getSystem().logQueries()) { // Output the instruction to the commands log. StringBuffer log_str = new StringBuffer(); log_str.append("[JDBC] ["); log_str.append(username); log_str.append("] "); log_str.append('['); log_str.append(host_name); log_str.append("] "); log_str.append("Log in.\n"); database.getCommandsLog().log(new String(log_str)); } // Write debug message, if (Debug().isInterestedIn(Lvl.INFORMATION)) { Debug().write(Lvl.INFORMATION, this, "Authenticate User: " + username); } // Create a UserCallBack class. 
DatabaseConnection.CallBack call_back = new DatabaseConnection.CallBack() { public void triggerNotify(String trigger_name, int trigger_event, String trigger_source, int fire_count) { StringBuffer message = new StringBuffer(); message.append(trigger_name); message.append(' '); message.append(trigger_source); message.append(' '); message.append(fire_count); database_call_back.databaseEvent(99, new String(message)); } }; // Try to create a User object. User this_user = database.authenticateUser(username, password, host_name); DatabaseConnection database_connection = null; // If successful, ask the engine for a DatabaseConnection object. if (this_user != null) { database_connection = database.createNewConnection(this_user, call_back); // Put the connection in exclusive mode LockingMechanism locker = database_connection.getLockingMechanism(); locker.setMode(LockingMechanism.EXCLUSIVE_MODE); try { // By default, JDBC connections are auto-commit database_connection.setAutoCommit(true); // Set the default schema for this connection if it exists if (database_connection.schemaExists(default_schema)) { database_connection.setDefaultSchema(default_schema); } else { Debug().write(Lvl.WARNING, this, "Couldn't change to '" + default_schema + "' schema."); // If we can't change to the schema then change to the APP schema database_connection.setDefaultSchema("APP"); } } finally { try { // Make sure we commit the connection. database_connection.commit(); } catch (TransactionException e) { // Just issue a warning... Debug().writeException(Lvl.WARNING, e); } finally { // Guarentee that we unluck from EXCLUSIVE locker.finishMode(LockingMechanism.EXCLUSIVE_MODE); } } } // If we have a user object, then init the object, if (this_user != null) { init(this_user, database_connection); return true; } else { // Otherwise, return false. 
return false; } } else { throw new RuntimeException("Attempt to authenticate user twice"); } } // ---------- Implemented from DatabaseInterface ---------- public boolean login(String default_schema, String username, String password, DatabaseCallBack database_call_back) throws SQLException { Database database = getDatabase(); boolean b = authenticate(database, default_schema, username, password, database_call_back); return b; } public QueryResponse execQuery(SQLQuery query) throws SQLException { // Check the interface isn't disposed (connection was closed). checkNotDisposed(); User user = getUser(); DatabaseConnection database_connection = getDatabaseConnection(); // Log this query if query logging is enabled if (COMMAND_LOGGING && getDatabase().getSystem().logQueries()) { // Output the instruction to the commands log. StringBuffer log_str = new StringBuffer(); log_str.append("[JDBC] ["); log_str.append(user.getUserName()); log_str.append("] "); log_str.append('['); log_str.append(host_name); log_str.append("] "); log_str.append("Query: "); log_str.append(query.getQuery()); log_str.append('\n'); user.getDatabase().getCommandsLog().log(new String(log_str)); } // Write debug message (INFORMATION level) if (Debug().isInterestedIn(Lvl.INFORMATION)) { Debug().write(Lvl.INFORMATION, this, "Query From User: " + user.getUserName() + "@" + host_name); Debug().write(Lvl.INFORMATION, this, "Query: " + query.getQuery().trim()); } // Get the locking mechanism. LockingMechanism locker = database_connection.getLockingMechanism(); int lock_mode = -1; QueryResponse response = null; try { try { // For simplicity - all database locking is now exclusive inside // a transaction. This means it is not possible to execute // queries concurrently inside a transaction. However, we are // still able to execute queries concurrently from different // connections. 
// // It's debatable whether we even need to perform this lock anymore // because we could change the contract of this method so that // it is not thread safe. This would require that the callee ensures // more than one thread can not execute queries on the connection. lock_mode = LockingMechanism.EXCLUSIVE_MODE; locker.setMode(lock_mode); // Execute the query (behaviour for this comes from super). response = super.execQuery(query); // Return the result. return response; } finally { try { // This is executed no matter what happens. Very important we // unlock the tables. if (lock_mode != -1) { locker.finishMode(lock_mode); } } catch (Throwable e) { // If this throws an exception, we should output it to the debug // log and screen. e.printStackTrace(System.err); Debug().write(Lvl.ERROR, this, "Exception finishing locks"); Debug().writeException(e); // Note, we can't throw an error here because we may already be in // an exception that happened in the above 'try' block. } } } finally { // This always happens after tables are unlocked. // Also guarenteed to happen even if something fails. // If we are in auto-commit mode then commit the query here. // Do we auto-commit? if (database_connection.getAutoCommit()) { // Yes, so grab an exclusive lock and auto-commit. try { // Lock into exclusive mode. locker.setMode(LockingMechanism.EXCLUSIVE_MODE); // If an error occured then roll-back if (response == null) { // Rollback. database_connection.rollback(); } else { try { // Otherwise commit. database_connection.commit(); } catch (Throwable e) { // Dispose this response if the commit failed. 
disposeResult(response.getResultID()); // And throw the SQL Exception throw handleExecuteThrowable(e, query); } } } finally { locker.finishMode(LockingMechanism.EXCLUSIVE_MODE); } } } } public void dispose() throws SQLException { if (getUser() != null) { DatabaseConnection database = getDatabaseConnection(); LockingMechanism locker = database.getLockingMechanism(); try { // Lock into exclusive mode, locker.setMode(LockingMechanism.EXCLUSIVE_MODE); // Roll back any open transaction. database.rollback(); } finally { // Finish being in exclusive mode. locker.finishMode(LockingMechanism.EXCLUSIVE_MODE); // Close the database connection object. database.close(); // Log out the user getUser().logout(); // Call the internal dispose method. internalDispose(); } } } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/jdbcserver/JDBCProcessor.java000066400000000000000000000405241330501023400273200ustar00rootroot00000000000000/** * com.mckoi.database.jdbcserver.JDBCProcessor 22 Jul 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package com.mckoi.database.jdbcserver; import com.mckoi.database.global.ObjectTransfer; import com.mckoi.database.jdbc.StreamableObjectPart; import com.mckoi.database.jdbc.ProtocolConstants; import com.mckoi.database.jdbc.MSQLException; import com.mckoi.database.jdbc.DatabaseCallBack; import com.mckoi.database.jdbc.DatabaseInterface; import com.mckoi.database.jdbc.QueryResponse; import com.mckoi.database.jdbc.ResultPart; import com.mckoi.database.jdbc.SQLQuery; import com.mckoi.debug.*; import com.mckoi.util.ByteArrayUtil; import java.sql.SQLException; import java.io.*; /** * This processes JDBC commands from a JDBC client and dispatches the commands * to the database. This is a state based class. There is a single processor * for each JDBC client connected. This class is designed to be flexible * enough to handle packet based protocols as well as stream based * protocols. * * @author Tobias Downer */ abstract class JDBCProcessor implements ProtocolConstants { /** * The version of the server protocol. */ private static final int SERVER_VERSION = 1; /** * The current state we are in. 0 indicates we haven't logged in yet. 100 * indicates we are logged in. */ private int state; /** * Number of authentications tried. */ private int authentication_tries; /** * The interface to the database. */ private DatabaseInterface db_interface; /** * An object the debug information can be logged to. */ private DebugLogger debug; /** * Sets up the processor. */ JDBCProcessor(DatabaseInterface db_interface, DebugLogger logger) { this.debug = logger; this.db_interface = db_interface; state = 0; authentication_tries = 0; } /** * The database call back method that sends database events back to the * client. */ private DatabaseCallBack db_call_back = new DatabaseCallBack() { public void databaseEvent(int event_type, String event_message) { try { // Format the call back and send the event. 
ByteArrayOutputStream bout = new ByteArrayOutputStream(); DataOutputStream dout = new DataOutputStream(bout); dout.writeInt(event_type); dout.writeUTF(event_message); sendEvent(bout.toByteArray()); } catch (IOException e) { debug.write(Lvl.ERROR, this, "IO Error: " + e.getMessage()); debug.writeException(e); } } }; protected static void printByteArray(byte[] array) { System.out.println("Length: " + array.length); for (int i = 0; i < array.length; ++i) { System.out.print(array[i]); System.out.print(", "); } } /** * Processes a single JDBCCommand from the client. The command comes in as * a byte[] array and the response is written out as a byte[] array. If * it returns 'null' then it means the connection has been closed. */ byte[] processJDBCCommand(byte[] command) throws IOException { // printByteArray(command); if (state == 0) { // State 0 means we looking for the header... int magic = ByteArrayUtil.getInt(command, 0); // The driver version number int maj_ver = ByteArrayUtil.getInt(command, 4); int min_ver = ByteArrayUtil.getInt(command, 8); byte[] ack_command = new byte[4 + 1 + 4 + 1]; // Send back an acknowledgement and the version number of the server ByteArrayUtil.setInt(ACKNOWLEDGEMENT, ack_command, 0); ack_command[4] = 1; ByteArrayUtil.setInt(SERVER_VERSION, ack_command, 5); ack_command[9] = 0; // Set to the next state. state = 4; // Return the acknowledgement return ack_command; // // We accept drivers equal or less than 1.00 currently. // if ((maj_ver == 1 && min_ver == 0) || maj_ver == 0) { // // Go to next state. // state = 4; // return single(ACKNOWLEDGEMENT); // } // else { // // Close the connection if driver invalid. // close(); // } // // return null; } else if (state == 4) { // State 4 means we looking for username and password... 
ByteArrayInputStream bin = new ByteArrayInputStream(command); DataInputStream din = new DataInputStream(bin); String default_schema = din.readUTF(); String username = din.readUTF(); String password = din.readUTF(); try { boolean good = db_interface.login(default_schema, username, password, db_call_back); if (good == false) { // Close after 12 tries. if (authentication_tries >= 12) { close(); } else { ++authentication_tries; return single(USER_AUTHENTICATION_FAILED); } } else { state = 100; return single(USER_AUTHENTICATION_PASSED); } } catch (SQLException e) { } return null; } else if (state == 100) { // Process the query return processQuery(command); } else { throw new Error("Illegal state: " + state); } } /** * Returns the state of the connection. 0 = not logged in yet. 1 = logged * in. */ int getState() { return state; } /** * Convenience, returns a single 4 byte array with the given int encoded * into it. */ private byte[] single(int val) { byte[] buf = new byte[4]; ByteArrayUtil.setInt(val, buf, 0); return buf; } /** * Creates a response that represents an SQL exception failure. */ private byte[] exception(int dispatch_id, SQLException e) throws IOException { int code = e.getErrorCode(); String msg = e.getMessage(); if (msg == null) { msg = "NULL exception message"; } String server_msg = ""; String stack_trace = ""; if (e instanceof MSQLException) { MSQLException me = (MSQLException) e; server_msg = me.getServerErrorMsg(); stack_trace = me.getServerErrorStackTrace(); } else { StringWriter writer = new StringWriter(); e.printStackTrace(new PrintWriter(writer)); stack_trace = writer.toString(); } ByteArrayOutputStream bout = new ByteArrayOutputStream(); DataOutputStream dout = new DataOutputStream(bout); dout.writeInt(dispatch_id); dout.writeInt(EXCEPTION); dout.writeInt(code); dout.writeUTF(msg); dout.writeUTF(stack_trace); return bout.toByteArray(); } /** * Creates a response that indicates a simple success of an operation with * the given dispatch id. 
*/ private byte[] simpleSuccess(int dispatch_id) throws IOException { byte[] buf = new byte[8]; ByteArrayUtil.setInt(dispatch_id, buf, 0); ByteArrayUtil.setInt(SUCCESS, buf, 4); return buf; } /** * Processes a query on the byte[] array and returns the result. */ private byte[] processQuery(byte[] command) throws IOException { byte[] result; // The first int is the command. int ins = ByteArrayUtil.getInt(command, 0); // Otherwise must be a dispatch type request. // The second is the dispatch id. int dispatch_id = ByteArrayUtil.getInt(command, 4); if (dispatch_id == -1) { throw new Error("Special case dispatch id of -1 in query"); } if (ins == RESULT_SECTION) { result = resultSection(dispatch_id, command); } else if (ins == QUERY) { result = queryCommand(dispatch_id, command); } else if (ins == PUSH_STREAMABLE_OBJECT_PART) { result = pushStreamableObjectPart(dispatch_id, command); } else if (ins == DISPOSE_RESULT) { result = disposeResult(dispatch_id, command); } else if (ins == STREAMABLE_OBJECT_SECTION) { result = streamableObjectSection(dispatch_id, command); } else if (ins == DISPOSE_STREAMABLE_OBJECT) { result = disposeStreamableObject(dispatch_id, command); } else if (ins == CLOSE) { close(); result = null; } else { throw new Error("Command (" + ins + ") not understood."); } return result; } /** * Disposes of this processor. */ void dispose() { try { db_interface.dispose(); } catch (Throwable e) { debug.writeException(Lvl.ERROR, e); } } // ---------- JDBC primitive commands ---------- /** * Executes a query and returns the header for the result in the response. * This keeps track of all result sets because sections of the result are * later queries via the 'RESULT_SECTION' command. *

* 'dispatch_id' is the number we need to respond with. */ private byte[] queryCommand(int dispatch_id, byte[] command) throws IOException { // Read the query from the command. ByteArrayInputStream bin = new ByteArrayInputStream(command, 8, command.length - 8); DataInputStream din = new DataInputStream(bin); SQLQuery query = SQLQuery.readFrom(din); try { // Do the query QueryResponse response = db_interface.execQuery(query); // Prepare the stream to output the response to, ByteArrayOutputStream bout = new ByteArrayOutputStream(); DataOutputStream dout = new DataOutputStream(bout); dout.writeInt(dispatch_id); dout.writeInt(SUCCESS); // The response sends the result id, the time the query took, the // total row count, and description of each column in the result. dout.writeInt(response.getResultID()); dout.writeInt(response.getQueryTimeMillis()); dout.writeInt(response.getRowCount()); int col_count = response.getColumnCount(); dout.writeInt(col_count); for (int i = 0; i < col_count; ++i) { response.getColumnDescription(i).writeTo(dout); } return bout.toByteArray(); } catch (SQLException e) { // debug.writeException(e); return exception(dispatch_id, e); } } /** * Pushes a part of a streamable object onto the server. *

* 'dispatch_id' is the number we need to respond with. */ private byte[] pushStreamableObjectPart(int dispatch_id, byte[] command) throws IOException { byte type = command[8]; long object_id = ByteArrayUtil.getLong(command, 9); long object_length = ByteArrayUtil.getLong(command, 17); int length = ByteArrayUtil.getInt(command, 25); byte[] ob_buf = new byte[length]; System.arraycopy(command, 29, ob_buf, 0, length); long offset = ByteArrayUtil.getLong(command, 29 + length); try { // Pass this through to the underlying database interface. db_interface.pushStreamableObjectPart(type, object_id, object_length, ob_buf, offset, length); // Return operation success. return simpleSuccess(dispatch_id); } catch (SQLException e) { return exception(dispatch_id, e); } } /** * Responds with a part of the result set of a query made via the 'QUERY' * command. *

* 'dispatch_id' is the number we need to respond with. */ private byte[] resultSection(int dispatch_id, byte[] command) throws IOException { int result_id = ByteArrayUtil.getInt(command, 8); int row_number = ByteArrayUtil.getInt(command, 12); int row_count = ByteArrayUtil.getInt(command, 16); try { // Get the result part... ResultPart block = db_interface.getResultPart(result_id, row_number, row_count); ByteArrayOutputStream bout = new ByteArrayOutputStream(); DataOutputStream dout = new DataOutputStream(bout); dout.writeInt(dispatch_id); dout.writeInt(SUCCESS); // Send the contents of the result set. // HACK - Work out column count by dividing number of entries in block // by number of rows. int col_count = block.size() / row_count; dout.writeInt(col_count); int bsize = block.size(); for (int index = 0; index < bsize; ++index) { ObjectTransfer.writeTo(dout, block.elementAt(index)); } return bout.toByteArray(); } catch (SQLException e) { return exception(dispatch_id, e); } } /** * Returns a section of a streamable object. *

* 'dispatch_id' is the number we need to respond with. */ private byte[] streamableObjectSection(int dispatch_id, byte[] command) throws IOException { int result_id = ByteArrayUtil.getInt(command, 8); long streamable_object_id = ByteArrayUtil.getLong(command, 12); long offset = ByteArrayUtil.getLong(command, 20); int length = ByteArrayUtil.getInt(command, 28); try { StreamableObjectPart ob_part = db_interface.getStreamableObjectPart(result_id, streamable_object_id, offset, length); ByteArrayOutputStream bout = new ByteArrayOutputStream(); DataOutputStream dout = new DataOutputStream(bout); dout.writeInt(dispatch_id); dout.writeInt(SUCCESS); byte[] buf = ob_part.getContents(); dout.writeInt(buf.length); dout.write(buf, 0, buf.length); return bout.toByteArray(); } catch (SQLException e) { return exception(dispatch_id, e); } } /** * Disposes of a streamable object. *

* 'dispatch_id' is the number we need to respond with. */ private byte[] disposeStreamableObject(int dispatch_id, byte[] command) throws IOException { int result_id = ByteArrayUtil.getInt(command, 8); long streamable_object_id = ByteArrayUtil.getLong(command, 12); try { // Pass this through to the underlying database interface. db_interface.disposeStreamableObject(result_id, streamable_object_id); // Return operation success. return simpleSuccess(dispatch_id); } catch (SQLException e) { return exception(dispatch_id, e); } } /** * Disposes of a result set we queries via the 'QUERY' command. *

* 'dispatch_id' is the number we need to respond with. */ private byte[] disposeResult(int dispatch_id, byte[] command) throws IOException { // Get the result id. int result_id = ByteArrayUtil.getInt(command, 8); try { // Dispose the table. db_interface.disposeResult(result_id); // Return operation success. return simpleSuccess(dispatch_id); } catch (SQLException e) { return exception(dispatch_id, e); } } // ---------- Abstract methods ---------- /** * Sends an event to the client. This is used to notify the client of * trigger events, etc. *

* SECURITY ISSUE: This is always invoked by the DatabaseDispatcher. We * have to be careful that this method isn't allowed to block. Otherwise * the DatabaseDispatcher thread will be out of operation. Unfortunately * assuring this may not be possible until Java has non-blocking IO, or we * use datagrams for transmission. I know for sure that the TCP * implementation is vunrable. If the client doesn't 'read' what we are * sending then this'll block when the buffers become full. */ public abstract void sendEvent(byte[] event_msg) throws IOException; /** * Closes the connection with the client. */ public abstract void close() throws IOException; /** * Returns true if the connection to the client is closed. */ public abstract boolean isClosed() throws IOException; // ---------- Finalize ---------- public final void finalize() throws Throwable { super.finalize(); try { dispose(); } catch (Throwable e) { /* ignore */ } } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/jdbcserver/MultiThreadedConnectionPoolServer.java000066400000000000000000000147501330501023400335140ustar00rootroot00000000000000/** * com.mckoi.database.jdbcserver.MultiThreadedConnectionPoolServer 21 Jun 2001 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package com.mckoi.database.jdbcserver; import com.mckoi.database.User; import com.mckoi.database.Database; import com.mckoi.database.DatabaseSystem; import com.mckoi.debug.*; import java.io.IOException; import java.util.ArrayList; import java.util.LinkedList; import java.util.ResourceBundle; /** * A multi-threaded implementation of a connection pool server. This starts * a new thread for each connection made and processes each command as they * arrive. * * @author Tobias Downer */ final class MultiThreadedConnectionPoolServer implements ConnectionPoolServer { /** * If this is set to true then the server periodically outputs statistics * about the connections. */ private static final boolean DISPLAY_STATS = false; /** * The Database parent. */ private Database database; /** * The list of all threads that were created to deal with incoming * commands. */ private ArrayList client_threads; /** * The Constructor. The argument is the configuration file. */ MultiThreadedConnectionPoolServer(Database database) { this.database = database; client_threads = new ArrayList(); } /** * Returns a DebugLogger object that we can log debug messages to. */ public final DebugLogger Debug() { return database.Debug(); } /** * Connects a new ServerConnection into the pool of connections to clients * that this server maintains. We then cycle through these connections * determining whether any commands are pending. If a command is pending * we spawn off a worker thread to do the task. */ public void addConnection(ServerConnection connection) { ClientThread client_thread = new ClientThread(connection); synchronized(client_threads) { client_threads.add(client_thread); } client_thread.start(); } /** * Closes this connection pool server down. 
*/ public void close() { synchronized (client_threads) { int size = client_threads.size(); for (int i = 0; i < size; ++i) { ((ClientThread) client_threads.get(i)).close(); } } } // ---------- Inner classes ---------- /** * This thread blocks waiting for a complete command to arrive from the * client it is connected to. */ private class ClientThread extends Thread { /** * The ServerConnection object being serviced by this thread. */ private ServerConnection server_connection; /** * If this is set to true, the thread run method should close off. */ private boolean client_closed; /** * This is set to true if we are processing a request from the client. */ private boolean processing_command; /** * The Constructor. */ public ClientThread(ServerConnection connection) { super(); // setPriority(NORM_PRIORITY - 1); setName("Mckoi - Client Connection"); this.server_connection = connection; client_closed = false; processing_command = false; } /** * Checks each connection in the 'service_connection_list' list. If there * is a command pending, and any previous commands on this connection have * completed, then this will spawn off a new process to deal with the * command. */ private void checkCurrentConnection() throws InterruptedException { try { // Wait if we are processing a command synchronized(this) { while (processing_command) { if (client_closed) { return; } // Wait 2 minutes just to make sure we don't miss a poll, wait(120000); } } // Block until a complete command is available server_connection.blockForRequest(); processing_command = true; database.execute(null, null, new Runnable() { public void run() { try { // Process the next request that's pending. 
server_connection.processRequest(); } catch (IOException e) { Debug().writeException(Lvl.INFORMATION, e); } finally { // Not processing a command anymore so notify the ClientThread processing_command = false; synchronized (ClientThread.this) { ClientThread.this.notifyAll(); } } } }); } catch (IOException e) { // If an IOException is generated, we must remove this provider from // the list. close(); // This happens if the connection closes. Debug().write(Lvl.INFORMATION, this, "IOException generated while checking connections, " + "removing provider."); Debug().writeException(Lvl.INFORMATION, e); } } /** * Call this method to stop the thread. */ public synchronized void close() { client_closed = true; try { server_connection.close(); } catch (Throwable e) { // ignore } notifyAll(); } /** * The Runnable method of the farmer thread. */ public void run() { while (true) { try { boolean closed = false; synchronized (this) { closed = client_closed; } // Exit if the farmer thread has been closed... if (closed == true) { // Remove this thread from the list of client threads. synchronized(client_threads) { client_threads.remove(this); } return; } checkCurrentConnection(); } catch (Throwable e) { Debug().write(Lvl.ERROR, this, "Connection Pool Farmer Error"); Debug().writeException(e); } } } }; } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/jdbcserver/ServerConnection.java000066400000000000000000000061071330501023400302030ustar00rootroot00000000000000/** * com.mckoi.database.jdbcserver.ServerConnection 21 Jul 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.jdbcserver; import com.mckoi.database.Database; import com.mckoi.database.User; import java.io.IOException; /** * A server side connection with a client. Each client that is connected * to the database has a ServerConnection object. * * @author Tobias Downer */ interface ServerConnection { /** * This should return true if it has been determined that there is an * entire command waiting to be serviced on this connection. This method * is always run on the same thread for all connections. It is called * many times a second by the connection pool server so it must execute * extremely fast. *

* ISSUE: Method is polled! Unfortunately can't get around this because * of the limitation in Java that TCP connections must block on a thread, * and we can't block if we are to be servicing 100+ connections. */ boolean requestPending() throws IOException; /** * Processes a pending command on the connection. This method is called * from a database worker thread. The method will block until a request * has been received and processed. Note, it is not desirable is some * cases to allow this method to block. If a call to 'requestPending' * returns true then then method is guarenteed not to block. *

* The first call to this method will handle the hand shaking protocol * between the client and server. *

* While this method is doing something, it can not be called again even * if another request arrives from the client. All calls to this method * are sequential. This method will only be called if the 'ping' method is * not currently being processed. */ void processRequest() throws IOException; /** * Blocks until a complete command is available to be processed. This is * used for a blocking implementation. As soon as this method returns then * a call to 'processRequest' will process the incoming command. */ void blockForRequest() throws IOException; /** * Pings the connection. This is used to determine if the connection is * alive or not. If it's not, we should throw an IOException. *

* This method will only be called if the 'processRequest' method is not * being processed. */ void ping() throws IOException; /** * Closes this connection. */ void close() throws IOException; } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/jdbcserver/SingleThreadedConnectionPoolServer.java000066400000000000000000000354561330501023400336510ustar00rootroot00000000000000/** * com.mckoi.database.jdbcserver.SingleThreadedConnectionPoolServer 22 Jun 2001 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.jdbcserver; import com.mckoi.database.User; import com.mckoi.database.Database; import com.mckoi.database.DatabaseSystem; import com.mckoi.debug.*; import java.io.IOException; import java.util.ArrayList; import java.util.LinkedList; import java.util.ResourceBundle; /** * A generic database server class that provides a thread that dispatches * commands to the underlying database. This class only provides a framework * for creating a server. It doesn't provide any implementation * specifics for protocols. *

* An TCP implementation of this class would wait for connections and then * create a ServerConnection implementation and feed it into the pool for * processing. This object will then poll the ServerConnection until a * command is pending, and then dispatch the command to a database worker * thread. *

* This object will ping the clients every so often to see if they are alive. * * @author Tobias Downer */ final class SingleThreadedConnectionPoolServer implements ConnectionPoolServer { /** * The number of milliseconds between client pings. * NOTE: Should this be a configurable variable in the '.conf' file? * (45 seconds) */ private static final int PING_BREAK = 45 * 1000; //4 * 60 * 1000; /** * If this is set to true then the server periodically outputs statistics * about the connections. */ private static final boolean DISPLAY_STATS = false; /** * The Database context. */ private Database database; /** * The list of ServerConnection objects that are pending to be added into the * current service provider list next time it is checked. */ private ArrayList pending_connections_list; /** * The ServerFarmer object that polls for information from the clients and * dispatches the request to the worker threads. */ private ServerFarmer farmer; /** * The Constructor. The argument is the configuration file. */ SingleThreadedConnectionPoolServer(Database database) { this.database = database; pending_connections_list = new ArrayList(); // Create the farmer thread that services all the connections. farmer = new ServerFarmer(); farmer.start(); } /** * Returns a DebugLogger object that we can log debug messages to. */ public final DebugLogger Debug() { return database.Debug(); } /** * Connects a new ServerConnection into the pool of connections to clients * that this server maintains. We then cycle through these connections * determining whether any commands are pending. If a command is pending * we spawn off a worker thread to do the task. */ public void addConnection(ServerConnection connection) { synchronized(pending_connections_list) { pending_connections_list.add(connection); } } /** * Closes this connection pool server down. 
*/ public void close() { farmer.close(); } // ---------- Inner classes ---------- /** * This thread is a low priority thread that checks all the current service * providers periodically to determine if there's any commands pending. */ private class ServerFarmer extends Thread { /** * The list of ServerConnection objects that are currently being serviced * by this server. */ private ArrayList server_connections_list; /** * Staticial information collected. */ private int stat_display = 0; private int commands_run = 0; private int commands_waited = 0; /** * If this is set to true, then the farmer run method should close off. */ private boolean farmer_closed; /** * The number of milliseconds to wait between each poll of the 'available' * method of the socket. This value is determined by the configuration * file during initialization. */ private int poll_wait_time; /** * The Constructor. */ public ServerFarmer() { super(); // setPriority(NORM_PRIORITY - 1); // The time in ms between each poll of the 'available' method. // Default is '3 ms' poll_wait_time = 3; server_connections_list = new ArrayList(); farmer_closed = false; } /** * Establishes a connection with any current pending connections in the * 'pending_connections_list'. */ private void establishPendingConnections() throws IOException { synchronized (pending_connections_list) { int len = pending_connections_list.size(); // Move all pending connections into the current connections list. for (int i = 0; i < len; ++i) { // Get the connection and create the new connection state ServerConnection connection = (ServerConnection) pending_connections_list.remove(0); server_connections_list.add(new ServerConnectionState(connection)); } } } /** * Checks each connection in the 'service_connection_list' list. If there * is a command pending, and any previous commands on this connection have * completed, then this will spawn off a new process to deal with the * command. 
*/ private void checkCurrentConnections() { int len = server_connections_list.size(); for (int i = len - 1; i >= 0; --i) { ServerConnectionState connection_state = (ServerConnectionState) server_connections_list.get(i); try { // Is this connection not currently processing a command? if (!connection_state.isProcessingRequest()) { ServerConnection connection = connection_state.getConnection(); // Does this connection have a request pending? if (connection_state.hasPendingCommand() || connection.requestPending()) { // Set that we have a pending command connection_state.setPendingCommand(); connection_state.setProcessingRequest(); final ServerConnectionState current_state = connection_state; // // Execute this on a database worker thread. // final User conn_user = connection.getUser(); // DatabaseSystem.execute(conn_user, connection.getDatabase(), // new Runnable() { database.execute(null, null, new Runnable() { public void run() { try { // Process the next request that's pending. current_state.getConnection().processRequest(); } catch (IOException e) { Debug().writeException(Lvl.INFORMATION, e); } finally { // Then clear the state // This makes sure that this provider may accept new // commands again. current_state.clearInternal(); } } }); } // if (provider_state.hasPendingCommand() .... } // if (!provider_state.isProcessRequest()) } catch (IOException e) { // If an IOException is generated, we must remove this provider from // the list. try { connection_state.getConnection().close(); } catch (IOException e2) { /* ignore */ } server_connections_list.remove(i); // This happens if the connection closes. Debug().write(Lvl.INFORMATION, this, "IOException generated while checking connections, " + "removing provider."); Debug().writeException(Lvl.INFORMATION, e); } } } /** * Performs a ping on a single random connection. If the ping fails then * the connection is closed. 
*/ private void doPings() { int len = server_connections_list.size(); if (len == 0) { if (DISPLAY_STATS) { System.out.print("[TCPServer Stats] "); System.out.println("Ping tried but no connections."); } return; } int i = (int) (Math.random() * len); if (DISPLAY_STATS) { System.out.print("[TCPServer Stats] "); System.out.print("Pinging client "); System.out.print(i); System.out.println("."); } final ServerConnectionState connection_state = (ServerConnectionState) server_connections_list.get(i); // Is this provider not currently processing a command? if (!connection_state.isProcessingRequest()) { // Don't let a command interrupt the ping. connection_state.setProcessingRequest(); // ISSUE: Pings are executed under 'null' user and database... database.execute(null, null, new Runnable() { public void run() { try { // Ping the client? - This closes the provider if the // ping fails. connection_state.getConnection().ping(); } catch (IOException e) { // Close connection try { connection_state.getConnection().close(); } catch (IOException e2) { /* ignore */ } Debug().write(Lvl.ALERT, ServerFarmer.this, "Closed because ping failed."); Debug().writeException(Lvl.ALERT, e); } finally { connection_state.clearProcessingRequest(); } } }); } // if (!provider_state.isProcessRequest()) } /** * Displays statistics about the server. */ private void displayStatistics() { if (DISPLAY_STATS) { if (stat_display == 0) { stat_display = 500; System.out.print("[TCPServer Stats] "); System.out.print(commands_run); System.out.print(" run, "); System.out.print(commands_waited); System.out.print(" wait, "); System.out.print(server_connections_list.size()); System.out.print(" worker count"); System.out.println(); } else { --stat_display; } } } /** * Call this method to stop the farmer thread. */ public synchronized void close() { farmer_closed = true; } /** * The Runnable method of the farmer thread. 
*/ public void run() { int yield_count = 0; long do_ping_time = System.currentTimeMillis() + PING_BREAK; int ping_count = 200; final int method_poll_wait_time = poll_wait_time; Debug().write(Lvl.MESSAGE, this, "Polling frequency: " + method_poll_wait_time + "ms."); while (true) { try { // First, determine if there are any pending service providers // waiting to be established. if (pending_connections_list.size() > 0) { establishPendingConnections(); } checkCurrentConnections(); // Is it time to ping the clients? --ping_count; if (ping_count <= 0) { ping_count = 2000; long current_time = System.currentTimeMillis(); if (current_time > do_ping_time) { // Randomly ping doPings(); do_ping_time = current_time + PING_BREAK; } } if (yield_count <= 0) { synchronized (this) { // Wait for 3ms to give everything room to breath wait(method_poll_wait_time); yield_count = 3; } } else { synchronized (this) { // Exit if the farmer thread has been closed... if (farmer_closed == true) { return; } } Thread.yield(); --yield_count; } // Print out connection statistics every so often displayStatistics(); } catch (Throwable e) { Debug().write(Lvl.ERROR, this, "Connection Pool Farmer Error"); Debug().writeException(e); // Wait for two seconds (so debug log isn't spammed) synchronized (this) { try { wait(2000); } catch (InterruptedException e2) { /* ignore */ } } } } } }; /** * This contains state information about a ServerConnection that is being * maintained by the server. */ private final class ServerConnectionState { /** * The local variables. */ private ServerConnection connection; // private boolean is_establish; private boolean is_processing_request; private boolean is_pending_command; private boolean is_ping_client; /** * The Constructor. */ ServerConnectionState(ServerConnection connection) { this.connection = connection; clearInternal(); // is_establish = true; } /** * Sets the various states to true. 
*/ public synchronized void setProcessingRequest() { is_processing_request = true; } public synchronized void setPendingCommand() { is_pending_command = true; } public synchronized void setPingClient() { is_ping_client = true; } /** * Clears the internal state. */ public synchronized void clearInternal() { is_processing_request = false; is_pending_command = false; // is_establish = false; is_ping_client = false; } /** * Clears the flag that says we are processing a request. */ public synchronized void clearProcessingRequest() { is_processing_request = false; } /** * Queries the internal state. */ public synchronized ServerConnection getConnection() { return connection; } public synchronized boolean isProcessingRequest() { return is_processing_request; } public synchronized boolean hasPendingCommand() { return is_pending_command; } // public synchronized boolean isEstablishConnection() { // return is_establish; // } public synchronized boolean isPingClient() { return is_ping_client; } } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/jdbcserver/StreamJDBCServerConnection.java000066400000000000000000000123421330501023400320000ustar00rootroot00000000000000/** * com.mckoi.database.jdbcserver.StreamJDBCServerConnection 22 Jul 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package com.mckoi.database.jdbcserver; import com.mckoi.debug.DebugLogger; import com.mckoi.database.Database; import com.mckoi.database.jdbc.ProtocolConstants; import com.mckoi.database.jdbc.DatabaseInterface; import com.mckoi.util.LengthMarkedBufferedInputStream; import java.io.*; /** * A generic JDBC stream protocol server that reads JDBC commands from a * stream from each connection and dispatches the commands appropriately. * * @author Tobias Downer */ abstract class StreamJDBCServerConnection extends JDBCProcessor implements ServerConnection { /** * The size in bytes of the buffer used for writing information onto the * output stream to the client. */ private static final int OUTPUT_BUFFER_SIZE = 32768; /** * The size in bytes of the buffer used for reading information from the * input stream from the client. */ private static final int INPUT_BUFFER_SIZE = 16384; /** * The LengthMarkedBufferedInputStream we use to poll for commands from the * client. */ private LengthMarkedBufferedInputStream marked_input; /** * The output stream to the client formatted as a DataOutputStream. */ private DataOutputStream out; /** * Sets up the protocol connection. */ StreamJDBCServerConnection(DatabaseInterface db_interface, InputStream in, OutputStream out, DebugLogger logger) throws IOException { super(db_interface, logger); this.marked_input = new LengthMarkedBufferedInputStream(in); this.out = new DataOutputStream( new BufferedOutputStream(out, OUTPUT_BUFFER_SIZE)); } // ---------- Implemented from JDBCConnection ---------- // NOTE: There's a security issue for this method. See JDBCProcessor // for the details. public void sendEvent(byte[] event_msg) throws IOException { synchronized (out) { // Command length... out.writeInt(4 + 4 + event_msg.length); // Dispatch id... out.writeInt(-1); // Command id... out.writeInt(ProtocolConstants.DATABASE_EVENT); // The message... out.write(event_msg, 0, event_msg.length); // Flush command to server. 
out.flush(); } } // ---------- Implemented from ServerConnection ---------- /** * Inspects the input stream and determines in there's a command pending * to be processed. */ public boolean requestPending() throws IOException { int state = getState(); if (state == 100) { return marked_input.pollForCommand(Integer.MAX_VALUE); } else { return marked_input.pollForCommand(256); } } /** * Processes a request from this connection. */ public void processRequest() throws IOException { // Only allow 8 commands to execute in sequence before we free this // worker to the worker pool. // We have a limit incase of potential DOS problems. int sequence_limit = 8; // Read the command into a 'byte[]' array and pass to the command // processor. int com_length = marked_input.available(); while (com_length > 0) { byte[] command = new byte[com_length]; int read_index = 0; while (read_index < com_length) { read_index += marked_input.read(command, read_index, (com_length - read_index)); } // Process the command byte[] response = processJDBCCommand(command); if (response != null) { synchronized (out) { // Write the response to the client. out.writeInt(response.length); out.write(response); out.flush(); } } // If there's another command pending then process that one also, com_length = 0; if (sequence_limit > 0) { if (requestPending()) { com_length = marked_input.available(); --sequence_limit; } } } // while (com_length > 0) // // Response... // printByteArray(response); } /** * Block waiting for a complete command to become available. */ public void blockForRequest() throws IOException { marked_input.blockForCommand(); } /** * Pings the client to check it's still alive. */ public void ping() throws IOException { synchronized (out) { // Command length... out.writeInt(8); // Dispatch id... out.writeInt(-1); // Ping command id... out.writeInt(ProtocolConstants.PING); // Flush command to server. 
out.flush(); } } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/jdbcserver/TCPJDBCServerConnection.java000066400000000000000000000040611330501023400311720ustar00rootroot00000000000000/** * com.mckoi.database.jdbcserver.TCPJDBCServerConnection 22 Jul 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.jdbcserver; import com.mckoi.database.jdbc.DatabaseInterface; import com.mckoi.debug.DebugLogger; import java.net.Socket; import java.io.*; /** * A ServerConnection that processes JDBC queries from a client from a * TCP Socket. * * @author Tobias Downer */ final class TCPJDBCServerConnection extends StreamJDBCServerConnection { /** * The socket connection with the client. */ private Socket connection; /** * Is set to true when the connection to the client is closed. */ private boolean is_closed = false; /** * Constructs the ServerConnection object. */ TCPJDBCServerConnection(DatabaseInterface db_interface, Socket socket, DebugLogger logger) throws IOException { super(db_interface, socket.getInputStream(), socket.getOutputStream(), logger); this.connection = socket; } /** * Completely closes the connection to the client. 
*/ public void close() throws IOException { try { // Dispose the processor dispose(); } catch (Throwable e) { e.printStackTrace(); } // Close the socket connection.close(); is_closed = true; } /** * Returns true if the connection to the client has been closed. */ public boolean isClosed() throws IOException { return is_closed; } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/jdbcserver/TCPServer.java000066400000000000000000000167061330501023400265400ustar00rootroot00000000000000/** * com.mckoi.database.jdbcserver.TCPServer 21 Jul 2000 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.jdbcserver; import com.mckoi.database.DatabaseSystem; import com.mckoi.database.Database; import com.mckoi.debug.*; import java.net.ServerSocket; import java.net.Socket; import java.net.InetAddress; import java.io.IOException; import java.util.HashMap; import java.util.ResourceBundle; /** * A TCP/IP socket server that opens a single port and allows JDBC clients * to connect through the port to talk with the database. * * @author Tobias Downer */ public final class TCPServer { /** * The parent Database object that describes everything about the * database this TCP server is for. */ private Database database; /** * The ConnectionPoolServer that polls the ServerConnection for new commands * to process. 
*/ private ConnectionPoolServer connection_pool; /** * The ServerSocket object where the database server is bound. */ private ServerSocket server_socket; /** * The InetAddress the JDBC server is bound to. */ private InetAddress address; /** * The port the JDBC server is on. */ private int port; /** * The connection pool model used for this server. */ private String connection_pool_model; /** * Constructs the TCPServer over the given DatabaseSystem configuration. */ public TCPServer(Database database) { this.database = database; } /** * Returns a DebugLogger object that we can log debug messages to. */ public final DebugLogger Debug() { return database.Debug(); } /** * Returns the port the JDBC server is on. */ public int getJDBCPort() { return port; } /** * Checks to see if there's already something listening on the jdbc * port. Returns true if the jdbc port in the configuration is available, * otherwise returns false. */ public boolean checkAvailable(InetAddress bind_address, int tcp_port) { // MAJOR MAJOR HACK: We attempt to bind to the JDBC Port and if we get // an error then most likely there's already a database running on this // host. int port = tcp_port; // // Get information about how to set up the TCP port from the config // // bundle. // int port = Integer.parseInt(config.getString("jdbc_server_port")); try { // Bind the ServerSocket objects to the ports. server_socket = new ServerSocket(port, 50, bind_address); server_socket.close(); } catch (IOException e) { // If error then return false. return false; } return true; } /** * Starts the server running. This method returns immediately but spawns * its own thread. */ public void start(InetAddress bind_address, int tcp_port, String connection_pool_model) { this.address = bind_address; this.port = tcp_port; this.connection_pool_model = connection_pool_model; // // Get information about how to set up the TCP port from the config // // bundle. 
// port = Integer.parseInt(config.getString("jdbc_server_port")); // // // The 'tcp_connection_pool_thread_model' property determines the // // connection pool object to use. // connection_pool_model = "multi_threaded"; // try { // String cptm = config.getString("tcp_connection_pool_thread_model"); // if (cptm.equalsIgnoreCase("single_threaded")) { // connection_pool_model = "single_threaded"; // } // // Multi-threaded if 'tcp_connection_pool_thread_model' is anything // // other than 'single_threaded' // } // catch (java.util.MissingResourceException e) { // // If no property in the config assume multi-threaded // } // Choose our connection pool implementation if (connection_pool_model.equals("multi_threaded")) { this.connection_pool = new MultiThreadedConnectionPoolServer(database); } else if (connection_pool_model.equals("single_threaded")) { this.connection_pool = new SingleThreadedConnectionPoolServer(database); } try { // Bind the ServerSocket object to the port. server_socket = new ServerSocket(port, 50, bind_address); server_socket.setSoTimeout(0); } catch (IOException e) { Debug().writeException(e); Debug().write(Lvl.ERROR, this, "Unable to start a server socket on port: " + port); throw new Error(e.getMessage()); } // This thread blocks on the server socket. Thread listen_thread = new Thread() { public void run() { try { // Accept new connections and notify when one arrives while(true) { Socket s = server_socket.accept(); portConnection(s); } } catch (IOException e) { Debug().writeException(Lvl.WARNING, e); Debug().write(Lvl.WARNING, this, "Socket listen thread died."); } } }; // listen_thread.setDaemon(true); listen_thread.setName("Mckoi - TCP/IP Socket Accept"); listen_thread.start(); } /** * Called whenever a new connection has been received on the port. 
*/ private void portConnection(Socket socket) throws IOException { // TCP connections are formatted as; // 'TCP/[ip address]:[remote port]:[local port]' String host_string = "TCP/" + socket.getInetAddress().getHostAddress() + ":" + socket.getPort() + "@" + socket.getLocalAddress().getHostAddress() + ":" + socket.getLocalPort(); // String host_string = // "Host [" + socket.getInetAddress().getHostAddress() + "] " + // "port=" + socket.getPort() + " localport=" + socket.getLocalPort(); // Make a new DatabaseInterface for this connection, JDBCDatabaseInterface db_interface = new JDBCDatabaseInterface(database, host_string); TCPJDBCServerConnection connection = new TCPJDBCServerConnection(db_interface, socket, Debug()); // Add the provider onto the queue of providers that are serviced by // the server. connection_pool.addConnection(connection); } /** * Closes the JDBC Server. */ public void close() { if (server_socket != null) { try { server_socket.close(); } catch (IOException e) { Debug().write(Lvl.ERROR, this, "Error closing JDBC Server."); Debug().writeException(e); } } connection_pool.close(); } /** * Returns human understandable information about the server. */ public String toString() { StringBuffer buf = new StringBuffer(); buf.append("TCP JDBC Server ("); buf.append(connection_pool_model); buf.append(") on "); if (address != null) { buf.append(address.getHostAddress()); buf.append(" "); } buf.append("port: "); buf.append(getJDBCPort()); return new String(buf); } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/jdbcserver/package.html000066400000000000000000000212311330501023400263260ustar00rootroot00000000000000 com.mckoi.database.jdbcserver - Server and embedded-database classes

Implementation of the DatabaseInterface interface, including classes to handle local (embedded) mode and remote (client/server) mode.

See DatabaseInterface.

Local (Embedded) mode

When accessing a local database, the client uses an instance of LocalDatabaseInterface (as set up while Establishing a Connection) to directly access the underlying database. See Local Queries.

Remote (Client/Server) mode

When accessing a remote database, requests in the client are directed to an instance of JDBCDatabaseInterface (as set up while Establishing a Connection) to send the requests over a TCP connection to the database server. The server uses an instance of LocalDatabaseInterface to execute SQL statements in local mode on behalf of the client. See Remote Queries.

Local Queries

When an application is accessing a local database, or when considering query processing within the database server, SQL queries all go through LocalDatabaseInterface.execQuery. This method uses com.mckoi.database.DatabaseSystem.execute to execute the execQuery method (see Query Execution) of the contained JDBCDatabaseInterface in the first available WorkerThread. The calling thread waits for the WorkerThread to finish and return a QueryResponse, which is used by the caller to set up a MResultSet.

Remote Queries

When an application is running in remote mode, SQL queries are processed by the private ConnectionThread class within
com.mckoi.jdbc.RemoteDatabaseInterface. The executeQuery method in this class writes the sql query string to the socket attached to the server and returns an integer ID for that sql request. This is followed by a call to the getCommand method, which blocks until it times out or until it gets a reply from the server for the right ID. If a reply is received, the first byte is decoded as a status byte. On success, an inner-class instance of QueryResponse is returned; on error, error info is read from the response and used to create and throw an exception.

Data is written to the server in the calling thread. There is a separate thread (an instance of the private class RemoteDatabaseInterface.ConnectionThread in RemoteDatabaseInterface) to read the data coming back from the server, which is placed into a buffer where it can be accessed by other threads.

Query Execution

Queries in local mode and queries handled by the database server are both processed by JDBCDatabaseInterface.execQuery. This method always runs in a WorkerThread. The execQuery method takes these steps to execute the query:

Service Initialization

After the database server has been started and initialized its database, it begins listening for client connections. This happens in TCPServer.start. The server creates a ConnectionPoolServer, either a SingleThreadedConnectionPoolServer (when running in single-thead mode) or a MultiThreadedConnectionPoolServer (when running in multi-thread mode). The start method starts a listener thread and then returns.

The server listener thread sits in a loop blocked waiting for new connections. When a new connection is accepted, the server creates a new JDBCDatabaseInterface for that connection, wraps it in a TCPJDBCServerConnection, and adds that to the ConnectionPoolServer.

Client Requests

In single-thread mode, the SingleThreadedConnectionPoolServer handles all client reads in a single thread. For each connection with a request, it calls TCPJDBCServerConnection.processRequest on that connection, using com.mckoi.database.DatabaseSystem.execute to execute it in a WorkerThread.

In multi-thread mode, the MultiThreadedConnectionPoolServer starts a separate thread to handle reads for each connection. Each thread blocks until it has a request available, then calls TCPJDBCServerConnection.processRequest on that connection, using com.mckoi.database.DatabaseSystem.execute to execute it in a WorkerThread.

In either single-thread or multi-thread mode, client requests end up getting passed to TCPJDBCServerConnection.processRequest. This method reads the command data, then calls JDBCProcessor.processJDBCCommand with those bytes. That method calls JDBCProcessor.processQuery, which looks at the first byte of the requests to see what it is. If the request type is a database query (as opposed to retrieval of part of a ResultSet), it creates a SQLQuery from the data and calls execQuery on its JDBCDatabaseInterface (see Query Execution above). The result is encoded as a QueryResponse, which is converted back to bytes and written back to the client. Note that this is all running in a WorkerThread, so the call to DatabaseInterface.execQuery is in a WorkerThread, as required.

If there is an exception during processing of a client request, the message and traceback for the exception are encoded and returned to the client, which recognizes the packet as an exception, decodes the data on its side, and throws an exception in the client. mckoisqldb-1.0.6/src/main/java/com/mckoi/database/package.html000066400000000000000000000220241330501023400241760ustar00rootroot00000000000000 com.mckoi.database - The core database classes for Mckoi

The core database classes for Mckoi.

Data Representation

A database is represented by a single TableDataConglomerate, which contains a collection of MasterTableDataSource objects, each of which represents a single table, including both committed and uncommitted rows.

Access to a table is usually through the TableDataSource interface, or its subclass MutableTableDataSource The MasterTableDataSource objects wrap themselves in an instance of the private class MasterTableDataSource.MMutableTableDataSource, which implements MutableTableDataSource.

The format of a table is defined by DataTableDef, which is returned by TableDataSource.getDataTableDef, and which does not change during the lifetime of the table; if the table format is changed, a new MasterTableDataSource is created within the Transaction with the new format, and the data and unchanged columns from the old table are copied to the new one. The format of a column is defined by DataTableColumnDef.

Modifications to a row of a table are handled by a RowData, which carries a reference to the TableDataSource which it modified, and from which data is retrieved. Each column within the row is represented by a DataCell, which is either retrieved from the table or created by DataCellFactory.

Transactions

A database can be associated with one or more simultaneous Transaction objects. All changes to the data in the database are done through Transactions. The current set of open Transaction objects is managed by an OpenTransactionList object, which is pointed to by both the TableDataConglomerate and all of the MasterTableDataSource objects.

Changes to a row are handled by creating a new row in the MasterTableDataSource containing the changed data plus any old data which is unchanged. The data for the modified row are kept in a RowData, which references a QueryContext, which references a DatabaseConnection, which references a Transaction, which is the Transaction under which that change was made. Each field of the row is represented by a DataCell.

When an application issues an SQL request to update the database, it eventually makes it down to Statement.evaluate, for example in Insert. That evaluate method uses its DatabaseConnection to get a DataTable for a table name, which is a wrapper around the MutableTableDataSource returned by the DatabaseConnection's Transaction for the table of that name. The MutableTableDataSource (created by Transaction.getTable) is in turn a wrapper around a MasterTableDataSource created using the private class MasterTableDataSource.MMutableTableDataSource. The Statement uses its DataTable to create a RowData, then passes it to its DataTable, which passes it to the MutableTableDataSource, which passes it to the MasterTableDataSource, which actually makes the change.

The Transaction maintains a TransactionJournal, in which are listed all of the tables which have been changed by the Transaction.

Eventually the Transaction is closed (committed or rolled back), which is handled by the TableDataConglomerate ( processCommit or processRollback), which, for each MasterTableDataSource, gets a MasterTableJournal for it from the TransactionJournal specifying what changes have been made in that table from this Transaction, and tells that MasterTableDataSource to commit or roll back the changes in that MasterTableJournal.

Locking

Locking is used to control concurrent access by two requests in the same Transaction. This is handled during query execution in JDBCDatabaseInterface.execQuery

Each DatabaseConnection has associated with it a single LockingMechanism object, which is used to lock and unlock the DatabaseConnection's Transaction as a whole. The active lock is represented by a LockHandle, which is returned by LockingMechanism.lockTables, and which is passed back to LockingMechanism.unlockTables to drop the lock.

A lock on an individual table is represented by a Lock, which is kept in a LockingQueue, which maintains the link to the locked table.

Update Sequence

When a change is made to the database (insert, update, or delete), the following list shows the sequence in which various steps are taken:

mckoisqldb-1.0.6/src/main/java/com/mckoi/database/procedure/000077500000000000000000000000001330501023400237055ustar00rootroot00000000000000mckoisqldb-1.0.6/src/main/java/com/mckoi/database/procedure/SystemBackup.java000066400000000000000000000033371330501023400271700ustar00rootroot00000000000000/** * com.mckoi.database.procedure.SystemBackup 27 Feb 2003 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.procedure; import com.mckoi.database.ProcedureConnection; import com.mckoi.database.ProcedureException; import java.io.File; import java.io.IOException; /** * A stored procedure that backs up the entire database to the given directory * in the file system. Requires one parameter, the locate to back up the * database to. * * @author Tobias Downer */ public class SystemBackup { /** * The stored procedure invokation method. 
*/ public static String invoke(ProcedureConnection db_connection, String path) { File f = new File(path); if (!f.exists() || !f.isDirectory()) { throw new ProcedureException("Path '" + path + "' doesn't exist or is not a directory."); } try { db_connection.getDatabase().liveCopyTo(f); return path; } catch (IOException e) { e.printStackTrace(); throw new ProcedureException("IO Error: " + e.getMessage()); } } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/regexbridge/000077500000000000000000000000001330501023400242045ustar00rootroot00000000000000mckoisqldb-1.0.6/src/main/java/com/mckoi/database/regexbridge/JavaRegex.java000066400000000000000000000073041330501023400267270ustar00rootroot00000000000000/** * com.mckoi.database.regexbridge.JavaRegex 06 Mar 2002 * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000-2018 Diehl and Associates, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.mckoi.database.regexbridge; import com.mckoi.database.Table; import com.mckoi.database.TObject; import com.mckoi.util.IntegerVector; import java.util.regex.*; /** * A bridge to the internal Java regular expression library that was introduced * in Java 1.4. This bridge will only work if the regular expression API * is available in the class library. It is not available in 1.3 and 1.2. 
* * @author Tobias Downer */ public class JavaRegex implements com.mckoi.database.RegexLibrary { public boolean regexMatch(String regular_expression, String expression_ops, String value) { try { // PENDING: Compile and cache most commonly used regular expressions... int c_flags = 0; if (expression_ops != null) { if (expression_ops.indexOf('i') != -1) { c_flags += Pattern.CASE_INSENSITIVE; } if (expression_ops.indexOf('s') != -1) { c_flags += Pattern.DOTALL; } if (expression_ops.indexOf('m') != -1) { c_flags += Pattern.MULTILINE; } } Pattern pattern = Pattern.compile(regular_expression, c_flags); Matcher matcher = pattern.matcher(value); return matcher.matches(); } catch (PatternSyntaxException e) { // Incorrect syntax means we always match to false, return false; } } public IntegerVector regexSearch(Table table, int column, String regular_expression, String expression_ops) { // Get the ordered column, IntegerVector row_list = table.selectAll(column); // The result matched rows, IntegerVector result_list = new IntegerVector(); // Make into a new list that matches the pattern, Pattern pattern; try { // PENDING: Compile and cache most commonly used regular expressions... int c_flags = 0; if (expression_ops != null) { if (expression_ops.indexOf('i') != -1) { c_flags += Pattern.CASE_INSENSITIVE; } if (expression_ops.indexOf('s') != -1) { c_flags += Pattern.DOTALL; } if (expression_ops.indexOf('m') != -1) { c_flags += Pattern.MULTILINE; } } pattern = Pattern.compile(regular_expression, c_flags); } catch (PatternSyntaxException e) { // Incorrect syntax means we always match to an empty list, return result_list; } // For each row in the column, test it against the regular expression. int size = row_list.size(); for (int i = 0; i < size; ++i) { int row_index = row_list.intAt(i); TObject cell = table.getCellContents(column, row_index); // Only try and match against non-null cells. 
if (!cell.isNull()) { Object ob = cell.getObject(); String str = ob.toString(); // If the column matches the regular expression then return it, if (pattern.matcher(str).matches()) { result_list.addInt(row_index); } } } return result_list; } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/regexbridge/package.html000066400000000000000000000005001330501023400264600ustar00rootroot00000000000000 com.mckoi.database.regexbridge - Implementations of the RegEx interface

Implementations of the RegexLibrary interface, which allows the application to select which regular expression package to use.

See RegexLibrary. mckoisqldb-1.0.6/src/main/java/com/mckoi/database/sql/000077500000000000000000000000001330501023400225145ustar00rootroot00000000000000mckoisqldb-1.0.6/src/main/java/com/mckoi/database/sql/ParseException.java000066400000000000000000000143371330501023400263200ustar00rootroot00000000000000/* Generated By:JavaCC: Do not edit this line. ParseException.java Version 2.1 */ package com.mckoi.database.sql; /** * This exception is thrown when parse errors are encountered. * You can explicitly create objects of this exception type by * calling the method generateParseException in the generated * parser. * * You can modify this class to customize your error reporting * mechanisms so long as you retain the public fields. */ public class ParseException extends Exception { /** * This constructor is used by the method "generateParseException" * in the generated parser. Calling this constructor generates * a new object of this type with the fields "currentToken", * "expectedTokenSequences", and "tokenImage" set. The boolean * flag "specialConstructor" is also set to true to indicate that * this constructor was used to create this object. * This constructor calls its super class with the empty string * to force the "toString" method of parent class "Throwable" to * print the error message in the form: * ParseException: */ public ParseException(Token currentTokenVal, int[][] expectedTokenSequencesVal, String[] tokenImageVal ) { super(""); specialConstructor = true; currentToken = currentTokenVal; expectedTokenSequences = expectedTokenSequencesVal; tokenImage = tokenImageVal; } /** * The following constructors are for use by you for whatever * purpose you can think of. Constructing the exception in this * manner makes the exception behave in the normal way - i.e., as * documented in the class "Throwable". The fields "errorToken", * "expectedTokenSequences", and "tokenImage" do not contain * relevant information. 
The JavaCC generated code does not use * these constructors. */ public ParseException() { super(); specialConstructor = false; } public ParseException(String message) { super(message); specialConstructor = false; } /** * This variable determines which constructor was used to create * this object and thereby affects the semantics of the * "getMessage" method (see below). */ protected boolean specialConstructor; /** * This is the last token that has been consumed successfully. If * this object has been created due to a parse error, the token * followng this token will (therefore) be the first error token. */ public Token currentToken; /** * Each entry in this array is an array of integers. Each array * of integers represents a sequence of tokens (by their ordinal * values) that is expected at this point of the parse. */ public int[][] expectedTokenSequences; /** * This is a reference to the "tokenImage" array of the generated * parser within which the parse error occurred. This array is * defined in the generated ...Constants interface. */ public String[] tokenImage; /** * This method has the standard behavior when this object has been * created using the standard constructors. Otherwise, it uses * "currentToken" and "expectedTokenSequences" to generate a parse * error message and returns it. If this object has been created * due to a parse error, and you do not catch it (it gets thrown * from the parser), then this method is called during the printing * of the final stack trace, and hence the correct error message * gets displayed. 
*/ public String getMessage() { if (!specialConstructor) { return super.getMessage(); } String expected = ""; int maxSize = 0; for (int i = 0; i < expectedTokenSequences.length; i++) { if (maxSize < expectedTokenSequences[i].length) { maxSize = expectedTokenSequences[i].length; } for (int j = 0; j < expectedTokenSequences[i].length; j++) { expected += tokenImage[expectedTokenSequences[i][j]] + " "; } if (expectedTokenSequences[i][expectedTokenSequences[i].length - 1] != 0) { expected += "..."; } expected += eol + " "; } String retval = "Encountered \""; Token tok = currentToken.next; for (int i = 0; i < maxSize; i++) { if (i != 0) retval += " "; if (tok.kind == 0) { retval += tokenImage[0]; break; } retval += add_escapes(tok.image); tok = tok.next; } retval += "\" at line " + currentToken.next.beginLine + ", column " + currentToken.next.beginColumn; retval += "." + eol; if (expectedTokenSequences.length == 1) { retval += "Was expecting:" + eol + " "; } else { retval += "Was expecting one of:" + eol + " "; } retval += expected; return retval; } /** * The end of line string for this machine. */ protected String eol = System.getProperty("line.separator", "\n"); /** * Used to convert raw characters to their escaped version * when these raw version cannot be used as part of an ASCII * string literal. 
*/ protected String add_escapes(String str) { StringBuffer retval = new StringBuffer(); char ch; for (int i = 0; i < str.length(); i++) { switch (str.charAt(i)) { case 0 : continue; case '\b': retval.append("\\b"); continue; case '\t': retval.append("\\t"); continue; case '\n': retval.append("\\n"); continue; case '\f': retval.append("\\f"); continue; case '\r': retval.append("\\r"); continue; case '\"': retval.append("\\\""); continue; case '\'': retval.append("\\\'"); continue; case '\\': retval.append("\\\\"); continue; default: if ((ch = str.charAt(i)) < 0x20 || ch > 0x7e) { String s = "0000" + Integer.toString(ch, 16); retval.append("\\u" + s.substring(s.length() - 4, s.length())); } else { retval.append(ch); } continue; } } return retval.toString(); } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/sql/SQL.java000066400000000000000000005414441330501023400240320ustar00rootroot00000000000000/* Generated By:JavaCC: Do not edit this line. SQL.java */ package com.mckoi.database.sql; import com.mckoi.database.TType; import com.mckoi.database.Assignment; import com.mckoi.database.JoiningSet; import com.mckoi.database.Expression; import com.mckoi.database.Variable; import com.mckoi.database.FunctionDef; import com.mckoi.database.FunctionFactory; import com.mckoi.database.Operator; import com.mckoi.database.StatementTree; import com.mckoi.database.ParameterSubstitution; import com.mckoi.database.global.*; import com.mckoi.database.interpret.*; import com.mckoi.database.TObject; import java.util.ArrayList; import java.util.Stack; public class SQL implements SQLConstants { // State variables for the parser, /** * Set to true if the SQL identifiers are converted to upper case. * NOTE: Purely experimental feature! */ private boolean case_insensitive_identifiers = false; /** * The parameter id. */ private int parameter_id = 0; /** * Resets the parameter id. This MUST be called before a parser is used * to parse a statement. 
*/ public void reset() { parameter_id = 0; } /** * Creates and returns a parameter substitution. This is called when the * parser comes across a '?' style object. This object is used to mark an * expression with a place mark that can be substituted for a value later. */ public ParameterSubstitution createSubstitution(String image) { ParameterSubstitution ps = new ParameterSubstitution(parameter_id); ++parameter_id; return ps; } /** * If the parser has been defined as case insensitive then this * returns the uppercase version of the given string. * * NOTE: This actually doesn't do anything because the case is now resolved * outside the parser. */ public String caseCheck(String identif) { // if (case_insensitive_identifiers) { // return identif.toUpperCase(); // } return identif; } /** * Helper for expression parsing. * Called when an end parenthese has been found. */ public void expEndParen(Expression exp, Stack stack) { Operator op = (Operator) stack.pop(); while (!op.is("(")) { addOperatorToExpression(exp, op); op = (Operator) stack.pop(); } } /** * Helper for expression parsing. * Called when an operator has been read in. This needs to check precedence * and add the operator to the expression as appropriate. */ public void expOperator(Expression exp, Stack stack, Operator op) { int precedence = op.precedence(); flushOperatorStack(exp, stack, precedence); stack.push(op); } /** * Flush the operator stack until the stack is either empty or the top * element is either a "(" or of a precedence lower than the given * precedence. */ public void flushOperatorStack(Expression exp, Stack stack, int precedence) { if (!stack.empty()) { Operator top_op = (Operator) stack.pop(); while (!top_op.is("(") && top_op.precedence() >= precedence) { addOperatorToExpression(exp, top_op); if (stack.empty()) { return; } top_op = (Operator) stack.pop(); } stack.push(top_op); } } /** * Helper for expression parsing. * Called when an entire expression has been read in. 
We need to empty * the stack. */ public void expEnd(Expression exp, Stack stack) { while (!stack.empty()) { Operator op = (Operator) stack.pop(); addOperatorToExpression(exp, op); } } /** * Helper for expression parsing. * Adds an operator to the given expression. */ public void addOperatorToExpression(Expression exp, Operator op) { if (op.is("not")) { exp.addElement(null); } exp.addOperator(op); } public static void main(String args[]) throws ParseException { SQL parser = new SQL(System.in); parser.Test(); } final public void Test() throws ParseException { parseExpression(); jj_consume_token(207); } // Parses a single expression. Useed in 'com.mckoi.database.Expression.parse' method. final public Expression parseExpression() throws ParseException { Expression exp; exp = DoExpression(); jj_consume_token(0); {if (true) return exp;} throw new Error("Missing return statement in function"); } // Statement that ends with a ';' final public StatementTree Statement() throws ParseException { StatementTree ob; switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case SELECT: ob = Select(); break; case UPDATE: ob = Update(); break; case ALTER: ob = Alter(); break; case COMPACT: ob = Compact(); break; case CREATE: ob = Create(); break; case DROP: ob = Drop(); break; case DELETE: ob = Delete(); break; case INSERT: ob = Insert(); break; case DESCRIBE: ob = Describe(); break; case SHOW: ob = Show(); break; case CALL: ob = Call(); break; case GRANT: ob = Grant(); break; case REVOKE: ob = Revoke(); break; case COMMIT: case ROLLBACK: ob = CompleteTransaction(); break; case SET: ob = Set(); break; case SHUTDOWN: ob = ShutDown(); break; default: jj_la1[0] = jj_gen; jj_consume_token(-1); throw new ParseException(); } switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case 207: jj_consume_token(207); break; case 0: jj_consume_token(0); break; default: jj_la1[1] = jj_gen; jj_consume_token(-1); throw new ParseException(); } {if (true) return ob;} throw new Error("Missing return statement in function"); } // All 
statements that start with final public StatementTree Create() throws ParseException { StatementTree ob; jj_consume_token(CREATE); switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case TABLE: case TEMPORARY: ob = CreateTable(); break; case TRIGGER: case CALLBACK: ob = CreateTrigger(); break; case FUNCTION: ob = CreateFunction(); break; case INDEX: case UNIQUE: ob = CreateIndex(); break; case SCHEMA: ob = CreateSchema(); break; case SEQUENCE: ob = CreateSequence(); break; case USER: ob = CreateUser(); break; case VIEW: ob = CreateView(); break; default: jj_la1[2] = jj_gen; jj_consume_token(-1); throw new ParseException(); } {if (true) return ob;} throw new Error("Missing return statement in function"); } // All statements that start with final public StatementTree Drop() throws ParseException { StatementTree ob; jj_consume_token(DROP); switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case TABLE: ob = DropTable(); break; case TRIGGER: case CALLBACK: ob = DropTrigger(); break; case FUNCTION: ob = DropFunction(); break; case INDEX: ob = DropIndex(); break; case SCHEMA: ob = DropSchema(); break; case SEQUENCE: ob = DropSequence(); break; case USER: ob = DropUser(); break; case VIEW: ob = DropView(); break; default: jj_la1[3] = jj_gen; jj_consume_token(-1); throw new ParseException(); } {if (true) return ob;} throw new Error("Missing return statement in function"); } final public StatementTree Select() throws ParseException { StatementTree cmd = new StatementTree("com.mckoi.database.interpret.Select"); TableSelectExpression table_expr; ArrayList order_by = new ArrayList(); table_expr = GetTableSelectExpression(); switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case ORDERBY: jj_consume_token(ORDERBY); SelectOrderByList(order_by); break; default: jj_la1[4] = jj_gen; ; } cmd.putObject("table_expression", table_expr); cmd.putObject("order_by", order_by); {if (true) return cmd;} throw new Error("Missing return statement in function"); } final public StatementTree Update() throws ParseException { 
StatementTree cmd = new StatementTree("com.mckoi.database.interpret.UpdateTable"); String table_name; ArrayList assignments = new ArrayList(); SearchExpression where_clause = new SearchExpression(); int limit = -1; jj_consume_token(UPDATE); table_name = TableName(); jj_consume_token(SET); AssignmentList(assignments); switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case WHERE: jj_consume_token(WHERE); ConditionsExpression(where_clause); break; default: jj_la1[5] = jj_gen; ; } switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case LIMIT: jj_consume_token(LIMIT); limit = PositiveIntegerConstant(); break; default: jj_la1[6] = jj_gen; ; } cmd.putObject("table_name", table_name); cmd.putObject("assignments", assignments); cmd.putObject("where_clause", where_clause); cmd.putInt("limit", limit); {if (true) return cmd;} throw new Error("Missing return statement in function"); } final public StatementTree Alter() throws ParseException { StatementTree cmd; jj_consume_token(ALTER); switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case CREATE: case TABLE: cmd = AlterTable(); break; case USER: cmd = AlterUser(); break; default: jj_la1[7] = jj_gen; jj_consume_token(-1); throw new ParseException(); } {if (true) return cmd;} throw new Error("Missing return statement in function"); } final public StatementTree AlterTable() throws ParseException { StatementTree cmd = new StatementTree("com.mckoi.database.interpret.AlterTable"); String table_name; AlterTableAction action; StatementTree create_statement; switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case TABLE: jj_consume_token(TABLE); table_name = TableName(); action = GetAlterTableAction(); cmd.putObject("table_name", table_name); cmd.putObject("alter_action", action); break; case CREATE: jj_consume_token(CREATE); create_statement = CreateTable(); cmd.putObject("create_statement", create_statement); break; default: jj_la1[8] = jj_gen; jj_consume_token(-1); throw new ParseException(); } {if (true) return cmd;} throw new Error("Missing return statement in function"); 
} final public StatementTree Compact() throws ParseException { StatementTree cmd = new StatementTree("com.mckoi.database.interpret.Compact"); String table_name; jj_consume_token(COMPACT); jj_consume_token(TABLE); table_name = TableName(); cmd.putObject("table_name", table_name); {if (true) return cmd;} throw new Error("Missing return statement in function"); } final public StatementTree CreateTable() throws ParseException { StatementTree cmd = new StatementTree("com.mckoi.database.interpret.CreateTable"); boolean temporary = false; boolean only_if_not_exists = false; String table_name; ArrayList column_list = new ArrayList(); ArrayList constraint_list = new ArrayList(); Expression check_expression; switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case TEMPORARY: jj_consume_token(TEMPORARY); temporary = true; break; default: jj_la1[9] = jj_gen; ; } jj_consume_token(TABLE); switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case IF: jj_consume_token(IF); jj_consume_token(NOT); jj_consume_token(EXISTS); only_if_not_exists = true; break; default: jj_la1[10] = jj_gen; ; } table_name = TableName(); ColumnDeclarationList(column_list, constraint_list); switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case CHECK: jj_consume_token(CHECK); check_expression = DoExpression(); ConstraintDef check_constraint = new ConstraintDef(); check_constraint.setCheck(check_expression); constraint_list.add(check_constraint); break; default: jj_la1[11] = jj_gen; ; } cmd.putBoolean("temporary", temporary); cmd.putBoolean("only_if_not_exists", only_if_not_exists); cmd.putObject("table_name", table_name); cmd.putObject("column_list", column_list); cmd.putObject("constraint_list", constraint_list); {if (true) return cmd;} throw new Error("Missing return statement in function"); } final public StatementTree CreateTrigger() throws ParseException { StatementTree cmd = new StatementTree("com.mckoi.database.interpret.CreateTrigger"); boolean callback; String trigger_name; ArrayList trigger_types = new ArrayList(); String 
table_name; String before_after; String procedure_name; Expression[] procedure_args; switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case CALLBACK: jj_consume_token(CALLBACK); jj_consume_token(TRIGGER); trigger_name = TriggerName(); TriggerTypes(trigger_types); jj_consume_token(ON); table_name = TableName(); cmd.putObject("type", "callback_trigger"); break; case TRIGGER: jj_consume_token(TRIGGER); trigger_name = TriggerName(); before_after = BeforeOrAfter(); TriggerTypes(trigger_types); jj_consume_token(ON); table_name = TableName(); jj_consume_token(FOR); jj_consume_token(EACH); jj_consume_token(ROW); jj_consume_token(EXECUTE); jj_consume_token(PROCEDURE); procedure_name = FunctionName(); jj_consume_token(208); procedure_args = ExpressionList(); jj_consume_token(209); cmd.putObject("type", "procedure_trigger"); cmd.putObject("before_after", before_after); cmd.putObject("procedure_name", procedure_name); cmd.putObject("procedure_args", procedure_args); break; default: jj_la1[12] = jj_gen; jj_consume_token(-1); throw new ParseException(); } cmd.putObject("trigger_name", trigger_name); cmd.putObject("trigger_types", trigger_types); cmd.putObject("table_name", table_name); {if (true) return cmd;} throw new Error("Missing return statement in function"); } final public StatementTree DropTrigger() throws ParseException { StatementTree cmd = new StatementTree("com.mckoi.database.interpret.DropTrigger"); String trigger_name; String type = null; switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case CALLBACK: jj_consume_token(CALLBACK); jj_consume_token(TRIGGER); trigger_name = TriggerName(); type = "callback_trigger"; break; case TRIGGER: jj_consume_token(TRIGGER); trigger_name = TriggerName(); type = "procedure_trigger"; cmd.putObject("trigger_name", trigger_name); cmd.putObject("type", type); {if (true) return cmd;} break; default: jj_la1[13] = jj_gen; jj_consume_token(-1); throw new ParseException(); } throw new Error("Missing return statement in function"); } final public StatementTree 
CreateFunction() throws ParseException { StatementTree cmd = new StatementTree("com.mckoi.database.interpret.Function"); cmd.putObject("type", "create"); String function_name; ArrayList arg_names = new ArrayList(); ArrayList arg_types = new ArrayList(); Token loc_name; TType return_type = null; jj_consume_token(FUNCTION); function_name = FunctionName(); jj_consume_token(208); ProcParameterList(arg_names, arg_types); jj_consume_token(209); switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case RETURNS: jj_consume_token(RETURNS); return_type = GetTType(); break; default: jj_la1[14] = jj_gen; ; } jj_consume_token(LANGUAGE); jj_consume_token(JAVA); jj_consume_token(NAME); loc_name = jj_consume_token(STRING_LITERAL); cmd.putObject("function_name", function_name); cmd.putObject("arg_names", arg_names); cmd.putObject("arg_types", arg_types); // Note that 'location_name' will be a TObject cmd.putObject("location_name", Util.toParamObject(loc_name, case_insensitive_identifiers)); cmd.putObject("return_type", return_type); {if (true) return cmd;} throw new Error("Missing return statement in function"); } final public StatementTree DropFunction() throws ParseException { StatementTree cmd = new StatementTree("com.mckoi.database.interpret.Function"); cmd.putObject("type", "drop"); String function_name; jj_consume_token(FUNCTION); function_name = FunctionName(); cmd.putObject("function_name", function_name); {if (true) return cmd;} throw new Error("Missing return statement in function"); } final public StatementTree CreateSchema() throws ParseException { StatementTree cmd = new StatementTree("com.mckoi.database.interpret.Schema"); cmd.putObject("type", "create"); String schema_name; jj_consume_token(SCHEMA); schema_name = SchemaName(); cmd.putObject("schema_name", schema_name); {if (true) return cmd;} throw new Error("Missing return statement in function"); } final public StatementTree DropSchema() throws ParseException { StatementTree cmd = new 
StatementTree("com.mckoi.database.interpret.Schema"); cmd.putObject("type", "drop"); String schema_name; jj_consume_token(SCHEMA); schema_name = SchemaName(); cmd.putObject("schema_name", schema_name); {if (true) return cmd;} throw new Error("Missing return statement in function"); } final public StatementTree CreateView() throws ParseException { StatementTree cmd = new StatementTree("com.mckoi.database.interpret.ViewManager"); String view_name; TableSelectExpression select_cmd; ArrayList col_list = new ArrayList(); jj_consume_token(VIEW); view_name = TableName(); switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case 208: jj_consume_token(208); BasicColumnList(col_list); jj_consume_token(209); break; default: jj_la1[15] = jj_gen; ; } jj_consume_token(AS); select_cmd = GetTableSelectExpression(); cmd.putObject("type", "create"); cmd.putObject("view_name", view_name); cmd.putObject("column_list", col_list); cmd.putObject("select_expression", select_cmd); {if (true) return cmd;} throw new Error("Missing return statement in function"); } final public StatementTree DropView() throws ParseException { StatementTree cmd = new StatementTree("com.mckoi.database.interpret.ViewManager"); String view_name; jj_consume_token(VIEW); view_name = TableName(); cmd.putObject("type", "drop"); cmd.putObject("view_name", view_name); {if (true) return cmd;} throw new Error("Missing return statement in function"); } final public StatementTree CreateIndex() throws ParseException { StatementTree cmd = new StatementTree("com.mckoi.database.interpret.NoOp"); switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case UNIQUE: jj_consume_token(UNIQUE); break; default: jj_la1[16] = jj_gen; ; } jj_consume_token(INDEX); IndexName(); jj_consume_token(ON); TableName(); jj_consume_token(208); BasicColumnList(new ArrayList()); jj_consume_token(209); {if (true) return cmd;} throw new Error("Missing return statement in function"); } final public StatementTree DropTable() throws ParseException { StatementTree cmd = new 
StatementTree("com.mckoi.database.interpret.DropTable"); boolean only_if_exists = false; String table_name; ArrayList table_list = new ArrayList(); jj_consume_token(TABLE); switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case IF: jj_consume_token(IF); jj_consume_token(EXISTS); only_if_exists = true; break; default: jj_la1[17] = jj_gen; ; } table_name = TableName(); table_list.add(table_name); label_1: while (true) { switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case 210: ; break; default: jj_la1[18] = jj_gen; break label_1; } jj_consume_token(210); table_name = TableName(); table_list.add(table_name); } cmd.putBoolean("only_if_exists", only_if_exists); cmd.putObject("table_list", table_list); {if (true) return cmd;} throw new Error("Missing return statement in function"); } final public StatementTree DropIndex() throws ParseException { StatementTree cmd = new StatementTree("com.mckoi.database.interpret.NoOp"); jj_consume_token(INDEX); IndexName(); jj_consume_token(ON); TableName(); {if (true) return cmd;} throw new Error("Missing return statement in function"); } final public StatementTree Call() throws ParseException { StatementTree cmd = new StatementTree("com.mckoi.database.interpret.Call"); String proc_name; Expression[] args = null; jj_consume_token(CALL); proc_name = ProcedureName(); jj_consume_token(208); args = ExpressionList(); jj_consume_token(209); cmd.putObject("proc_name", proc_name); cmd.putObject("args", args); {if (true) return cmd;} throw new Error("Missing return statement in function"); } final public StatementTree CreateSequence() throws ParseException { StatementTree cmd = new StatementTree("com.mckoi.database.interpret.Sequence"); cmd.putObject("type", "create"); String seq_name; Expression v; jj_consume_token(SEQUENCE); seq_name = SequenceName(); cmd.putObject("seq_name", seq_name); switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case INCREMENT: jj_consume_token(INCREMENT); v = DoExpression(); cmd.putObject("increment", v); break; default: jj_la1[19] = jj_gen; ; } 
switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case MINVALUE: jj_consume_token(MINVALUE); v = DoExpression(); cmd.putObject("min_value", v); break; default: jj_la1[20] = jj_gen; ; } switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case MAXVALUE: jj_consume_token(MAXVALUE); v = DoExpression(); cmd.putObject("max_value", v); break; default: jj_la1[21] = jj_gen; ; } switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case START: jj_consume_token(START); v = DoExpression(); cmd.putObject("start", v); break; default: jj_la1[22] = jj_gen; ; } switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case CACHE: jj_consume_token(CACHE); v = DoExpression(); cmd.putObject("cache", v); break; default: jj_la1[23] = jj_gen; ; } switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case CYCLE: jj_consume_token(CYCLE); cmd.putObject("cycle", "yes"); break; default: jj_la1[24] = jj_gen; ; } {if (true) return cmd;} throw new Error("Missing return statement in function"); } final public StatementTree DropSequence() throws ParseException { StatementTree cmd = new StatementTree("com.mckoi.database.interpret.Sequence"); cmd.putObject("type", "drop"); String seq_name; jj_consume_token(SEQUENCE); seq_name = SequenceName(); cmd.putObject("seq_name", seq_name); {if (true) return cmd;} throw new Error("Missing return statement in function"); } final public StatementTree CreateUser() throws ParseException { StatementTree cmd = new StatementTree("com.mckoi.database.interpret.UserManager"); cmd.putObject("type", "CREATE USER"); jj_consume_token(USER); UserManagerCommand(cmd); {if (true) return cmd;} throw new Error("Missing return statement in function"); } final public StatementTree AlterUser() throws ParseException { StatementTree cmd = new StatementTree("com.mckoi.database.interpret.UserManager"); cmd.putObject("type", "ALTER USER"); jj_consume_token(USER); UserManagerCommand(cmd); {if (true) return cmd;} throw new Error("Missing return statement in function"); } final public StatementTree DropUser() throws ParseException { StatementTree cmd = new 
StatementTree("com.mckoi.database.interpret.UserManager"); cmd.putObject("type", "DROP USER"); String username; jj_consume_token(USER); username = UserName(); cmd.putObject("username", username); {if (true) return cmd;} throw new Error("Missing return statement in function"); } final public void UserManagerCommand(StatementTree cmd) throws ParseException { String username; Expression password_exp; Expression[] groups_list = null; String lock_status = null; username = UserName(); jj_consume_token(SET); jj_consume_token(PASSWORD); password_exp = DoExpression(); if (jj_2_1(2)) { jj_consume_token(SET); jj_consume_token(GROUPS); groups_list = ExpressionList(); } else { ; } switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case SET: jj_consume_token(SET); jj_consume_token(ACCOUNT); switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case LOCK: jj_consume_token(LOCK); lock_status="LOCK"; break; case UNLOCK: jj_consume_token(UNLOCK); lock_status="UNLOCK"; break; default: jj_la1[25] = jj_gen; jj_consume_token(-1); throw new ParseException(); } break; default: jj_la1[26] = jj_gen; ; } cmd.putObject("username", username); cmd.putObject("password_expression", password_exp); cmd.putObject("groups_list", groups_list); cmd.putObject("lock_status", lock_status); } final public StatementTree Delete() throws ParseException { StatementTree cmd = new StatementTree("com.mckoi.database.interpret.Delete"); String table_name; SearchExpression where_clause = new SearchExpression(); int limit = -1; jj_consume_token(DELETE); jj_consume_token(FROM); table_name = TableName(); switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case WHERE: jj_consume_token(WHERE); ConditionsExpression(where_clause); break; default: jj_la1[27] = jj_gen; ; } switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case LIMIT: jj_consume_token(LIMIT); limit = PositiveIntegerConstant(); break; default: jj_la1[28] = jj_gen; ; } cmd.putObject("table_name", table_name); cmd.putObject("where_clause", where_clause); cmd.putInt("limit", limit); {if (true) return cmd;} 
throw new Error("Missing return statement in function");
  }

// ---------------------------------------------------------------------------
// NOTE(review): this region is JavaCC-generated parser code (jj_consume_token,
// jj_la1, jj_ntk and the "Missing return statement" terminators are JavaCC
// artifacts).  Do not hand-edit the parsing logic; edit the .jj grammar and
// regenerate.  Comments below were added for readability only.
// Token codes 208/209/210 are presumably the '(' / ')' / ',' literals of the
// generated token set (inferred from how they delimit lists) -- TODO confirm
// against the generated *Constants interface.
// ---------------------------------------------------------------------------

// Parses an INSERT statement:
//   INSERT [INTO] table [(col, ...)] ( VALUES (...), ... | <select> )
// | INSERT [INTO] table SET col = exp, ...
// Produces a StatementTree for com.mckoi.database.interpret.Insert whose
// "type" key is "from_values", "from_select" or "from_set".
  final public StatementTree Insert() throws ParseException {
  StatementTree cmd = new StatementTree("com.mckoi.database.interpret.Insert");
  String table_name;
  ArrayList col_list = new ArrayList();
  ArrayList data_list = new ArrayList(); // ( Array of Expression[] )
  StatementTree select = null;
  ArrayList assignments = new ArrayList();
  String type;
    jj_consume_token(INSERT);
    // 'INTO' is optional.
    switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
    case INTO:
      jj_consume_token(INTO);
      break;
    default:
      jj_la1[29] = jj_gen;
      ;
    }
    table_name = TableName();
    switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
    case SELECT:
    case VALUES:
    case 208:
      // Optional explicit column list: '(' col, ... ')'
      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
      case 208:
        jj_consume_token(208);
        BasicColumnList(col_list);
        jj_consume_token(209);
        break;
      default:
        jj_la1[30] = jj_gen;
        ;
      }
      // Either VALUES (...) data rows, or a nested SELECT as the data source.
      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
      case VALUES:
        jj_consume_token(VALUES);
        InsertDataList(data_list);
                                 type = "from_values";
        break;
      case SELECT:
        select = Select();
                          type = "from_select";
        break;
      default:
        jj_la1[31] = jj_gen;
        jj_consume_token(-1);
        throw new ParseException();
      }
      break;
    case SET:
      // MySQL-style 'INSERT ... SET a = x, b = y'.
      jj_consume_token(SET);
      AssignmentList(assignments);
                                  type = "from_set";
      break;
    default:
      jj_la1[32] = jj_gen;
      jj_consume_token(-1);
      throw new ParseException();
    }
    cmd.putObject("table_name", table_name);
    cmd.putObject("col_list", col_list);
    cmd.putObject("data_list", data_list);
    cmd.putObject("select", select);
    cmd.putObject("assignments", assignments);
    cmd.putObject("type", type);
    {if (true) return cmd;}
    throw new Error("Missing return statement in function");
  }

// Parses 'DESCRIBE table' and maps it onto the Show statement
// ("describe_table") with an empty where clause.
  final public StatementTree Describe() throws ParseException {
  StatementTree cmd = new StatementTree("com.mckoi.database.interpret.Show");
    cmd.putObject("show", "describe_table");
  String table_name;
    jj_consume_token(DESCRIBE);
    table_name = TableName();
    cmd.putObject("table_name", table_name);
    cmd.putObject("where_clause", new SearchExpression());
    {if (true) return cmd;}
    throw new Error("Missing return statement in function");
  }

// Parses 'SHOW <name> [ ( args ) ] [ WHERE ... ]'.  The shown item is either
// an IDENTIFIER or the SCHEMA keyword; its token image becomes the "show" key.
  final public StatementTree Show() throws ParseException {
  StatementTree cmd = new StatementTree("com.mckoi.database.interpret.Show");
  Expression[] args = null;
  SearchExpression where_clause = new SearchExpression();
  Token t;
    jj_consume_token(SHOW);
    switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
    case IDENTIFIER:
      t = jj_consume_token(IDENTIFIER);
      break;
    case SCHEMA:
      t = jj_consume_token(SCHEMA);
      break;
    default:
      jj_la1[33] = jj_gen;
      jj_consume_token(-1);
      throw new ParseException();
    }
    // Optional parenthesised argument list.
    switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
    case 208:
      jj_consume_token(208);
      args = ExpressionList();
      jj_consume_token(209);
      break;
    default:
      jj_la1[34] = jj_gen;
      ;
    }
    // Optional WHERE filter.
    switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
    case WHERE:
      jj_consume_token(WHERE);
      ConditionsExpression(where_clause);
      break;
    default:
      jj_la1[35] = jj_gen;
      ;
    }
    cmd.putObject("show", t.image);
    cmd.putObject("args", args);
    cmd.putObject("where_clause", where_clause);
    {if (true) return cmd;}
    throw new Error("Missing return statement in function");
  }

// Parses 'GRANT privs ON object TO users [WITH GRANT OPTION]'.
  final public StatementTree Grant() throws ParseException {
  StatementTree cmd = new StatementTree("com.mckoi.database.interpret.PrivManager");
  ArrayList priv_list = new ArrayList();
  String priv_object;
  ArrayList grant_to;
  boolean grant_option = false;
    jj_consume_token(GRANT);
    PrivList(priv_list);
    jj_consume_token(ON);
    priv_object = PrivObject();
    jj_consume_token(TO);
    grant_to = UserNameList(new ArrayList());
    switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
    case WITH:
      jj_consume_token(WITH);
      jj_consume_token(GRANT);
      jj_consume_token(OPTION);
                                grant_option = true;
      break;
    default:
      jj_la1[36] = jj_gen;
      ;
    }
    cmd.putObject("command", "GRANT");
    cmd.putObject("priv_list", priv_list);
    cmd.putObject("priv_object", priv_object);
    cmd.putObject("grant_to", grant_to);
    cmd.putBoolean("grant_option", grant_option);
    {if (true) return cmd;}
    throw new Error("Missing return statement in function");
  }

// Parses 'REVOKE [GRANT OPTION FOR] privs ON object FROM users'.
  final public StatementTree Revoke() throws ParseException {
  StatementTree cmd = new StatementTree("com.mckoi.database.interpret.PrivManager");
  ArrayList priv_list = new ArrayList();
  String priv_object;
  ArrayList revoke_from;
  boolean revoke_grant_option = false;
    jj_consume_token(REVOKE);
    switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
    case GRANT:
      jj_consume_token(GRANT);
      jj_consume_token(OPTION);
      jj_consume_token(FOR);
                             revoke_grant_option = true;
      break;
    default:
      jj_la1[37] = jj_gen;
      ;
    }
    PrivList(priv_list);
    jj_consume_token(ON);
    priv_object = PrivObject();
    jj_consume_token(FROM);
    revoke_from = UserNameList(new ArrayList());
    cmd.putObject("command", "REVOKE");
    cmd.putObject("priv_list", priv_list);
    cmd.putObject("priv_object", priv_object);
    cmd.putObject("revoke_from", revoke_from);
    cmd.putBoolean("revoke_grant_option", revoke_grant_option);
    {if (true) return cmd;}
    throw new Error("Missing return statement in function");
  }

// Parses 'COMMIT' or 'ROLLBACK' into a CompleteTransaction tree.
  final public StatementTree CompleteTransaction() throws ParseException {
  StatementTree cmd = new StatementTree("com.mckoi.database.interpret.CompleteTransaction");
  String command;
    switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
    case COMMIT:
      jj_consume_token(COMMIT);
                               command = "commit";
      break;
    case ROLLBACK:
      jj_consume_token(ROLLBACK);
                                 command = "rollback";
      break;
    default:
      jj_la1[38] = jj_gen;
      jj_consume_token(-1);
      throw new ParseException();
    }
    cmd.putObject("command", command);
    {if (true) return cmd;}
    throw new Error("Missing return statement in function");
  }

// Parses the four forms of SET:
//   SET var = exp | SET TRANSACTION ISOLATION LEVEL SERIALIZABLE
// | SET AUTOCOMMIT on/off | SET SCHEMA name
  final public StatementTree Set() throws ParseException {
  StatementTree cmd = new StatementTree("com.mckoi.database.interpret.Set");
  Token t1;
  String value;    // (unused local emitted by the generator)
  Expression exp;
  String name;
    jj_consume_token(SET);
    switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
    case IDENTIFIER:
      t1 = jj_consume_token(IDENTIFIER);
      jj_consume_token(ASSIGNMENT);
      exp = DoExpression();
      cmd.putObject("type", "VARSET");
      cmd.putObject("var_name", t1.image);
      cmd.putObject("exp", exp);
      break;
    case TRANSACTIONISOLATIONLEVEL:
      jj_consume_token(TRANSACTIONISOLATIONLEVEL);
      // Only SERIALIZABLE is accepted by this grammar.
      t1 = jj_consume_token(SERIALIZABLE);
      cmd.putObject("type", "ISOLATIONSET");
      cmd.putObject("var_name", "TRANSACTION ISOLATION LEVEL");
      cmd.putObject("value", t1.image);
      break;
    case AUTOCOMMIT:
      jj_consume_token(AUTOCOMMIT);
      // Value is the ON keyword or an identifier (eg. 'off'); the token
      // image is passed through uninterpreted.
      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
      case ON:
        t1 = jj_consume_token(ON);
        break;
      case IDENTIFIER:
        t1 = jj_consume_token(IDENTIFIER);
        break;
      default:
        jj_la1[39] = jj_gen;
        jj_consume_token(-1);
        throw new ParseException();
      }
      cmd.putObject("type", "AUTOCOMMIT");
      cmd.putObject("value", t1.image);
      break;
    case SCHEMA:
      jj_consume_token(SCHEMA);
      name = SchemaName();
      cmd.putObject("type", "SCHEMA");
      cmd.putObject("value", name);
      break;
    default:
      jj_la1[40] = jj_gen;
      jj_consume_token(-1);
      throw new ParseException();
    }
    {if (true) return cmd;}
    throw new Error("Missing return statement in function");
  }

// Parses 'SHUTDOWN' into a Misc statement tree.
  final public StatementTree ShutDown() throws ParseException {
  StatementTree cmd = new StatementTree("com.mckoi.database.interpret.Misc");
    jj_consume_token(SHUTDOWN);
    cmd.putObject("command", "shutdown");
    {if (true) return cmd;}
    throw new Error("Missing return statement in function");
  }

// ----------

// Maps a trigger event keyword to its lower-case string form.
  final public String TriggerType() throws ParseException {
    switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
    case INSERT:
      jj_consume_token(INSERT);
                               {if (true) return "insert";}
      break;
    case DELETE:
      jj_consume_token(DELETE);
                               {if (true) return "delete";}
      break;
    case UPDATE:
      jj_consume_token(UPDATE);
                               {if (true) return "update";}
      break;
    default:
      jj_la1[41] = jj_gen;
      jj_consume_token(-1);
      throw new ParseException();
    }
    throw new Error("Missing return statement in function");
  }

// Maps BEFORE/AFTER to "before"/"after".
  final public String BeforeOrAfter() throws ParseException {
    switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
    case BEFORE:
      jj_consume_token(BEFORE);
                               {if (true) return "before";}
      break;
    case AFTER:
      jj_consume_token(AFTER);
                              {if (true) return "after";}
      break;
    default:
      jj_la1[42] = jj_gen;
      jj_consume_token(-1);
      throw new ParseException();
    }
    throw new Error("Missing return statement in function");
  }

// A list of triggered actions separated by 'OR' delimination, for example,
// INSERT OR DELETE OR UPDATE
  final public void TriggerTypes(ArrayList list) throws ParseException {
  String trig_type;
    trig_type = TriggerType();
                              list.add(trig_type);
    label_2:
    while (true) {
      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
      case OR:
        ;
        break;
      default:
        jj_la1[43] = jj_gen;
        break label_2;
      }
      jj_consume_token(OR);
      trig_type = TriggerType();
                                list.add(trig_type);
    }
  }

// A priv object
// Note we add a 2 character prefix to the priv object for future enhancements.
// In the future an object may be something other than a table.
// Returns "T:<table>" for a table target or "S:<schema>" for a schema target.
  final public String PrivObject() throws ParseException {
  String table_name;
  String schema_name;
    switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
    case OLD:
    case NEW:
    case NAME:
    case JAVA:
    case TABLE:
    case ACTION:
    case GROUPS:
    case OPTION:
    case ACCOUNT:
    case PASSWORD:
    case LANGUAGE:
    case PRIVILEGES:
    case QUOTED_VARIABLE:
    case IDENTIFIER:
    case DOT_DELIMINATED_REF:
    case QUOTED_DELIMINATED_REF:
      // The TABLE keyword itself is optional before the table name.
      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
      case TABLE:
        jj_consume_token(TABLE);
        break;
      default:
        jj_la1[44] = jj_gen;
        ;
      }
      table_name = TableName();
      {if (true) return "T:" + table_name;}
      break;
    case SCHEMA:
      jj_consume_token(SCHEMA);
      schema_name = SchemaName();
      {if (true) return "S:" + schema_name;}
      break;
    default:
      jj_la1[45] = jj_gen;
      jj_consume_token(-1);
      throw new ParseException();
    }
    throw new Error("Missing return statement in function");
  }

// A list of privs
  final public ArrayList PrivList(ArrayList list) throws ParseException {
    PrivListItem(list);
    label_3:
    while (true) {
      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
      case 210:
        ;
        break;
      default:
        jj_la1[46] = jj_gen;
        break label_3;
      }
      jj_consume_token(210);
      PrivListItem(list);
    }
    {if (true) return list;}
    throw new Error("Missing return statement in function");
  }

// Adds an item in a priv list
// Accepts SELECT/INSERT/UPDATE/DELETE/REFERENCES/USAGE/ALL [PRIVILEGES];
// the raw token image is what gets stored in the list.
  final public void PrivListItem(ArrayList list) throws ParseException {
  Token t;
    switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
    case SELECT:
      t = jj_consume_token(SELECT);
      break;
    case INSERT:
      t = jj_consume_token(INSERT);
      break;
    case UPDATE:
      t = jj_consume_token(UPDATE);
      break;
    case DELETE:
      t = jj_consume_token(DELETE);
      break;
    case REFERENCES:
      t = jj_consume_token(REFERENCES);
      break;
    case USAGE:
      t = jj_consume_token(USAGE);
      break;
    case ALL:
      t = jj_consume_token(ALL);
      // 'PRIVILEGES' after ALL is optional noise.
      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
      case PRIVILEGES:
        jj_consume_token(PRIVILEGES);
        break;
      default:
        jj_la1[47] = jj_gen;
        ;
      }
      break;
    default:
      jj_la1[48] = jj_gen;
      jj_consume_token(-1);
      throw new ParseException();
    }
    list.add(t.image);
  }

// A table expression
// Parses the body of a SELECT: quantifier, column list, FROM, WHERE,
// GROUP BY [GROUP MAX] [HAVING], and an optional UNION/EXCEPT/INTERSECT
// [ALL] chain to a further select (chained recursively).
  final public TableSelectExpression GetTableSelectExpression() throws ParseException {
  TableSelectExpression table_expr = new TableSelectExpression();
  String composite = "";
  boolean is_all = false;
  TableSelectExpression next_composite_expression;
    jj_consume_token(SELECT);
    switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
    case ALL:
    case DISTINCT:
      table_expr.distinct = SetQuantifier();
      break;
    default:
      jj_la1[49] = jj_gen;
      ;
    }
    SelectColumnList(table_expr.columns);
    switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
    case FROM:
      jj_consume_token(FROM);
      SelectTableList(table_expr.from_clause);
      break;
    default:
      jj_la1[50] = jj_gen;
      ;
    }
    switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
    case WHERE:
      jj_consume_token(WHERE);
      ConditionsExpression(table_expr.where_clause);
      break;
    default:
      jj_la1[51] = jj_gen;
      ;
    }
    switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
    case GROUPBY:
      jj_consume_token(GROUPBY);
      SelectGroupByList(table_expr.group_by);
      // GROUP MAX is a Mckoi extension (see GroupMaxColumn below).
      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
      case GROUPMAX:
        jj_consume_token(GROUPMAX);
        table_expr.group_max = GroupMaxColumn();
        break;
      default:
        jj_la1[52] = jj_gen;
        ;
      }
      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
      case HAVING:
        jj_consume_token(HAVING);
        ConditionsExpression(table_expr.having_clause);
        break;
      default:
        jj_la1[53] = jj_gen;
        ;
      }
      break;
    default:
      jj_la1[54] = jj_gen;
      ;
    }
    switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
    case UNION:
    case EXCEPT:
    case INTERSECT:
      composite = GetComposite();
      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
      case ALL:
        jj_consume_token(ALL);
                              is_all = true;
        break;
      default:
        jj_la1[55] = jj_gen;
        ;
      }
      next_composite_expression = GetTableSelectExpression();
      table_expr.chainComposite(next_composite_expression, composite, is_all);
      break;
    default:
      jj_la1[56] = jj_gen;
      ;
    }
    {if (true) return table_expr;}
    throw new Error("Missing return statement in function");
  }

// Parses one ALTER TABLE action:
//   ADD [COLUMN] coldef | ADD constraint | ALTER [COLUMN] col SET exp
// | ALTER [COLUMN] col DROP DEFAULT | DROP [COLUMN] col
// | DROP CONSTRAINT name | DROP PRIMARY KEY
  final public AlterTableAction GetAlterTableAction() throws ParseException {
  String col_name, con_name;
  ColumnDef column_def;
  ConstraintDef constraint_def;
  Expression default_exp;
  AlterTableAction action = new AlterTableAction();
    switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
    case SQLADD:
      jj_consume_token(SQLADD);
      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
      case NAME:
      case JAVA:
      case ACTION:
      case GROUPS:
      case OPTION:
      case SQLCOLUMN:
      case ACCOUNT:
      case PASSWORD:
      case LANGUAGE:
      case PRIVILEGES:
      case QUOTED_VARIABLE:
      case IDENTIFIER:
      case DOT_DELIMINATED_REF:
      case QUOTED_DELIMINATED_REF:
        // 'COLUMN' keyword is optional before the column definition.
        switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
        case SQLCOLUMN:
          jj_consume_token(SQLCOLUMN);
          break;
        default:
          jj_la1[57] = jj_gen;
          ;
        }
        column_def = ColumnDefinition();
        action.setAction("ADD");
        action.addElement(column_def);
        break;
      case CHECK:
      case UNIQUE:
      case CONSTRAINT:
      case PRIMARY:
      case FOREIGN:
        constraint_def = TableConstraintDefinition();
        action.setAction("ADD_CONSTRAINT");
        action.addElement(constraint_def);
        break;
      default:
        jj_la1[58] = jj_gen;
        jj_consume_token(-1);
        throw new ParseException();
      }
      break;
    case ALTER:
      jj_consume_token(ALTER);
      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
      case SQLCOLUMN:
        jj_consume_token(SQLCOLUMN);
        break;
      default:
        jj_la1[59] = jj_gen;
        ;
      }
      col_name = ColumnName();
      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
      case SET:
        jj_consume_token(SET);
        default_exp = DoExpression();
        action.setAction("ALTERSET");
        action.addElement(col_name);
        action.addElement(default_exp);
        break;
      case DROP:
        jj_consume_token(DROP);
        jj_consume_token(SQLDEFAULT);
        action.setAction("DROPDEFAULT");
        action.addElement(col_name);
        break;
      default:
        jj_la1[60] = jj_gen;
        jj_consume_token(-1);
        throw new ParseException();
      }
      break;
    case DROP:
      jj_consume_token(DROP);
      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
      case NAME:
      case JAVA:
      case ACTION:
      case GROUPS:
      case OPTION:
      case SQLCOLUMN:
      case ACCOUNT:
      case PASSWORD:
      case LANGUAGE:
      case PRIVILEGES:
      case QUOTED_VARIABLE:
      case IDENTIFIER:
      case DOT_DELIMINATED_REF:
      case QUOTED_DELIMINATED_REF:
        switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
        case SQLCOLUMN:
          jj_consume_token(SQLCOLUMN);
          break;
        default:
          jj_la1[61] = jj_gen;
          ;
        }
        col_name = ColumnName();
        action.setAction("DROP");
        action.addElement(col_name);
        break;
      case CONSTRAINT:
        jj_consume_token(CONSTRAINT);
        con_name = ConstraintName();
        action.setAction("DROP_CONSTRAINT");
        action.addElement(con_name);
        break;
      case PRIMARY:
        jj_consume_token(PRIMARY);
        jj_consume_token(KEY);
        action.setAction("DROP_CONSTRAINT_PRIMARY_KEY");
        break;
      default:
        jj_la1[62] = jj_gen;
        jj_consume_token(-1);
        throw new ParseException();
      }
      break;
    default:
      jj_la1[63] = jj_gen;
      jj_consume_token(-1);
      throw new ParseException();
    }
    {if (true) return action;}
    throw new Error("Missing return statement in function");
  }

// An element to insert, either an expression or DEFAULT for the default
// element.
// One value in a VALUES row: the DEFAULT keyword (returned as the string
// "DEFAULT") or any expression (returned as an Expression).
  final public Object InsertElement() throws ParseException {
  Expression e;
    switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
    case SQLDEFAULT:
      jj_consume_token(SQLDEFAULT);
                                   {if (true) return "DEFAULT";}
      break;
    case ADD:
    case SUBTRACT:
    case BOOLEAN_LITERAL:
    case NULL_LITERAL:
    case IF:
    case NEW:
    case TRIM:
    case USER:
    case CAST:
    case NAME:
    case JAVA:
    case COUNT:
    case ACTION:
    case GROUPS:
    case OPTION:
    case ACCOUNT:
    case PASSWORD:
    case LANGUAGE:
    case PRIVILEGES:
    case DATE:
    case TIME:
    case TIMESTAMP:
    case CURRENT_TIME:
    case CURRENT_DATE:
    case CURRENT_TIMESTAMP:
    case NOT:
    case NUMBER_LITERAL:
    case STRING_LITERAL:
    case QUOTED_VARIABLE:
    case IDENTIFIER:
    case DOT_DELIMINATED_REF:
    case QUOTED_DELIMINATED_REF:
    case PARAMETER_REF:
    case 208:
      e = DoExpression();
                         {if (true) return e;}
      break;
    default:
      jj_la1[64] = jj_gen;
      jj_consume_token(-1);
      throw new ParseException();
    }
    throw new Error("Missing return statement in function");
  }

// Parses a possibly-empty comma-separated list of InsertElement values
// (the contents of one VALUES row).  An empty list yields an empty ArrayList.
  final public ArrayList InsertExpressionList() throws ParseException {
  ArrayList list = new ArrayList();
  Object elem;
    switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
    case ADD:
    case SUBTRACT:
    case BOOLEAN_LITERAL:
    case NULL_LITERAL:
    case IF:
    case NEW:
    case TRIM:
    case USER:
    case CAST:
    case NAME:
    case JAVA:
    case COUNT:
    case ACTION:
    case GROUPS:
    case OPTION:
    case ACCOUNT:
    case SQLDEFAULT:
    case PASSWORD:
    case LANGUAGE:
    case PRIVILEGES:
    case DATE:
    case TIME:
    case TIMESTAMP:
    case CURRENT_TIME:
    case CURRENT_DATE:
    case CURRENT_TIMESTAMP:
    case NOT:
    case NUMBER_LITERAL:
    case STRING_LITERAL:
    case QUOTED_VARIABLE:
    case IDENTIFIER:
    case DOT_DELIMINATED_REF:
    case QUOTED_DELIMINATED_REF:
    case PARAMETER_REF:
    case 208:
      elem = InsertElement();
                             list.add(elem);
      label_4:
      while (true) {
        switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
        case 210:
          ;
          break;
        default:
          jj_la1[65] = jj_gen;
          break label_4;
        }
        jj_consume_token(210);
        elem = InsertElement();
                               list.add(elem);
      }
      break;
    default:
      jj_la1[66] = jj_gen;
      ;
    }
    {if (true) return list;}
    throw new Error("Missing return statement in function");
  }

// The list of columns to insert formatted as; eg. (9, 4), (3, 2), (9, 9), ....
// Each parenthesised row becomes one ArrayList appended to data_list.
  final public void InsertDataList(ArrayList data_list) throws ParseException {
  ArrayList insert_vals;
    jj_consume_token(208);
    insert_vals = InsertExpressionList();
    jj_consume_token(209);
    data_list.add(insert_vals);
    label_5:
    while (true) {
      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
      case 210:
        ;
        break;
      default:
        jj_la1[67] = jj_gen;
        break label_5;
      }
      jj_consume_token(210);
      jj_consume_token(208);
      insert_vals = InsertExpressionList();
      jj_consume_token(209);
      data_list.add(insert_vals);
    }
  }

// Returning true means distinct, false means all.
  final public boolean SetQuantifier() throws ParseException {
    switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
    case DISTINCT:
      jj_consume_token(DISTINCT);
                                 {if (true) return true;}
      break;
    case ALL:
      jj_consume_token(ALL);
                            {if (true) return false;}
      break;
    default:
      jj_la1[68] = jj_gen;
      jj_consume_token(-1);
      throw new ParseException();
    }
    throw new Error("Missing return statement in function");
  }

// Parses the comma-separated select column list into 'list'.
  final public void SelectColumnList(ArrayList list) throws ParseException {
  SelectColumn col;
    col = SelectColumn();
                         list.add(col);
    label_6:
    while (true) {
      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
      case 210:
        ;
        break;
      default:
        jj_la1[69] = jj_gen;
        break label_6;
      }
      jj_consume_token(210);
      col = SelectColumn();
                           list.add(col);
    }
  }

// One select column: an expression with optional [AS] alias, or a glob
// form ('*', 'table.*', or a quoted glob) stored in col.glob_name.
  final public SelectColumn SelectColumn() throws ParseException {
  SelectColumn col = new SelectColumn();
  String aliased_name;    // (unused local emitted by the generator)
  Token t;
    switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
    case ADD:
    case SUBTRACT:
    case BOOLEAN_LITERAL:
    case NULL_LITERAL:
    case IF:
    case NEW:
    case TRIM:
    case USER:
    case CAST:
    case NAME:
    case JAVA:
    case COUNT:
    case ACTION:
    case GROUPS:
    case OPTION:
    case ACCOUNT:
    case PASSWORD:
    case LANGUAGE:
    case PRIVILEGES:
    case DATE:
    case TIME:
    case TIMESTAMP:
    case CURRENT_TIME:
    case CURRENT_DATE:
    case CURRENT_TIMESTAMP:
    case NOT:
    case NUMBER_LITERAL:
    case STRING_LITERAL:
    case QUOTED_VARIABLE:
    case IDENTIFIER:
    case DOT_DELIMINATED_REF:
    case QUOTED_DELIMINATED_REF:
    case PARAMETER_REF:
    case 208:
      col.expression = DoExpression();
      // 'AS' before the alias is optional.
      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
      case AS:
        jj_consume_token(AS);
        break;
      default:
        jj_la1[70] = jj_gen;
        ;
      }
      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
      case NAME:
      case JAVA:
      case ACTION:
      case GROUPS:
      case OPTION:
      case ACCOUNT:
      case PASSWORD:
      case LANGUAGE:
      case PRIVILEGES:
      case QUOTED_VARIABLE:
      case IDENTIFIER:
        col.alias = TableAliasName();
        break;
      default:
        jj_la1[71] = jj_gen;
        ;
      }
      break;
    case STAR:
      jj_consume_token(STAR);
                             col.glob_name = "*";
      break;
    case GLOBVARIABLE:
      t = jj_consume_token(GLOBVARIABLE);
                                         col.glob_name = caseCheck(t.image);
      break;
    case QUOTEDGLOBVARIABLE:
      t = jj_consume_token(QUOTEDGLOBVARIABLE);
                                               col.glob_name = caseCheck(Util.asNonQuotedRef(t));
      break;
    default:
      jj_la1[72] = jj_gen;
      jj_consume_token(-1);
      throw new ParseException();
    }
    {if (true) return col;}
    throw new Error("Missing return statement in function");
  }

// Parses the GROUP BY expression list; each expression is wrapped in a
// ByColumn and appended to 'list'.
  final public void SelectGroupByList(ArrayList list) throws ParseException {
  ByColumn col;
  Expression exp;
    exp = DoExpression();
    col = new ByColumn();
    col.exp = exp;
    list.add(col);
    label_7:
    while (true) {
      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
      case 210:
        ;
        break;
      default:
        jj_la1[73] = jj_gen;
        break label_7;
      }
      jj_consume_token(210);
      exp = DoExpression();
      col = new ByColumn();
      col.exp = exp;
      list.add(col);
    }
  }

/**
 * NOTE: This is an extension, allows for us to specify a column to return the
 * max value for each row representing a group.
 */
  final public Variable GroupMaxColumn() throws ParseException {
  Variable var;
    var = ColumnNameVariable();
    {if (true) return var;}
    throw new Error("Missing return statement in function");
  }

// Parses the ORDER BY list; each entry is an expression with an optional
// ASC/DESC flag (default ascending).
  final public void SelectOrderByList(ArrayList list) throws ParseException {
  ByColumn col;
  Expression exp;
  boolean ascending = true;
    exp = DoExpression();
    switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
    case ASC:
    case DESC:
      ascending = OrderingSpec();
      break;
    default:
      jj_la1[74] = jj_gen;
      ;
    }
    col = new ByColumn();
    col.exp = exp;
    col.ascending = ascending;
    list.add(col);
    label_8:
    while (true) {
      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
      case 210:
        ;
        break;
      default:
        jj_la1[75] = jj_gen;
        break label_8;
      }
      jj_consume_token(210);
      exp = DoExpression();
                           ascending=true;
      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
      case ASC:
      case DESC:
        ascending = OrderingSpec();
        break;
      default:
        jj_la1[76] = jj_gen;
        ;
      }
      col = new ByColumn();
      col.exp = exp;
      col.ascending = ascending;
      list.add(col);
    }
  }

// ASC -> true, DESC -> false.
  final public boolean OrderingSpec() throws ParseException {
    switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
    case ASC:
      jj_consume_token(ASC);
                            {if (true) return true;}
      break;
    case DESC:
      jj_consume_token(DESC);
                             {if (true) return false;}
      break;
    default:
      jj_la1[77] = jj_gen;
      jj_consume_token(-1);
      throw new ParseException();
    }
    // Unreachable default emitted by the generator.
    {if (true) return true;}
    throw new Error("Missing return statement in function");
  }

// One entry of the FROM clause: a table name or a parenthesised sub-select,
// with an optional [AS] alias; registered on the FromClause.
  final public void TableDeclaration(FromClause from_clause) throws ParseException {
  String table=null, declare_as = null;
  TableSelectExpression select_stmt = null;
    switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
    case OLD:
    case NEW:
    case NAME:
    case JAVA:
    case ACTION:
    case GROUPS:
    case OPTION:
    case ACCOUNT:
    case PASSWORD:
    case LANGUAGE:
    case PRIVILEGES:
    case QUOTED_VARIABLE:
    case IDENTIFIER:
    case DOT_DELIMINATED_REF:
    case QUOTED_DELIMINATED_REF:
      table = TableName();
      break;
    case 208:
      jj_consume_token(208);
      select_stmt = GetTableSelectExpression();
      jj_consume_token(209);
      break;
    default:
      jj_la1[78] = jj_gen;
      jj_consume_token(-1);
      throw new ParseException();
    }
    switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
    case AS:
    case OLD:
    case NEW:
    case NAME:
    case JAVA:
    case ACTION:
    case GROUPS:
    case OPTION:
    case ACCOUNT:
    case PASSWORD:
    case LANGUAGE:
    case PRIVILEGES:
    case QUOTED_VARIABLE:
    case IDENTIFIER:
    case DOT_DELIMINATED_REF:
    case QUOTED_DELIMINATED_REF:
      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
      case AS:
        jj_consume_token(AS);
        break;
      default:
        jj_la1[79] = jj_gen;
        ;
      }
      declare_as = TableName();
      break;
    default:
      jj_la1[80] = jj_gen;
      ;
    }
    from_clause.addTableDeclaration(table, select_stmt, declare_as);
  }

// Parses the FROM table list: one declaration optionally followed by a
// join chain (comma, INNER/LEFT/RIGHT JOIN ... ON ...).
  final public void SelectTableList(FromClause from_clause) throws ParseException {
    TableDeclaration(from_clause);
    switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
    case LEFT:
    case JOIN:
    case RIGHT:
    case INNER:
    case 210:
      FromClauseJoin(from_clause);
      break;
    default:
      jj_la1[81] = jj_gen;
      ;
    }
  }

// Parses one join step and recurses for chained joins.  A bare comma is
// treated as an inner join; [INNER] JOIN / LEFT [OUTER] JOIN /
// RIGHT [OUTER] JOIN each consume an ON expression.
  final public void FromClauseJoin(FromClause from_clause) throws ParseException {
  Expression on_expression;
    switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
    case 210:
      jj_consume_token(210);
                            from_clause.addJoin(JoiningSet.INNER_JOIN);
      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
      case OLD:
      case NEW:
      case NAME:
      case JAVA:
      case ACTION:
      case GROUPS:
      case OPTION:
      case ACCOUNT:
      case PASSWORD:
      case LANGUAGE:
      case PRIVILEGES:
      case QUOTED_VARIABLE:
      case IDENTIFIER:
      case DOT_DELIMINATED_REF:
      case QUOTED_DELIMINATED_REF:
      case 208:
        SelectTableList(from_clause);
        break;
      default:
        jj_la1[82] = jj_gen;
        ;
      }
      break;
    case JOIN:
    case INNER:
      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
      case INNER:
        jj_consume_token(INNER);
        break;
      default:
        jj_la1[83] = jj_gen;
        ;
      }
      jj_consume_token(JOIN);
      TableDeclaration(from_clause);
      jj_consume_token(ON);
      on_expression = DoExpression();
      from_clause.addPreviousJoin(JoiningSet.INNER_JOIN, on_expression);
      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
      case LEFT:
      case JOIN:
      case RIGHT:
      case INNER:
      case 210:
        FromClauseJoin(from_clause);
        break;
      default:
        jj_la1[84] = jj_gen;
        ;
      }
      break;
    case LEFT:
      jj_consume_token(LEFT);
      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
      case OUTER:
        jj_consume_token(OUTER);
        break;
      default:
        jj_la1[85] = jj_gen;
        ;
      }
      jj_consume_token(JOIN);
      TableDeclaration(from_clause);
      jj_consume_token(ON);
      on_expression = DoExpression();
      from_clause.addPreviousJoin(JoiningSet.LEFT_OUTER_JOIN, on_expression);
      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
      case LEFT:
      case JOIN:
      case RIGHT:
      case INNER:
      case 210:
        FromClauseJoin(from_clause);
        break;
      default:
        jj_la1[86] = jj_gen;
        ;
      }
      break;
    case RIGHT:
      jj_consume_token(RIGHT);
      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
      case OUTER:
        jj_consume_token(OUTER);
        break;
      default:
        jj_la1[87] = jj_gen;
        ;
      }
      jj_consume_token(JOIN);
      TableDeclaration(from_clause);
      jj_consume_token(ON);
      on_expression = DoExpression();
      from_clause.addPreviousJoin(JoiningSet.RIGHT_OUTER_JOIN, on_expression);
      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
      case LEFT:
      case JOIN:
      case RIGHT:
      case INNER:
      case 210:
        FromClauseJoin(from_clause);
        break;
      default:
        jj_la1[88] = jj_gen;
        ;
      }
      break;
    default:
      jj_la1[89] = jj_gen;
      jj_consume_token(-1);
      throw new ParseException();
    }
  }

// A list of parameters in a function or procedure declaration. For example,
// ' p1 NUMERIC, p2 NUMERIC, s1 CHARACTER VARYING '
// First array contains parameter names, and second contains TType representing
// the type.
// Parses the (possibly empty) parameter list: each entry is an optional
// argument name followed by a type; a null is added to decl_names for an
// unnamed parameter so the two lists stay index-aligned.
  final public void ProcParameterList(ArrayList decl_names, ArrayList decl_types) throws ParseException {
  String name;
  TType type;
    switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
    case LONG:
    case NAME:
    case JAVA:
    case ACTION:
    case GROUPS:
    case OPTION:
    case ACCOUNT:
    case PASSWORD:
    case LANGUAGE:
    case CHARACTER:
    case PRIVILEGES:
    case BIT:
    case INT:
    case REAL:
    case CLOB:
    case BLOB:
    case CHAR:
    case TEXT:
    case DATE:
    case TIME:
    case FLOAT:
    case BIGINT:
    case DOUBLE:
    case STRING:
    case BINARY:
    case NUMERIC:
    case DECIMAL:
    case BOOLEAN:
    case TINYINT:
    case INTEGER:
    case VARCHAR:
    case SMALLINT:
    case VARBINARY:
    case TIMESTAMP:
    case JAVA_OBJECT:
    case LONGVARCHAR:
    case LONGVARBINARY:
    case QUOTED_VARIABLE:
    case IDENTIFIER:
      name = null;
      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
      case NAME:
      case JAVA:
      case ACTION:
      case GROUPS:
      case OPTION:
      case ACCOUNT:
      case PASSWORD:
      case LANGUAGE:
      case PRIVILEGES:
      case QUOTED_VARIABLE:
      case IDENTIFIER:
        name = ProcArgumentName();
        break;
      default:
        jj_la1[90] = jj_gen;
        ;
      }
      type = GetTType();
      decl_names.add(name);
      decl_types.add(type);
      label_9:
      while (true) {
        switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
        case 210:
          ;
          break;
        default:
          jj_la1[91] = jj_gen;
          break label_9;
        }
        jj_consume_token(210);
        name = null;
        switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
        case NAME:
        case JAVA:
        case ACTION:
        case GROUPS:
        case OPTION:
        case ACCOUNT:
        case PASSWORD:
        case LANGUAGE:
        case PRIVILEGES:
        case QUOTED_VARIABLE:
        case IDENTIFIER:
          name = ProcArgumentName();
          break;
        default:
          jj_la1[92] = jj_gen;
          ;
        }
        type = GetTType();
        decl_names.add(name);
        decl_types.add(type);
      }
      break;
    default:
      jj_la1[93] = jj_gen;
      ;
    }
  }

// The ' set a = (a * 9), b = concat(b, "aa") ' part of the 'update', 'insert' statement
// Note: the comma continuation is handled by tail recursion rather than a loop.
  final public void AssignmentList(ArrayList assignment_list) throws ParseException {
  String column;
  Expression exp;
    column = ColumnName();
    jj_consume_token(ASSIGNMENT);
    exp = DoExpression();
    assignment_list.add(new Assignment(Variable.resolve(column), exp));
    switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
    case 210:
      jj_consume_token(210);
      AssignmentList(assignment_list);
      break;
    default:
      jj_la1[94] = jj_gen;
      ;
    }
  }

// Parses a list of column declarations. eg. ' id NUMERIC(5, 20), number VARCHAR(90), ... '
// and also any constraints.
  final public void ColumnDeclarationList(ArrayList column_list, ArrayList constraint_list) throws ParseException {
    jj_consume_token(208);
    ColumnOrConstraintDefinition(column_list, constraint_list);
    label_10:
    while (true) {
      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
      case 210:
        ;
        break;
      default:
        jj_la1[95] = jj_gen;
        break label_10;
      }
      jj_consume_token(210);
      ColumnOrConstraintDefinition(column_list, constraint_list);
    }
    jj_consume_token(209);
  }

// Dispatches one declaration to either the column list (name tokens) or the
// constraint list (CHECK/UNIQUE/CONSTRAINT/PRIMARY/FOREIGN).
  final public void ColumnOrConstraintDefinition(ArrayList column_list, ArrayList constraint_list) throws ParseException {
  ColumnDef coldef = null;
  ConstraintDef condef = null;
    switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
    case NAME:
    case JAVA:
    case ACTION:
    case GROUPS:
    case OPTION:
    case ACCOUNT:
    case PASSWORD:
    case LANGUAGE:
    case PRIVILEGES:
    case QUOTED_VARIABLE:
    case IDENTIFIER:
    case DOT_DELIMINATED_REF:
    case QUOTED_DELIMINATED_REF:
      coldef = ColumnDefinition();
                                  column_list.add(coldef);
      break;
    case CHECK:
    case UNIQUE:
    case CONSTRAINT:
    case PRIMARY:
    case FOREIGN:
      condef = TableConstraintDefinition();
                                           constraint_list.add(condef);
      break;
    default:
      jj_la1[96] = jj_gen;
      jj_consume_token(-1);
      throw new ParseException();
    }
  }

// One column definition: name, type, optional DEFAULT expression, any number
// of column constraints, and an optional Mckoi index-type hint
// (INDEX_BLIST / INDEX_NONE).
  final public ColumnDef ColumnDefinition() throws ParseException {
  ColumnDef column = new ColumnDef();
  Token t;
  Token col_constraint;    // (unused local emitted by the generator)
  Expression default_exp;
  String col_name;
    col_name = ColumnName();
                            column.setName(col_name);
    ColumnDataType(column);
    switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
    case SQLDEFAULT:
      jj_consume_token(SQLDEFAULT);
      default_exp = DoExpression();
                                   column.setDefaultExpression(default_exp);
      break;
    default:
      jj_la1[97] = jj_gen;
      ;
    }
    label_11:
    while (true) {
      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
      case NULL_LITERAL:
      case UNIQUE:
      case PRIMARY:
      case NOT:
        ;
        break;
      default:
        jj_la1[98] = jj_gen;
        break label_11;
      }
      ColumnConstraint(column);
    }
    switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
    case INDEX_NONE:
    case INDEX_BLIST:
      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
      case INDEX_BLIST:
        t = jj_consume_token(INDEX_BLIST);
        break;
      case INDEX_NONE:
        t = jj_consume_token(INDEX_NONE);
        break;
      default:
        jj_la1[99] = jj_gen;
        jj_consume_token(-1);
        throw new ParseException();
      }
      column.setIndex(t);
      break;
    default:
      jj_la1[100] = jj_gen;
      ;
    }
    {if (true) return column;}
    throw new Error("Missing return statement in function");
  }

// Constraint on a column, eg. 'NOT NULL', 'NULL', 'PRIMARY KEY', 'UNIQUE', etc.
  final public void ColumnConstraint(ColumnDef column) throws ParseException {
  Token t;                               // (unused locals emitted by the generator)
  String table_name;
  ArrayList col_list = new ArrayList();
    switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
    case NOT:
      jj_consume_token(NOT);
      jj_consume_token(NULL_LITERAL);
                                     column.addConstraint("NOT NULL");
      break;
    case NULL_LITERAL:
      jj_consume_token(NULL_LITERAL);
                                     column.addConstraint("NULL");
      break;
    case PRIMARY:
      jj_consume_token(PRIMARY);
      jj_consume_token(KEY);
                            column.addConstraint("PRIMARY");
      break;
    case UNIQUE:
      jj_consume_token(UNIQUE);
                               column.addConstraint("UNIQUE");
      break;
    default:
      jj_la1[101] = jj_gen;
      jj_consume_token(-1);
      throw new ParseException();
    }
  }

// Maps a COLLATE strength keyword to the java.text.Collator constant.
  final public int GetCollateStrength() throws ParseException {
    switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
    case PRIMARY_STRENGTH:
      jj_consume_token(PRIMARY_STRENGTH);
                                         {if (true) return java.text.Collator.PRIMARY;}
      break;
    case SECONDARY_STRENGTH:
      jj_consume_token(SECONDARY_STRENGTH);
                                           {if (true) return java.text.Collator.SECONDARY;}
      break;
    case TERTIARY_STRENGTH:
      jj_consume_token(TERTIARY_STRENGTH);
                                          {if (true) return java.text.Collator.TERTIARY;}
      break;
    case IDENTICAL_STRENGTH:
      jj_consume_token(IDENTICAL_STRENGTH);
                                           {if (true) return java.text.Collator.IDENTICAL;}
      break;
    default:
      jj_la1[102] = jj_gen;
      jj_consume_token(-1);
      throw new ParseException();
    }
    throw new Error("Missing return statement in function");
  }

// Maps a COLLATE decomposition keyword to the java.text.Collator constant.
  final public int GetCollateDecomposition() throws ParseException {
    switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
    case NO_DECOMPOSITION:
      jj_consume_token(NO_DECOMPOSITION);
                                         {if (true) return java.text.Collator.NO_DECOMPOSITION;}
      break;
    case CANONICAL_DECOMPOSITION:
      jj_consume_token(CANONICAL_DECOMPOSITION);
                                                {if (true) return java.text.Collator.CANONICAL_DECOMPOSITION;}
      break;
    case FULL_DECOMPOSITION:
      jj_consume_token(FULL_DECOMPOSITION);
                                           {if (true) return java.text.Collator.FULL_DECOMPOSITION;}
      break;
    default:
      jj_la1[103] = jj_gen;
      jj_consume_token(-1);
      throw new ParseException();
    }
    throw new Error("Missing return statement in function");
  }

// Resolves the string type keywords to an SQLTypes code.  The jj_2_* calls
// are generated lookahead probes for the multi-token forms
// 'CHARACTER VARYING' and 'LONG CHARACTER VARYING'.
  final public int GetStringSQLType() throws ParseException {
    if (jj_2_2(2)) {
      jj_consume_token(CHARACTER);
      jj_consume_token(VARYING);
                                {if (true) return SQLTypes.VARCHAR;}
    } else if (jj_2_3(3)) {
      jj_consume_token(LONG);
      jj_consume_token(CHARACTER);
      jj_consume_token(VARYING);
                                {if (true) return SQLTypes.LONGVARCHAR;}
    } else {
      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
      case TEXT:
      case STRING:
      case LONGVARCHAR:
        switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
        case TEXT:
          jj_consume_token(TEXT);
          break;
        case STRING:
          jj_consume_token(STRING);
          break;
        case LONGVARCHAR:
          jj_consume_token(LONGVARCHAR);
          break;
        default:
          jj_la1[104] = jj_gen;
          jj_consume_token(-1);
          throw new ParseException();
        }
        {if (true) return SQLTypes.LONGVARCHAR;}
        break;
      case CHARACTER:
      case CHAR:
        switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
        case CHAR:
          jj_consume_token(CHAR);
          break;
        case CHARACTER:
          jj_consume_token(CHARACTER);
          break;
        default:
          jj_la1[105] = jj_gen;
          jj_consume_token(-1);
          throw new ParseException();
        }
        {if (true) return SQLTypes.CHAR;}
        break;
      case VARCHAR:
        jj_consume_token(VARCHAR);
                                  {if (true) return SQLTypes.VARCHAR;}
        break;
      case CLOB:
        jj_consume_token(CLOB);
                               {if (true) return SQLTypes.CLOB;}
        break;
      default:
        jj_la1[106] = jj_gen;
        jj_consume_token(-1);
        throw new ParseException();
      }
    }
    throw new Error("Missing return statement in function");
  }

// Resolves the numeric type keywords to an SQLTypes code.
  final public int GetNumericSQLType() throws ParseException {
    switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
    case INT:
    case INTEGER:
      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
      case INT:
        jj_consume_token(INT);
        break;
      case INTEGER:
        jj_consume_token(INTEGER);
        break;
      default:
        jj_la1[107] = jj_gen;
        jj_consume_token(-1);
        throw new ParseException();
      }
      {if (true) return SQLTypes.INTEGER;}
      break;
    case TINYINT:
      jj_consume_token(TINYINT);
                                {if (true) return SQLTypes.TINYINT;}
      break;
    case SMALLINT:
      jj_consume_token(SMALLINT);
                                 {if (true) return SQLTypes.SMALLINT;}
      break;
    case BIGINT:
      jj_consume_token(BIGINT);
                               {if (true) return SQLTypes.BIGINT;}
      break;
    case FLOAT:
      jj_consume_token(FLOAT);
                              {if (true) return SQLTypes.FLOAT;}
      break;
    case REAL:
      jj_consume_token(REAL);
                             {if (true) return SQLTypes.REAL;}
      break;
    case DOUBLE:
      jj_consume_token(DOUBLE);
                               {if (true) return SQLTypes.DOUBLE;}
      break;
    case NUMERIC:
      jj_consume_token(NUMERIC);
                                {if (true) return SQLTypes.NUMERIC;}
      break;
    case DECIMAL:
      jj_consume_token(DECIMAL);
                                {if (true) return SQLTypes.DECIMAL;}
      break;
    default:
      jj_la1[108] = jj_gen;
      jj_consume_token(-1);
      throw new ParseException();
    }
    throw new Error("Missing return statement in function");
  }

// BOOLEAN and BIT both map to SQLTypes.BOOLEAN.
  final public int GetBooleanSQLType() throws ParseException {
    switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
    case BOOLEAN:
      jj_consume_token(BOOLEAN);
      break;
    case BIT:
      jj_consume_token(BIT);
      break;
    default:
      jj_la1[109] = jj_gen;
      jj_consume_token(-1);
      throw new ParseException();
    }
    {if (true) return SQLTypes.BOOLEAN;}
    throw new Error("Missing return statement in function");
  }

// Resolves TIMESTAMP / TIME / DATE to the SQLTypes code.
  final public int GetDateSQLType() throws ParseException {
    switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
    case TIMESTAMP:
      jj_consume_token(TIMESTAMP);
                                  {if (true) return SQLTypes.TIMESTAMP;}
      break;
    case TIME:
      jj_consume_token(TIME);
                             {if (true) return SQLTypes.TIME;}
      break;
    case DATE:
      jj_consume_token(DATE);
                             {if (true) return SQLTypes.DATE;}
      break;
    default:
      jj_la1[110] = jj_gen;
      jj_consume_token(-1);
      throw new ParseException();
    }
    throw new Error("Missing return statement in function");
  }

// Resolves the binary type keywords to an SQLTypes code; jj_2_4 / jj_2_5 are
// generated lookahead probes for 'BINARY VARYING' and 'LONG BINARY VARYING'.
  final public int GetBinarySQLType() throws ParseException {
    if (jj_2_4(2)) {
      jj_consume_token(BINARY);
      jj_consume_token(VARYING);
                                {if (true) return SQLTypes.VARBINARY;}
    } else if (jj_2_5(3)) {
      jj_consume_token(LONG);
      jj_consume_token(BINARY);
      jj_consume_token(VARYING);
                                {if (true) return SQLTypes.LONGVARBINARY;}
    } else {
      switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
      case LONGVARBINARY:
        jj_consume_token(LONGVARBINARY);
                                        {if (true) return SQLTypes.LONGVARBINARY;}
        break;
      case VARBINARY:
        jj_consume_token(VARBINARY);
                                    {if (true) return SQLTypes.VARBINARY;}
        break;
      case BINARY:
        jj_consume_token(BINARY);
                                 {if (true) return SQLTypes.BINARY;}
        break;
      case BLOB:
        jj_consume_token(BLOB);
                               {if (true) return SQLTypes.BLOB;}
        break;
      default:
        jj_la1[111] = jj_gen;
        jj_consume_token(-1);
        throw new ParseException();
      }
    }
    throw new Error("Missing return statement in function");
  }

// Parses an SQL type and forms a TType object. For example, "CHAR(500)" is
// parsed to a TStringType with a maximum size of 500 and lexicographical
// collation.
final public TType GetTType() throws ParseException { Token t; int data_type; int size = -1; int scale = -1; Token class_tok = null; int strength = -1; int decomposition = -1; String loc = null; switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case JAVA_OBJECT: jj_consume_token(JAVA_OBJECT); switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case 208: jj_consume_token(208); switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case DOT_DELIMINATED_REF: class_tok = jj_consume_token(DOT_DELIMINATED_REF); break; case JAVA_OBJECT_ARRAY_REF: class_tok = jj_consume_token(JAVA_OBJECT_ARRAY_REF); break; default: jj_la1[112] = jj_gen; jj_consume_token(-1); throw new ParseException(); } jj_consume_token(209); break; default: jj_la1[113] = jj_gen; ; } String class_str = "java.lang.Object"; if (class_tok != null) { class_str = class_tok.image; } {if (true) return TType.javaObjectType(class_str);} break; default: jj_la1[121] = jj_gen; if (jj_2_6(2147483647)) { data_type = GetStringSQLType(); switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case 208: jj_consume_token(208); size = PositiveIntegerConstant(); jj_consume_token(209); break; default: jj_la1[114] = jj_gen; ; } switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case COLLATE: jj_consume_token(COLLATE); t = jj_consume_token(STRING_LITERAL); loc = ((TObject) Util.toParamObject(t, case_insensitive_identifiers)).toString(); switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case PRIMARY_STRENGTH: case SECONDARY_STRENGTH: case TERTIARY_STRENGTH: case IDENTICAL_STRENGTH: strength = GetCollateStrength(); break; default: jj_la1[115] = jj_gen; ; } switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case NO_DECOMPOSITION: case CANONICAL_DECOMPOSITION: case FULL_DECOMPOSITION: decomposition = GetCollateDecomposition(); break; default: jj_la1[116] = jj_gen; ; } break; default: jj_la1[117] = jj_gen; ; } {if (true) return TType.stringType(data_type, size, loc, strength, decomposition);} } else { switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case INT: case REAL: case FLOAT: case BIGINT: case DOUBLE: case NUMERIC: 
case DECIMAL: case TINYINT: case INTEGER: case SMALLINT: data_type = GetNumericSQLType(); switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case 208: jj_consume_token(208); size = PositiveIntegerConstant(); switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case 210: jj_consume_token(210); scale = PositiveIntegerConstant(); break; default: jj_la1[118] = jj_gen; ; } jj_consume_token(209); break; default: jj_la1[119] = jj_gen; ; } {if (true) return TType.numericType(data_type, size, scale);} break; case BIT: case BOOLEAN: data_type = GetBooleanSQLType(); {if (true) return TType.booleanType(data_type);} break; case DATE: case TIME: case TIMESTAMP: data_type = GetDateSQLType(); {if (true) return TType.dateType(data_type);} break; case LONG: case BLOB: case BINARY: case VARBINARY: case LONGVARBINARY: data_type = GetBinarySQLType(); switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case 208: jj_consume_token(208); size = PositiveIntegerConstant(); jj_consume_token(209); break; default: jj_la1[120] = jj_gen; ; } {if (true) return TType.binaryType(data_type, size);} break; default: jj_la1[122] = jj_gen; jj_consume_token(-1); throw new ParseException(); } } } throw new Error("Missing return statement in function"); } // Data type of a ColumnDef (eg. 
"varchar(50)", etc) final public void ColumnDataType(ColumnDef column) throws ParseException { TType type; type = GetTType(); column.setDataType(type); } final public ConstraintDef TableConstraintDefinition() throws ParseException { ConstraintDef constraint = new ConstraintDef(); ArrayList column_list = new ArrayList(); ArrayList column_list2 = new ArrayList(); String constraint_name; String update_rule = "NO ACTION"; String delete_rule = "NO ACTION"; Expression expression; String name; String reference_table; Token t; switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case CONSTRAINT: jj_consume_token(CONSTRAINT); constraint_name = ConstraintName(); constraint.setName(constraint_name); break; default: jj_la1[123] = jj_gen; ; } switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case PRIMARY: jj_consume_token(PRIMARY); jj_consume_token(KEY); jj_consume_token(208); BasicColumnList(column_list); jj_consume_token(209); constraint.setPrimaryKey(column_list); break; case UNIQUE: jj_consume_token(UNIQUE); jj_consume_token(208); BasicColumnList(column_list); jj_consume_token(209); constraint.setUnique(column_list); break; case CHECK: jj_consume_token(CHECK); jj_consume_token(208); expression = DoExpression(); jj_consume_token(209); constraint.setCheck(expression); break; case FOREIGN: jj_consume_token(FOREIGN); jj_consume_token(KEY); jj_consume_token(208); BasicColumnList(column_list); jj_consume_token(209); jj_consume_token(REFERENCES); reference_table = TableName(); switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case 208: jj_consume_token(208); BasicColumnList(column_list2); jj_consume_token(209); break; default: jj_la1[124] = jj_gen; ; } switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case ON: if (jj_2_7(2)) { jj_consume_token(ON); jj_consume_token(DELETE); delete_rule = ReferentialTrigger(); switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case ON: jj_consume_token(ON); jj_consume_token(UPDATE); update_rule = ReferentialTrigger(); break; default: jj_la1[125] = jj_gen; ; } } else { switch 
((jj_ntk==-1)?jj_ntk():jj_ntk) { case ON: jj_consume_token(ON); jj_consume_token(UPDATE); update_rule = ReferentialTrigger(); switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case ON: jj_consume_token(ON); jj_consume_token(DELETE); delete_rule = ReferentialTrigger(); break; default: jj_la1[126] = jj_gen; ; } break; default: jj_la1[127] = jj_gen; jj_consume_token(-1); throw new ParseException(); } } break; default: jj_la1[128] = jj_gen; ; } constraint.setForeignKey(reference_table, column_list, column_list2, delete_rule, update_rule); break; default: jj_la1[129] = jj_gen; jj_consume_token(-1); throw new ParseException(); } switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case INITIALLY: case DEFERRABLE: case NOT: ConstraintAttributes(constraint); break; default: jj_la1[130] = jj_gen; ; } {if (true) return constraint;} throw new Error("Missing return statement in function"); } final public String ReferentialTrigger() throws ParseException { Token t; String trigger_str; switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case NO: jj_consume_token(NO); jj_consume_token(ACTION); trigger_str="NO ACTION"; break; case RESTRICT: jj_consume_token(RESTRICT); trigger_str="NO ACTION"; break; case CASCADE: jj_consume_token(CASCADE); trigger_str="CASCADE"; break; default: jj_la1[131] = jj_gen; if (jj_2_8(2)) { jj_consume_token(SET); jj_consume_token(NULL_LITERAL); trigger_str="SET NULL"; } else { switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case SET: jj_consume_token(SET); jj_consume_token(SQLDEFAULT); trigger_str="SET DEFAULT"; break; default: jj_la1[132] = jj_gen; jj_consume_token(-1); throw new ParseException(); } } } {if (true) return trigger_str;} throw new Error("Missing return statement in function"); } final public void ConstraintAttributes(ConstraintDef constraint) throws ParseException { switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case INITIALLY: jj_consume_token(INITIALLY); switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case DEFERRED: jj_consume_token(DEFERRED); constraint.setInitiallyDeferred(); break; case 
IMMEDIATE: jj_consume_token(IMMEDIATE); break; default: jj_la1[133] = jj_gen; jj_consume_token(-1); throw new ParseException(); } switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case DEFERRABLE: case NOT: switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case NOT: jj_consume_token(NOT); jj_consume_token(DEFERRABLE); constraint.setNotDeferrable(); break; case DEFERRABLE: jj_consume_token(DEFERRABLE); break; default: jj_la1[134] = jj_gen; jj_consume_token(-1); throw new ParseException(); } break; default: jj_la1[135] = jj_gen; ; } break; case DEFERRABLE: case NOT: switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case NOT: jj_consume_token(NOT); jj_consume_token(DEFERRABLE); constraint.setNotDeferrable(); break; case DEFERRABLE: jj_consume_token(DEFERRABLE); break; default: jj_la1[136] = jj_gen; jj_consume_token(-1); throw new ParseException(); } switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case INITIALLY: jj_consume_token(INITIALLY); switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case DEFERRED: jj_consume_token(DEFERRED); constraint.setInitiallyDeferred(); break; case IMMEDIATE: jj_consume_token(IMMEDIATE); break; default: jj_la1[137] = jj_gen; jj_consume_token(-1); throw new ParseException(); } break; default: jj_la1[138] = jj_gen; ; } break; default: jj_la1[139] = jj_gen; jj_consume_token(-1); throw new ParseException(); } } // A list of column names final public ArrayList BasicColumnList(ArrayList list) throws ParseException { String col_name; col_name = ColumnName(); list.add(col_name); label_12: while (true) { switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case 210: ; break; default: jj_la1[140] = jj_gen; break label_12; } jj_consume_token(210); col_name = ColumnName(); list.add(col_name); } {if (true) return list;} throw new Error("Missing return statement in function"); } // A list of user names final public ArrayList UserNameList(ArrayList list) throws ParseException { String username; username = UserName(); list.add(username); label_13: while (true) { switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case 210: ; 
break; default: jj_la1[141] = jj_gen; break label_13; } jj_consume_token(210); username = UserName(); list.add(username); } {if (true) return list;} throw new Error("Missing return statement in function"); } final public void ConditionsExpression(SearchExpression se) throws ParseException { Expression exp; exp = DoExpression(); se.setFromExpression(exp); } final public Expression ExpressionTest() throws ParseException { Expression exp; exp = DoExpression(); jj_consume_token(207); {if (true) return exp;} throw new Error("Missing return statement in function"); } final public Expression DoExpression() throws ParseException { Stack stack = new Stack(); Expression exp = new Expression(); expression(exp, stack); expEnd(exp, stack); // Normalize the expression (remove any NOT operators) Expression normalized_exp = Util.normalize(exp); normalized_exp.copyTextFrom(exp); {if (true) return normalized_exp;} throw new Error("Missing return statement in function"); } final public Expression DoNonBooleanExpression() throws ParseException { Stack stack = new Stack(); Expression exp = new Expression(); nonBooleanExpression(exp, stack); expEnd(exp, stack); {if (true) return exp;} throw new Error("Missing return statement in function"); } /** * Parse an expression. */ final public void expression(Expression exp, Stack stack) throws ParseException { Operand(exp, stack); label_14: while (true) { if (jj_2_9(2)) { ; } else { break label_14; } OpPart(exp, stack); } } /** * Parses a non-boolean expression. 
*/ final public void nonBooleanExpression(Expression exp, Stack stack) throws ParseException { Operand(exp, stack); label_15: while (true) { if (jj_2_10(2)) { ; } else { break label_15; } switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case CONCAT: StringOperator(exp, stack); break; case STAR: case DIVIDE: case ADD: case SUBTRACT: NumericOperator(exp, stack); break; default: jj_la1[142] = jj_gen; jj_consume_token(-1); throw new ParseException(); } Operand(exp, stack); } } final public void OpPart(Expression exp, Stack stack) throws ParseException { Token t; // SelectStatement select; // Expression[] exp_arr; Expression regex_expression; Object regex_ob; if (jj_2_11(3)) { switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case ASSIGNMENT: case EQUALS: case GR: case LE: case GREQ: case LEEQ: case NOTEQ: case IS: case LIKE: case AND: case OR: case NOT: BooleanOperator(exp, stack); break; case STAR: case DIVIDE: case ADD: case SUBTRACT: NumericOperator(exp, stack); break; case CONCAT: StringOperator(exp, stack); break; default: jj_la1[143] = jj_gen; jj_consume_token(-1); throw new ParseException(); } Operand(exp, stack); } else { switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case REGEX_LITERAL: case REGEX: switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case REGEX: jj_consume_token(REGEX); exp.text().append(" regex "); expOperator(exp, stack, Operator.get("regex")); expression(exp, stack); break; case REGEX_LITERAL: t = jj_consume_token(REGEX_LITERAL); regex_ob = Util.toParamObject(t, case_insensitive_identifiers); exp.text().append(" regex " + regex_ob); expOperator(exp, stack, Operator.get("regex")); exp.addElement(regex_ob); break; default: jj_la1[144] = jj_gen; jj_consume_token(-1); throw new ParseException(); } break; default: jj_la1[145] = jj_gen; if (jj_2_12(2)) { SubQueryOperator(exp, stack); SubQueryExpression(exp, stack); } else { switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case BETWEEN: case NOT: BetweenPredicate(exp, stack); break; default: jj_la1[146] = jj_gen; jj_consume_token(-1); throw new 
ParseException(); } } } } } final public void Operand(Expression exp, Stack stack) throws ParseException { Token t, tt; FunctionDef f; Expression[] exp_list; String time_fname; boolean negative = false; Object param_ob; switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case 208: jj_consume_token(208); stack.push(Operator.get("(")); exp.text().append("("); expression(exp, stack); jj_consume_token(209); expEndParen(exp, stack); exp.text().append(")"); break; case PARAMETER_REF: t = jj_consume_token(PARAMETER_REF); Object param_resolve = createSubstitution(t.image); exp.addElement(param_resolve); exp.text().append('?'); break; default: jj_la1[153] = jj_gen; if (jj_2_13(2)) { jj_consume_token(NOT); expOperator(exp, stack, Operator.get("not")); exp.text().append(" not "); Operand(exp, stack); } else if (jj_2_14(3)) { f = Function(); exp.addElement(f); exp.text().append(f); } else { switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case DATE: case TIME: case TIMESTAMP: switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case DATE: tt = jj_consume_token(DATE); time_fname="DATEOB"; break; case TIME: tt = jj_consume_token(TIME); time_fname="TIMEOB"; break; case TIMESTAMP: tt = jj_consume_token(TIMESTAMP); time_fname="TIMESTAMPOB"; break; default: jj_la1[147] = jj_gen; jj_consume_token(-1); throw new ParseException(); } t = jj_consume_token(STRING_LITERAL); Object param_ob1 = Util.toParamObject(t, case_insensitive_identifiers); exp_list = new Expression[] { new Expression(param_ob1) }; f = Util.resolveFunctionName(time_fname, exp_list); exp.addElement(f); exp.text().append(tt.image).append(" ").append(t.image); break; case CURRENT_TIME: case CURRENT_DATE: case CURRENT_TIMESTAMP: switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case CURRENT_TIMESTAMP: tt = jj_consume_token(CURRENT_TIMESTAMP); time_fname="TIMESTAMPOB"; break; case CURRENT_TIME: tt = jj_consume_token(CURRENT_TIME); time_fname="TIMEOB"; break; case CURRENT_DATE: tt = jj_consume_token(CURRENT_DATE); time_fname="DATEOB"; break; default: jj_la1[148] = 
jj_gen; jj_consume_token(-1); throw new ParseException(); } exp_list = new Expression[0]; f = Util.resolveFunctionName(time_fname, exp_list); exp.addElement(f); exp.text().append(tt.image); break; case NEW: jj_consume_token(NEW); f = JavaInstantiation(); exp.addElement(f); exp.text().append(f); break; case BOOLEAN_LITERAL: case NULL_LITERAL: case STRING_LITERAL: switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case STRING_LITERAL: t = jj_consume_token(STRING_LITERAL); break; case BOOLEAN_LITERAL: t = jj_consume_token(BOOLEAN_LITERAL); break; case NULL_LITERAL: t = jj_consume_token(NULL_LITERAL); break; default: jj_la1[149] = jj_gen; jj_consume_token(-1); throw new ParseException(); } param_ob = Util.toParamObject(t, case_insensitive_identifiers); exp.addElement(param_ob); exp.text().append(t.image); break; case ADD: case SUBTRACT: case NAME: case JAVA: case ACTION: case GROUPS: case OPTION: case ACCOUNT: case PASSWORD: case LANGUAGE: case PRIVILEGES: case NUMBER_LITERAL: case QUOTED_VARIABLE: case IDENTIFIER: case DOT_DELIMINATED_REF: case QUOTED_DELIMINATED_REF: switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case ADD: case SUBTRACT: switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case ADD: jj_consume_token(ADD); break; case SUBTRACT: jj_consume_token(SUBTRACT); negative = true; break; default: jj_la1[150] = jj_gen; jj_consume_token(-1); throw new ParseException(); } break; default: jj_la1[151] = jj_gen; ; } switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case NUMBER_LITERAL: t = jj_consume_token(NUMBER_LITERAL); break; case QUOTED_VARIABLE: t = jj_consume_token(QUOTED_VARIABLE); break; case DOT_DELIMINATED_REF: t = jj_consume_token(DOT_DELIMINATED_REF); break; case QUOTED_DELIMINATED_REF: t = jj_consume_token(QUOTED_DELIMINATED_REF); break; case NAME: case JAVA: case ACTION: case GROUPS: case OPTION: case ACCOUNT: case PASSWORD: case LANGUAGE: case PRIVILEGES: case IDENTIFIER: t = SQLIdentifier(); break; default: jj_la1[152] = jj_gen; jj_consume_token(-1); throw new ParseException(); } if 
(t.kind == SQLConstants.NUMBER_LITERAL) { param_ob = Util.parseNumberToken(t, negative); exp.addElement(param_ob); } else { param_ob = Util.toParamObject(t, case_insensitive_identifiers); if (negative) { exp.addElement(Util.zeroNumber()); exp.addElement(param_ob); exp.addElement(Operator.get("-")); } else { exp.addElement(param_ob); } } if (negative) { exp.text().append('-'); } exp.text().append(t.image); break; default: jj_la1[154] = jj_gen; jj_consume_token(-1); throw new ParseException(); } } } } final public void SubQueryExpression(Expression exp, Stack stack) throws ParseException { TableSelectExpression select; Expression[] exp_arr; jj_consume_token(208); switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case SELECT: select = GetTableSelectExpression(); exp.addElement(select); exp.text().append(" [SELECT]"); break; default: jj_la1[155] = jj_gen; exp_arr = ExpressionList(); exp.addElement(Util.toArrayParamObject(exp_arr)); exp.text().append(" (" + Util.expressionListToString(exp_arr) + ")"); } jj_consume_token(209); } // Parses a simple positive integer constant. 
final public int PositiveIntegerConstant() throws ParseException { Token t; t = jj_consume_token(NUMBER_LITERAL); int val = Integer.parseInt(t.image); if (val < 0) generateParseException(); {if (true) return val;} throw new Error("Missing return statement in function"); } final public void SubQueryOperator(Expression exp, Stack stack) throws ParseException { Token t; String op_string; String query_type = "SINGLE"; Operator op; if (jj_2_15(2)) { switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case IN: jj_consume_token(IN); op = Operator.get("IN"); break; case NOT: jj_consume_token(NOT); jj_consume_token(IN); op = Operator.get("NOT IN"); break; default: jj_la1[156] = jj_gen; jj_consume_token(-1); throw new ParseException(); } } else { switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case ASSIGNMENT: case EQUALS: case GR: case LE: case GREQ: case LEEQ: case NOTEQ: op_string = GetSubQueryBooleanOperator(); op = Operator.get(op_string); switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case ALL: case ANY: case SOME: switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case ANY: t = jj_consume_token(ANY); break; case ALL: t = jj_consume_token(ALL); break; case SOME: t = jj_consume_token(SOME); break; default: jj_la1[157] = jj_gen; jj_consume_token(-1); throw new ParseException(); } query_type=t.image; break; default: jj_la1[158] = jj_gen; ; } op = op.getSubQueryForm(query_type); break; default: jj_la1[159] = jj_gen; jj_consume_token(-1); throw new ParseException(); } } expOperator(exp, stack, op); exp.text().append(" " + op + " "); } final public void BetweenPredicate(Expression exp, Stack stack) throws ParseException { boolean not_s = false; Expression exp1, exp2; switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case NOT: jj_consume_token(NOT); not_s = true; break; default: jj_la1[160] = jj_gen; ; } jj_consume_token(BETWEEN); exp1 = DoNonBooleanExpression(); jj_consume_token(AND); exp2 = DoNonBooleanExpression(); // Flush the operator stack to precedence 8 flushOperatorStack(exp, stack, 8); // Get the end expression 
Expression end_exp = exp.getEndExpression(); if (not_s) { exp.concat(exp1); exp.addElement(Operator.get("<")); exp.concat(end_exp); exp.concat(exp2); exp.addElement(Operator.get(">")); exp.addElement(Operator.get("or")); exp.text().append(" not between "); } else { exp.concat(exp1); exp.addElement(Operator.get(">=")); exp.concat(end_exp); exp.concat(exp2); exp.addElement(Operator.get("<=")); exp.addElement(Operator.get("and")); exp.text().append(" between "); } exp.text().append(exp1.text().toString()); exp.text().append(" and "); exp.text().append(exp2.text().toString()); } final public void BooleanOperator(Expression exp, Stack stack) throws ParseException { Token t; String op_string; Operator op; op_string = GetBooleanOperator(); op = Operator.get(op_string); expOperator(exp, stack, op); exp.text().append(" " + op + " "); } final public void NumericOperator(Expression exp, Stack stack) throws ParseException { Token t; String op_string; Operator op; op_string = GetNumericOperator(); op = Operator.get(op_string); expOperator(exp, stack, op); exp.text().append(" " + op + " "); } final public void StringOperator(Expression exp, Stack stack) throws ParseException { Token t; String op_string; Operator op; op_string = GetStringOperator(); op = Operator.get(op_string); expOperator(exp, stack, op); exp.text().append(" " + op + " "); } final public String GetBooleanOperator() throws ParseException { Token t; switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case ASSIGNMENT: t = jj_consume_token(ASSIGNMENT); break; case EQUALS: t = jj_consume_token(EQUALS); break; case GR: t = jj_consume_token(GR); break; case LE: t = jj_consume_token(LE); break; case GREQ: t = jj_consume_token(GREQ); break; case LEEQ: t = jj_consume_token(LEEQ); break; case NOTEQ: t = jj_consume_token(NOTEQ); break; default: jj_la1[161] = jj_gen; if (jj_2_16(2)) { jj_consume_token(IS); jj_consume_token(NOT); {if (true) return "IS NOT";} } else { switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case IS: t = 
jj_consume_token(IS); break; case LIKE: t = jj_consume_token(LIKE); break; case NOT: jj_consume_token(NOT); jj_consume_token(LIKE); {if (true) return "NOT LIKE";} break; case AND: t = jj_consume_token(AND); break; case OR: t = jj_consume_token(OR); break; default: jj_la1[162] = jj_gen; jj_consume_token(-1); throw new ParseException(); } } } {if (true) return t.image;} throw new Error("Missing return statement in function"); } final public String GetSubQueryBooleanOperator() throws ParseException { Token t; switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case ASSIGNMENT: t = jj_consume_token(ASSIGNMENT); break; case EQUALS: t = jj_consume_token(EQUALS); break; case GR: t = jj_consume_token(GR); break; case LE: t = jj_consume_token(LE); break; case GREQ: t = jj_consume_token(GREQ); break; case LEEQ: t = jj_consume_token(LEEQ); break; case NOTEQ: t = jj_consume_token(NOTEQ); break; default: jj_la1[163] = jj_gen; jj_consume_token(-1); throw new ParseException(); } {if (true) return t.image;} throw new Error("Missing return statement in function"); } final public String GetNumericOperator() throws ParseException { Token t; switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case DIVIDE: t = jj_consume_token(DIVIDE); break; case ADD: t = jj_consume_token(ADD); break; case SUBTRACT: t = jj_consume_token(SUBTRACT); break; case STAR: t = jj_consume_token(STAR); break; default: jj_la1[164] = jj_gen; jj_consume_token(-1); throw new ParseException(); } {if (true) return t.image;} throw new Error("Missing return statement in function"); } final public String GetStringOperator() throws ParseException { Token t; t = jj_consume_token(CONCAT); {if (true) return t.image;} throw new Error("Missing return statement in function"); } final public Token FunctionIdentifier() throws ParseException { Token t; switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case IF: t = jj_consume_token(IF); break; case USER: t = jj_consume_token(USER); break; case IDENTIFIER: t = jj_consume_token(IDENTIFIER); break; default: 
jj_la1[165] = jj_gen; jj_consume_token(-1); throw new ParseException(); } {if (true) return t;} throw new Error("Missing return statement in function"); } final public FunctionDef Function() throws ParseException { Token t, t2 = null, t3 = null; FunctionDef f; Expression exp1, exp2; Expression[] exp_list; TType cast_type; switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case COUNT: t = jj_consume_token(COUNT); jj_consume_token(208); switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case DISTINCT: jj_consume_token(DISTINCT); t.image="distinct_count"; break; default: jj_la1[166] = jj_gen; ; } exp_list = FunctionParams(); jj_consume_token(209); break; case TRIM: t = jj_consume_token(TRIM); jj_consume_token(208); if (jj_2_17(3)) { switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case BOTH: case LEADING: case TRAILING: switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case LEADING: t2 = jj_consume_token(LEADING); break; case BOTH: t2 = jj_consume_token(BOTH); break; case TRAILING: t2 = jj_consume_token(TRAILING); break; default: jj_la1[167] = jj_gen; jj_consume_token(-1); throw new ParseException(); } break; default: jj_la1[168] = jj_gen; ; } switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case STRING_LITERAL: t3 = jj_consume_token(STRING_LITERAL); break; default: jj_la1[169] = jj_gen; ; } jj_consume_token(FROM); } else { ; } exp1 = DoExpression(); jj_consume_token(209); exp_list = new Expression[3]; String ttype = t2 == null ? "both" : t2.image.toLowerCase(); Object str_char = t3 == null ? 
TObject.stringVal(" ") : Util.toParamObject(t3, case_insensitive_identifiers); exp_list[0] = new Expression(TObject.stringVal(ttype)); exp_list[0].text().append("'" + ttype + "'"); exp_list[1] = new Expression(str_char); exp_list[1].text().append("'" + str_char + "'"); exp_list[2] = exp1; {if (true) return Util.resolveFunctionName("sql_trim", exp_list);} break; case CAST: t = jj_consume_token(CAST); jj_consume_token(208); exp1 = DoExpression(); jj_consume_token(AS); cast_type = GetTType(); jj_consume_token(209); exp_list = new Expression[2]; String enc_form = TType.asEncodedString(cast_type); exp_list[0] = exp1; exp_list[1] = new Expression(TObject.stringVal(enc_form)); exp_list[1].text().append("'" + enc_form + "'"); {if (true) return Util.resolveFunctionName("sql_cast", exp_list);} break; case IF: case USER: case IDENTIFIER: t = FunctionIdentifier(); jj_consume_token(208); exp_list = FunctionParams(); jj_consume_token(209); break; default: jj_la1[170] = jj_gen; jj_consume_token(-1); throw new ParseException(); } {if (true) return Util.resolveFunctionName(t.image, exp_list);} throw new Error("Missing return statement in function"); } // An instantiation of a java object. For example, 'java.awt.Point(40, 30)' final public FunctionDef JavaInstantiation() throws ParseException { Token t; Expression[] args; // PENDING: Handling arrays (eg. 
'java.lang.String[] { 'Tobias', 'Downer' }' or 'double[] { 25, 2, 75, 26 }' ) t = jj_consume_token(DOT_DELIMINATED_REF); jj_consume_token(208); args = ExpressionList(); jj_consume_token(209); Expression[] comp_args = new Expression[args.length + 1]; System.arraycopy(args, 0, comp_args, 1, args.length); comp_args[0] = new Expression(TObject.stringVal(t.image)); comp_args[0].text().append("'" + t.image + "'"); {if (true) return Util.resolveFunctionName("_new_JavaObject", comp_args);} throw new Error("Missing return statement in function"); } // Parameters for a function final public Expression[] FunctionParams() throws ParseException { Expression[] exp_list; switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case STAR: jj_consume_token(STAR); exp_list = FunctionFactory.GLOB_LIST; break; default: jj_la1[171] = jj_gen; exp_list = ExpressionList(); } {if (true) return exp_list;} throw new Error("Missing return statement in function"); } final public Expression[] ExpressionList() throws ParseException { ArrayList list = new ArrayList(); Expression e; switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case ADD: case SUBTRACT: case BOOLEAN_LITERAL: case NULL_LITERAL: case IF: case NEW: case TRIM: case USER: case CAST: case NAME: case JAVA: case COUNT: case ACTION: case GROUPS: case OPTION: case ACCOUNT: case PASSWORD: case LANGUAGE: case PRIVILEGES: case DATE: case TIME: case TIMESTAMP: case CURRENT_TIME: case CURRENT_DATE: case CURRENT_TIMESTAMP: case NOT: case NUMBER_LITERAL: case STRING_LITERAL: case QUOTED_VARIABLE: case IDENTIFIER: case DOT_DELIMINATED_REF: case QUOTED_DELIMINATED_REF: case PARAMETER_REF: case 208: e = DoExpression(); list.add(e); label_16: while (true) { switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case 210: ; break; default: jj_la1[172] = jj_gen; break label_16; } jj_consume_token(210); e = DoExpression(); list.add(e); } break; default: jj_la1[173] = jj_gen; ; } {if (true) return (Expression[]) list.toArray(new Expression[list.size()]);} throw new Error("Missing return 
statement in function"); } final public String GetComposite() throws ParseException { Token name; switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case UNION: name = jj_consume_token(UNION); break; case INTERSECT: name = jj_consume_token(INTERSECT); break; case EXCEPT: name = jj_consume_token(EXCEPT); break; default: jj_la1[174] = jj_gen; jj_consume_token(-1); throw new ParseException(); } {if (true) return name.image;} throw new Error("Missing return statement in function"); } final public String TableName() throws ParseException { Token name; switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case QUOTED_VARIABLE: name = jj_consume_token(QUOTED_VARIABLE); break; case NAME: case JAVA: case ACTION: case GROUPS: case OPTION: case ACCOUNT: case PASSWORD: case LANGUAGE: case PRIVILEGES: case IDENTIFIER: name = SQLIdentifier(); break; case OLD: name = jj_consume_token(OLD); break; case NEW: name = jj_consume_token(NEW); break; case DOT_DELIMINATED_REF: name = jj_consume_token(DOT_DELIMINATED_REF); break; case QUOTED_DELIMINATED_REF: name = jj_consume_token(QUOTED_DELIMINATED_REF); break; default: jj_la1[175] = jj_gen; jj_consume_token(-1); throw new ParseException(); } {if (true) return caseCheck(Util.asNonQuotedRef(name));} throw new Error("Missing return statement in function"); } final public String SequenceName() throws ParseException { Token name; switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case QUOTED_VARIABLE: name = jj_consume_token(QUOTED_VARIABLE); break; case NAME: case JAVA: case ACTION: case GROUPS: case OPTION: case ACCOUNT: case PASSWORD: case LANGUAGE: case PRIVILEGES: case IDENTIFIER: name = SQLIdentifier(); break; case DOT_DELIMINATED_REF: name = jj_consume_token(DOT_DELIMINATED_REF); break; case QUOTED_DELIMINATED_REF: name = jj_consume_token(QUOTED_DELIMINATED_REF); break; default: jj_la1[176] = jj_gen; jj_consume_token(-1); throw new ParseException(); } {if (true) return caseCheck(Util.asNonQuotedRef(name));} throw new Error("Missing return statement in function"); } 
final public String TriggerName() throws ParseException { Token name; switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case QUOTED_VARIABLE: name = jj_consume_token(QUOTED_VARIABLE); break; case NAME: case JAVA: case ACTION: case GROUPS: case OPTION: case ACCOUNT: case PASSWORD: case LANGUAGE: case PRIVILEGES: case IDENTIFIER: name = SQLIdentifier(); break; default: jj_la1[177] = jj_gen; jj_consume_token(-1); throw new ParseException(); } {if (true) return caseCheck(Util.asNonQuotedRef(name));} throw new Error("Missing return statement in function"); } final public String IndexName() throws ParseException { Token name; switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case QUOTED_VARIABLE: name = jj_consume_token(QUOTED_VARIABLE); break; case NAME: case JAVA: case ACTION: case GROUPS: case OPTION: case ACCOUNT: case PASSWORD: case LANGUAGE: case PRIVILEGES: case IDENTIFIER: name = SQLIdentifier(); break; default: jj_la1[178] = jj_gen; jj_consume_token(-1); throw new ParseException(); } {if (true) return caseCheck(Util.asNonQuotedRef(name));} throw new Error("Missing return statement in function"); } // A username final public String UserName() throws ParseException { Token name; switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case QUOTED_VARIABLE: name = jj_consume_token(QUOTED_VARIABLE); break; case IDENTIFIER: name = jj_consume_token(IDENTIFIER); break; case PUBLIC: name = jj_consume_token(PUBLIC); break; default: jj_la1[179] = jj_gen; jj_consume_token(-1); throw new ParseException(); } {if (true) return caseCheck(Util.asNonQuotedRef(name));} throw new Error("Missing return statement in function"); } // Name of a schema final public String SchemaName() throws ParseException { Token name; switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case QUOTED_VARIABLE: name = jj_consume_token(QUOTED_VARIABLE); break; case NAME: case JAVA: case ACTION: case GROUPS: case OPTION: case ACCOUNT: case PASSWORD: case LANGUAGE: case PRIVILEGES: case IDENTIFIER: name = SQLIdentifier(); break; default: jj_la1[180] = 
jj_gen; jj_consume_token(-1); throw new ParseException(); } {if (true) return caseCheck(Util.asNonQuotedRef(name));} throw new Error("Missing return statement in function"); } // Name of a constraint name final public String ConstraintName() throws ParseException { Token name; switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case QUOTED_VARIABLE: name = jj_consume_token(QUOTED_VARIABLE); break; case NAME: case JAVA: case ACTION: case GROUPS: case OPTION: case ACCOUNT: case PASSWORD: case LANGUAGE: case PRIVILEGES: case IDENTIFIER: name = SQLIdentifier(); break; default: jj_la1[181] = jj_gen; jj_consume_token(-1); throw new ParseException(); } {if (true) return caseCheck(Util.asNonQuotedRef(name));} throw new Error("Missing return statement in function"); } // Parses a column name final public String ColumnName() throws ParseException { Token name; switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case QUOTED_VARIABLE: name = jj_consume_token(QUOTED_VARIABLE); break; case NAME: case JAVA: case ACTION: case GROUPS: case OPTION: case ACCOUNT: case PASSWORD: case LANGUAGE: case PRIVILEGES: case IDENTIFIER: name = SQLIdentifier(); break; case DOT_DELIMINATED_REF: name = jj_consume_token(DOT_DELIMINATED_REF); break; case QUOTED_DELIMINATED_REF: name = jj_consume_token(QUOTED_DELIMINATED_REF); break; default: jj_la1[182] = jj_gen; jj_consume_token(-1); throw new ParseException(); } {if (true) return caseCheck(Util.asNonQuotedRef(name));} throw new Error("Missing return statement in function"); } // Parses a column name as a Variable object final public Variable ColumnNameVariable() throws ParseException { Token name; switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case QUOTED_VARIABLE: name = jj_consume_token(QUOTED_VARIABLE); break; case NAME: case JAVA: case ACTION: case GROUPS: case OPTION: case ACCOUNT: case PASSWORD: case LANGUAGE: case PRIVILEGES: case IDENTIFIER: name = SQLIdentifier(); break; case DOT_DELIMINATED_REF: name = jj_consume_token(DOT_DELIMINATED_REF); break; case 
QUOTED_DELIMINATED_REF: name = jj_consume_token(QUOTED_DELIMINATED_REF); break; default: jj_la1[183] = jj_gen; jj_consume_token(-1); throw new ParseException(); } {if (true) return (Variable) Util.toParamObject(name, case_insensitive_identifiers);} throw new Error("Missing return statement in function"); } // Parses an aliased table name final public String TableAliasName() throws ParseException { Token name; switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case QUOTED_VARIABLE: name = jj_consume_token(QUOTED_VARIABLE); break; case NAME: case JAVA: case ACTION: case GROUPS: case OPTION: case ACCOUNT: case PASSWORD: case LANGUAGE: case PRIVILEGES: case IDENTIFIER: name = SQLIdentifier(); break; default: jj_la1[184] = jj_gen; jj_consume_token(-1); throw new ParseException(); } {if (true) return caseCheck(Util.asNonQuotedRef(name));} throw new Error("Missing return statement in function"); } // Parses a procedure name final public String ProcedureName() throws ParseException { Token name; switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case QUOTED_VARIABLE: name = jj_consume_token(QUOTED_VARIABLE); break; case NAME: case JAVA: case ACTION: case GROUPS: case OPTION: case ACCOUNT: case PASSWORD: case LANGUAGE: case PRIVILEGES: case IDENTIFIER: name = SQLIdentifier(); break; case DOT_DELIMINATED_REF: name = jj_consume_token(DOT_DELIMINATED_REF); break; case QUOTED_DELIMINATED_REF: name = jj_consume_token(QUOTED_DELIMINATED_REF); break; default: jj_la1[185] = jj_gen; jj_consume_token(-1); throw new ParseException(); } {if (true) return caseCheck(Util.asNonQuotedRef(name));} throw new Error("Missing return statement in function"); } // Parses a function name final public String FunctionName() throws ParseException { Token name; switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case QUOTED_VARIABLE: name = jj_consume_token(QUOTED_VARIABLE); break; case NAME: case JAVA: case ACTION: case GROUPS: case OPTION: case ACCOUNT: case PASSWORD: case LANGUAGE: case PRIVILEGES: case IDENTIFIER: name = 
SQLIdentifier(); break; case DOT_DELIMINATED_REF: name = jj_consume_token(DOT_DELIMINATED_REF); break; case QUOTED_DELIMINATED_REF: name = jj_consume_token(QUOTED_DELIMINATED_REF); break; default: jj_la1[186] = jj_gen; jj_consume_token(-1); throw new ParseException(); } {if (true) return caseCheck(Util.asNonQuotedRef(name));} throw new Error("Missing return statement in function"); } // Parses the name of an argument in a procedure/function declaration final public String ProcArgumentName() throws ParseException { Token name; switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case QUOTED_VARIABLE: name = jj_consume_token(QUOTED_VARIABLE); break; case NAME: case JAVA: case ACTION: case GROUPS: case OPTION: case ACCOUNT: case PASSWORD: case LANGUAGE: case PRIVILEGES: case IDENTIFIER: name = SQLIdentifier(); break; default: jj_la1[187] = jj_gen; jj_consume_token(-1); throw new ParseException(); } {if (true) return caseCheck(Util.asNonQuotedRef(name));} throw new Error("Missing return statement in function"); } // Parses an SQL identifier final public Token SQLIdentifier() throws ParseException { Token name; switch ((jj_ntk==-1)?jj_ntk():jj_ntk) { case IDENTIFIER: name = jj_consume_token(IDENTIFIER); break; case OPTION: name = jj_consume_token(OPTION); break; case ACCOUNT: name = jj_consume_token(ACCOUNT); break; case PASSWORD: name = jj_consume_token(PASSWORD); break; case PRIVILEGES: name = jj_consume_token(PRIVILEGES); break; case GROUPS: name = jj_consume_token(GROUPS); break; case LANGUAGE: name = jj_consume_token(LANGUAGE); break; case NAME: name = jj_consume_token(NAME); break; case JAVA: name = jj_consume_token(JAVA); break; case ACTION: name = jj_consume_token(ACTION); break; default: jj_la1[188] = jj_gen; jj_consume_token(-1); throw new ParseException(); } {if (true) return name;} throw new Error("Missing return statement in function"); } final private boolean jj_2_1(int xla) { jj_la = xla; jj_lastpos = jj_scanpos = token; boolean retval = !jj_3_1(); jj_save(0, xla); 
return retval; } final private boolean jj_2_2(int xla) { jj_la = xla; jj_lastpos = jj_scanpos = token; boolean retval = !jj_3_2(); jj_save(1, xla); return retval; } final private boolean jj_2_3(int xla) { jj_la = xla; jj_lastpos = jj_scanpos = token; boolean retval = !jj_3_3(); jj_save(2, xla); return retval; } final private boolean jj_2_4(int xla) { jj_la = xla; jj_lastpos = jj_scanpos = token; boolean retval = !jj_3_4(); jj_save(3, xla); return retval; } final private boolean jj_2_5(int xla) { jj_la = xla; jj_lastpos = jj_scanpos = token; boolean retval = !jj_3_5(); jj_save(4, xla); return retval; } final private boolean jj_2_6(int xla) { jj_la = xla; jj_lastpos = jj_scanpos = token; boolean retval = !jj_3_6(); jj_save(5, xla); return retval; } final private boolean jj_2_7(int xla) { jj_la = xla; jj_lastpos = jj_scanpos = token; boolean retval = !jj_3_7(); jj_save(6, xla); return retval; } final private boolean jj_2_8(int xla) { jj_la = xla; jj_lastpos = jj_scanpos = token; boolean retval = !jj_3_8(); jj_save(7, xla); return retval; } final private boolean jj_2_9(int xla) { jj_la = xla; jj_lastpos = jj_scanpos = token; boolean retval = !jj_3_9(); jj_save(8, xla); return retval; } final private boolean jj_2_10(int xla) { jj_la = xla; jj_lastpos = jj_scanpos = token; boolean retval = !jj_3_10(); jj_save(9, xla); return retval; } final private boolean jj_2_11(int xla) { jj_la = xla; jj_lastpos = jj_scanpos = token; boolean retval = !jj_3_11(); jj_save(10, xla); return retval; } final private boolean jj_2_12(int xla) { jj_la = xla; jj_lastpos = jj_scanpos = token; boolean retval = !jj_3_12(); jj_save(11, xla); return retval; } final private boolean jj_2_13(int xla) { jj_la = xla; jj_lastpos = jj_scanpos = token; boolean retval = !jj_3_13(); jj_save(12, xla); return retval; } final private boolean jj_2_14(int xla) { jj_la = xla; jj_lastpos = jj_scanpos = token; boolean retval = !jj_3_14(); jj_save(13, xla); return retval; } final private boolean jj_2_15(int xla) { 
jj_la = xla; jj_lastpos = jj_scanpos = token; boolean retval = !jj_3_15(); jj_save(14, xla); return retval; } final private boolean jj_2_16(int xla) { jj_la = xla; jj_lastpos = jj_scanpos = token; boolean retval = !jj_3_16(); jj_save(15, xla); return retval; } final private boolean jj_2_17(int xla) { jj_la = xla; jj_lastpos = jj_scanpos = token; boolean retval = !jj_3_17(); jj_save(16, xla); return retval; } final private boolean jj_3R_137() { Token xsp; xsp = jj_scanpos; if (jj_3R_138()) jj_scanpos = xsp; else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_36() { Token xsp; xsp = jj_scanpos; if (jj_3R_61()) { jj_scanpos = xsp; if (jj_3R_62()) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_130() { if (jj_scan_token(PASSWORD)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_120() { if (jj_scan_token(SOME)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_55() { if (jj_scan_token(TRAILING)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_20() { if (jj_3R_39()) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_136() { if (jj_scan_token(ACTION)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_19() { if (jj_3R_38()) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3_11() { Token xsp; xsp = jj_scanpos; if (jj_3R_22()) { jj_scanpos = xsp; if (jj_3R_23()) { jj_scanpos = xsp; if (jj_3R_24()) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == 
jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; if (jj_3R_21()) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_122() { if (jj_3R_137()) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_90() { if (jj_scan_token(NOT)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_18() { Token xsp; xsp = jj_scanpos; if (jj_3_11()) { jj_scanpos = xsp; if (jj_3R_36()) { jj_scanpos = xsp; if (jj_3_12()) { jj_scanpos = xsp; if (jj_3R_37()) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_63() { Token xsp; xsp = jj_scanpos; if (jj_3R_90()) jj_scanpos = xsp; else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; if (jj_scan_token(BETWEEN)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; if (jj_3R_91()) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_121() { if (jj_scan_token(STAR)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_29() { if (jj_scan_token(NOT)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; if (jj_scan_token(IN)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_119() { if (jj_scan_token(ALL)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_28() { if (jj_scan_token(IN)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final 
private boolean jj_3R_87() { Token xsp; xsp = jj_scanpos; if (jj_3R_121()) { jj_scanpos = xsp; if (jj_3R_122()) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_54() { if (jj_scan_token(BOTH)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_132() { if (jj_scan_token(GROUPS)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3_9() { if (jj_3R_18()) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_135() { if (jj_scan_token(JAVA)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_129() { if (jj_scan_token(ACCOUNT)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3_10() { Token xsp; xsp = jj_scanpos; if (jj_3R_19()) { jj_scanpos = xsp; if (jj_3R_20()) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; if (jj_3R_21()) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_118() { if (jj_scan_token(ANY)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3_6() { if (jj_3R_17()) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_85() { Token xsp; xsp = jj_scanpos; if (jj_3R_118()) { jj_scanpos = xsp; if (jj_3R_119()) { jj_scanpos = xsp; if (jj_3R_120()) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return 
false; } final private boolean jj_3R_48() { if (jj_3R_84()) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; Token xsp; xsp = jj_scanpos; if (jj_3R_85()) jj_scanpos = xsp; else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_126() { if (jj_3R_21()) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_83() { if (jj_scan_token(DOT_DELIMINATED_REF)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3_15() { Token xsp; xsp = jj_scanpos; if (jj_3R_28()) { jj_scanpos = xsp; if (jj_3R_29()) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_30() { Token xsp; xsp = jj_scanpos; if (jj_3R_53()) { jj_scanpos = xsp; if (jj_3R_54()) { jj_scanpos = xsp; if (jj_3R_55()) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_53() { if (jj_scan_token(LEADING)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_25() { Token xsp; xsp = jj_scanpos; if (jj_3_15()) { jj_scanpos = xsp; if (jj_3R_48()) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_134() { if (jj_scan_token(NAME)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_131() { if (jj_scan_token(PRIVILEGES)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_117() { if (jj_scan_token(NOTEQ)) return 
true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_128() { if (jj_scan_token(OPTION)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_82() { if (jj_3R_21()) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_127() { if (jj_scan_token(IDENTIFIER)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_52() { if (jj_3R_89()) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; if (jj_scan_token(208)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; if (jj_3R_87()) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; if (jj_scan_token(209)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_31() { if (jj_scan_token(STRING_LITERAL)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_98() { Token xsp; xsp = jj_scanpos; if (jj_3R_127()) { jj_scanpos = xsp; if (jj_3R_128()) { jj_scanpos = xsp; if (jj_3R_129()) { jj_scanpos = xsp; if (jj_3R_130()) { jj_scanpos = xsp; if (jj_3R_131()) { jj_scanpos = xsp; if (jj_3R_132()) { jj_scanpos = xsp; if (jj_3R_133()) { jj_scanpos = xsp; if (jj_3R_134()) { jj_scanpos = xsp; if (jj_3R_135()) { jj_scanpos = xsp; if (jj_3R_136()) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && 
jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3_17() { Token xsp; xsp = jj_scanpos; if (jj_3R_30()) jj_scanpos = xsp; else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; xsp = jj_scanpos; if (jj_3R_31()) jj_scanpos = xsp; else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; if (jj_scan_token(FROM)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_86() { if (jj_scan_token(DISTINCT)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_116() { if (jj_scan_token(LEEQ)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_51() { if (jj_scan_token(CAST)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; if (jj_scan_token(208)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; if (jj_3R_88()) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_91() { if (jj_3R_126()) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_125() { if (jj_scan_token(IDENTIFIER)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_115() { if (jj_scan_token(GREQ)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_104() { if (jj_scan_token(LEEQ)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_50() { if (jj_scan_token(TRIM)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; if (jj_scan_token(208)) return true; if (jj_la == 0 && jj_scanpos == 
jj_lastpos) return false; Token xsp; xsp = jj_scanpos; if (jj_3_17()) jj_scanpos = xsp; else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; if (jj_3R_88()) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3_5() { if (jj_scan_token(LONG)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; if (jj_scan_token(BINARY)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; if (jj_scan_token(VARYING)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3_4() { if (jj_scan_token(BINARY)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; if (jj_scan_token(VARYING)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_49() { if (jj_scan_token(COUNT)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; if (jj_scan_token(208)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; Token xsp; xsp = jj_scanpos; if (jj_3R_86()) jj_scanpos = xsp; else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; if (jj_3R_87()) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; if (jj_scan_token(209)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_26() { if (jj_scan_token(208)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_114() { if (jj_scan_token(LE)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_124() { if (jj_scan_token(USER)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_88() { if (jj_3R_82()) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean 
jj_3R_27() { Token xsp; xsp = jj_scanpos; if (jj_3R_49()) { jj_scanpos = xsp; if (jj_3R_50()) { jj_scanpos = xsp; if (jj_3R_51()) { jj_scanpos = xsp; if (jj_3R_52()) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_106() { if (jj_scan_token(IS)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_103() { if (jj_scan_token(GREQ)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_94() { if (jj_scan_token(SUBTRACT)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_113() { if (jj_scan_token(GR)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_123() { if (jj_scan_token(IF)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_58() { if (jj_scan_token(LONGVARCHAR)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_102() { if (jj_scan_token(LE)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_89() { Token xsp; xsp = jj_scanpos; if (jj_3R_123()) { jj_scanpos = xsp; if (jj_3R_124()) { jj_scanpos = xsp; if (jj_3R_125()) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_93() { if (jj_scan_token(ADD)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return 
false; } final private boolean jj_3R_112() { if (jj_scan_token(EQUALS)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_101() { if (jj_scan_token(GR)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_60() { if (jj_scan_token(CHARACTER)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_97() { if (jj_scan_token(SUBTRACT)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_80() { if (jj_3R_98()) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_57() { if (jj_scan_token(STRING)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_64() { if (jj_scan_token(CONCAT)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_79() { if (jj_scan_token(QUOTED_DELIMINATED_REF)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_78() { if (jj_scan_token(DOT_DELIMINATED_REF)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_77() { if (jj_scan_token(QUOTED_VARIABLE)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_76() { if (jj_scan_token(NUMBER_LITERAL)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_95() { if (jj_scan_token(STAR)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_92() { if (jj_scan_token(DIVIDE)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private 
boolean jj_3R_96() { if (jj_scan_token(ADD)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_75() { Token xsp; xsp = jj_scanpos; if (jj_3R_96()) { jj_scanpos = xsp; if (jj_3R_97()) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_35() { if (jj_scan_token(CLOB)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_59() { if (jj_scan_token(CHAR)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_34() { if (jj_scan_token(VARCHAR)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_56() { if (jj_scan_token(TEXT)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_33() { Token xsp; xsp = jj_scanpos; if (jj_3R_59()) { jj_scanpos = xsp; if (jj_3R_60()) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_65() { Token xsp; xsp = jj_scanpos; if (jj_3R_92()) { jj_scanpos = xsp; if (jj_3R_93()) { jj_scanpos = xsp; if (jj_3R_94()) { jj_scanpos = xsp; if (jj_3R_95()) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_32() { Token xsp; xsp = jj_scanpos; if (jj_3R_56()) { jj_scanpos = xsp; if (jj_3R_57()) { jj_scanpos = xsp; if (jj_3R_58()) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == 
jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_100() { if (jj_scan_token(EQUALS)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_46() { Token xsp; xsp = jj_scanpos; if (jj_3R_75()) jj_scanpos = xsp; else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; xsp = jj_scanpos; if (jj_3R_76()) { jj_scanpos = xsp; if (jj_3R_77()) { jj_scanpos = xsp; if (jj_3R_78()) { jj_scanpos = xsp; if (jj_3R_79()) { jj_scanpos = xsp; if (jj_3R_80()) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3_3() { if (jj_scan_token(LONG)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; if (jj_scan_token(CHARACTER)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; if (jj_scan_token(VARYING)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_17() { Token xsp; xsp = jj_scanpos; if (jj_3_2()) { jj_scanpos = xsp; if (jj_3_3()) { jj_scanpos = xsp; if (jj_3R_32()) { jj_scanpos = xsp; if (jj_3R_33()) { jj_scanpos = xsp; if (jj_3R_34()) { jj_scanpos = xsp; if (jj_3R_35()) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3_2() { if 
(jj_scan_token(CHARACTER)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; if (jj_scan_token(VARYING)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_74() { if (jj_scan_token(NULL_LITERAL)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_110() { if (jj_scan_token(OR)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_108() { if (jj_scan_token(NOT)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; if (jj_scan_token(LIKE)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_73() { if (jj_scan_token(BOOLEAN_LITERAL)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_111() { if (jj_scan_token(ASSIGNMENT)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_72() { if (jj_scan_token(STRING_LITERAL)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_84() { Token xsp; xsp = jj_scanpos; if (jj_3R_111()) { jj_scanpos = xsp; if (jj_3R_112()) { jj_scanpos = xsp; if (jj_3R_113()) { jj_scanpos = xsp; if (jj_3R_114()) { jj_scanpos = xsp; if (jj_3R_115()) { jj_scanpos = xsp; if (jj_3R_116()) { jj_scanpos = xsp; if (jj_3R_117()) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return 
false; } final private boolean jj_3R_45() { Token xsp; xsp = jj_scanpos; if (jj_3R_72()) { jj_scanpos = xsp; if (jj_3R_73()) { jj_scanpos = xsp; if (jj_3R_74()) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_44() { if (jj_scan_token(NEW)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; if (jj_3R_83()) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_109() { if (jj_scan_token(AND)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_107() { if (jj_scan_token(LIKE)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3_16() { if (jj_scan_token(IS)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; if (jj_scan_token(NOT)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_105() { if (jj_scan_token(NOTEQ)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_99() { if (jj_scan_token(ASSIGNMENT)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_71() { if (jj_scan_token(CURRENT_DATE)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_81() { Token xsp; xsp = jj_scanpos; if (jj_3R_99()) { jj_scanpos = xsp; if (jj_3R_100()) { jj_scanpos = xsp; if (jj_3R_101()) { jj_scanpos = xsp; if (jj_3R_102()) { jj_scanpos = xsp; if (jj_3R_103()) { jj_scanpos = xsp; if (jj_3R_104()) { jj_scanpos = xsp; if (jj_3R_105()) { jj_scanpos = xsp; if (jj_3_16()) { jj_scanpos = xsp; if (jj_3R_106()) { jj_scanpos = xsp; if 
(jj_3R_107()) { jj_scanpos = xsp; if (jj_3R_108()) { jj_scanpos = xsp; if (jj_3R_109()) { jj_scanpos = xsp; if (jj_3R_110()) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_70() { if (jj_scan_token(CURRENT_TIME)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_69() { if (jj_scan_token(CURRENT_TIMESTAMP)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_43() { Token xsp; xsp = jj_scanpos; if (jj_3R_69()) { jj_scanpos = xsp; if (jj_3R_70()) { jj_scanpos = xsp; if (jj_3R_71()) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_38() { if (jj_3R_64()) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_68() { if (jj_scan_token(TIMESTAMP)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_67() 
{ if (jj_scan_token(TIME)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_66() { if (jj_scan_token(DATE)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_42() { Token xsp; xsp = jj_scanpos; if (jj_3R_66()) { jj_scanpos = xsp; if (jj_3R_67()) { jj_scanpos = xsp; if (jj_3R_68()) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; if (jj_scan_token(STRING_LITERAL)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3_8() { if (jj_scan_token(SET)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; if (jj_scan_token(NULL_LITERAL)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3_14() { if (jj_3R_27()) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_39() { if (jj_3R_65()) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3_13() { if (jj_scan_token(NOT)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; if (jj_3R_21()) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3_1() { if (jj_scan_token(SET)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; if (jj_scan_token(GROUPS)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_41() { if (jj_scan_token(PARAMETER_REF)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_40() { if (jj_scan_token(208)) return true; if (jj_la == 0 && jj_scanpos == 
jj_lastpos) return false; if (jj_3R_82()) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_21() { Token xsp; xsp = jj_scanpos; if (jj_3R_40()) { jj_scanpos = xsp; if (jj_3R_41()) { jj_scanpos = xsp; if (jj_3_13()) { jj_scanpos = xsp; if (jj_3_14()) { jj_scanpos = xsp; if (jj_3R_42()) { jj_scanpos = xsp; if (jj_3R_43()) { jj_scanpos = xsp; if (jj_3R_44()) { jj_scanpos = xsp; if (jj_3R_45()) { jj_scanpos = xsp; if (jj_3R_46()) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; } else if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_47() { if (jj_3R_81()) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3_7() { if (jj_scan_token(ON)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; if (jj_scan_token(DELETE)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_62() { if (jj_scan_token(REGEX_LITERAL)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_37() { if (jj_3R_63()) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_24() { if (jj_3R_38()) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3_12() { if (jj_3R_25()) return true; if (jj_la == 0 && 
jj_scanpos == jj_lastpos) return false; if (jj_3R_26()) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_23() { if (jj_3R_39()) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_22() { if (jj_3R_47()) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_61() { if (jj_scan_token(REGEX)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; if (jj_3R_82()) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_138() { if (jj_3R_88()) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } final private boolean jj_3R_133() { if (jj_scan_token(LANGUAGE)) return true; if (jj_la == 0 && jj_scanpos == jj_lastpos) return false; return false; } public SQLTokenManager token_source; SimpleCharStream jj_input_stream; public Token token, jj_nt; private int jj_ntk; private Token jj_scanpos, jj_lastpos; private int jj_la; public boolean lookingAhead = false; private boolean jj_semLA; private int jj_gen; final private int[] jj_la1 = new int[189]; final private int[] jj_la1_0 = 
{0xffc00000,0x1,0x0,0x0,0x0,0x0,0x0,0x8000000,0x8000000,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x2000000,0x2000000,0x0,0x0,0x0,0x0,0x0,0x40000000,0x0,0x0,0x34000000,0x0,0x0,0x0,0x0,0x0,0x0,0x36000000,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x400000,0x0,0x0,0x1400000,0x1b0000,0x0,0x1b0000,0x0,0x0,0x0,0x0,0x0,0x1b0080,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x100000,0x0,0x0,0x100000,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x78080,0x7ff80,0x200000,0x200000,0x0,0x0,0x0,0x180000,0x30000,0x30000,0x0,0x0,0x1b0000,0x2000000,0x0,0x0,0x0,0x7f00,0x0,0x7f00,0x0,0x7f00,0x38080,0x0,0x0,0x0,0x0,0x0,0x0,0x80,0x0,0x1b0000,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,}; final private int[] jj_la1_1 = {0x40201a,0x0,0x80000000,0x80000000,0x0,0x0,0x0,0x0,0x0,0x0,0x100,0x0,0x0,0x0,0x0,0x0,0x0,0x100,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x2000,0x0,0x0,0x10000000,0x0,0x0,0x2000,0x0,0x0,0x0,0x0,0x0,0x2,0x80,0x0,0x0,0x0,0x0,0x0,0x30000,0x0,0x0,0x800,0x800,0x2000000,0x0,0x0,0x0,0x0,0x800,0x0,0x0,0x0,0x0,0x2000,0x0,0x0,0x40000,0x40020100,0x0,0x40020100,0x0,0x800,0x0,0x40,0x0,0x40020100,0x0,0x8008000,0x0,0x8008000,0x8008000,0x30000,0x40,0x30040,0x24000000,0x30000,0x0,0x24000000,0x0,0x24000000,0x0,0x24000000,0x24000000,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x80,0x80,0x80,0x80,0x0,0x0,0x400,0x2000,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x20,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x20000,0x0,0x0,0x1001800,0x1001800,0x0,0x0,0x0,0x20,0x0,0x0,0x100,0x0,0x800000,0x800000,0x0,0x40000100,0x0,0x0,0x40020100,0x0,0x30000,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,}; final private int[] jj_la1_2 = 
{0x40800000,0x0,0x24004,0x24004,0x0,0x400,0x8000,0x4004,0x4000,0x0,0x0,0x100000,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x100,0x1000,0x800,0x8000001,0x0,0x400,0x8000,0x0,0x0,0x0,0x0,0x0,0x0,0x400,0x2,0x800000,0x0,0x0,0x0,0x0,0x4000080,0x0,0x4000,0xb0004060,0x0,0x0,0x1000000,0x0,0x0,0x400,0x0,0x0,0x0,0x0,0x400000,0x0,0xb0100060,0x0,0x0,0x0,0xb0000060,0x0,0xb000026c,0x0,0xb000026c,0x0,0x0,0x0,0x0,0xb0000060,0xb000026c,0x0,0x0,0x0,0x0,0x0,0xb0000060,0x0,0xb0000060,0x12000,0xb0000060,0x10000,0x12000,0x80000,0x12000,0x80000,0x12000,0x12000,0xb0000060,0x0,0xb0000060,0xb0000070,0x0,0x0,0xb0100060,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x10,0x0,0x0,0x0,0x0,0x0,0x0,0x100000,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0xb0000060,0x0,0xb0000060,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x4,0x0,0x0,0x0,0x0,0x20c,0x0,0x0,0xb000026c,0x400000,0xb0000060,0xb0000060,0xb0000060,0xb0000060,0x0,0xb0000060,0xb0000060,0xb0000060,0xb0000060,0xb0000060,0xb0000060,0xb0000060,0xb0000060,0xb0000060,}; final private int[] jj_la1_3 = 
{0x0,0x0,0x644088,0x644008,0x8000000,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x44000,0x44000,0x200,0x0,0x80,0x0,0x0,0x80000000,0x80000,0x100000,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x20,0x20,0x8,0x0,0x0,0x0,0x0,0x0,0x0,0x8,0x0,0x0,0x0,0x0,0x41000408,0x0,0x0,0x0,0x20000000,0x0,0x0,0x0,0x40,0x4000000,0x0,0x2,0x100,0x41000580,0x100,0x0,0x100,0x41000500,0x0,0x41008400,0x0,0x41008400,0x0,0x20000000,0x0,0x0,0x41000400,0x41000400,0x0,0x0,0x0,0x0,0x0,0x41000400,0x0,0x41000400,0x0,0x41000400,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x41000400,0x0,0x41000400,0x41000400,0x0,0x0,0x41000480,0x8000,0x80,0x0,0x0,0x80,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x80,0x0,0x800000,0x0,0x10000000,0x0,0x0,0x0,0x10000000,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x2000,0x0,0x0,0x0,0x0,0x0,0x41000400,0x0,0x41000400,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x20000000,0x2000800,0x2000800,0x0,0x0,0x0,0x0,0x41000400,0x2,0x41000400,0x41000400,0x41000400,0x41000400,0x1,0x41000400,0x41000400,0x41000400,0x41000400,0x41000400,0x41000400,0x41000400,0x41000400,0x41000400,}; final private int[] jj_la1_4 = 
{0x0,0x0,0x10,0x0,0x0,0x0,0x0,0x0,0x0,0x10,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x40,0x0,0x40,0x200,0x0,0x0,0x0,0x8000,0x0,0x0,0x0,0x20,0x0,0xcc0,0x0,0x0,0x0,0x4c0,0x0,0x80000040,0x0,0x80000040,0x0,0x0,0x0,0x0,0x40,0x80000040,0x0,0x0,0x0,0x0,0x0,0x40,0x0,0x40,0x0,0x40,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x40,0x0,0x40,0xff000042,0x0,0x0,0xcc0,0x0,0x400,0x6000,0x6000,0x400,0x1e0000,0xe00000,0x40000000,0x20000002,0x68000002,0x2000000,0x6000000,0x1000000,0x80000000,0x10000000,0x0,0x0,0x0,0x1e0000,0xe00000,0x10000,0x0,0x0,0x0,0x0,0x97000000,0x80,0x0,0x0,0x0,0x0,0x0,0xc00,0x108,0x0,0x0,0x4,0x100,0x100,0x100,0x4,0x8,0x108,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x80000000,0x0,0x0,0x0,0x0,0x40,0x0,0x80000040,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x80000040,0x20,0x40,0x40,0x40,0x40,0x0,0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40,}; final private int[] jj_la1_5 = {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0xc0000,0x0,0x0,0x80000000,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0xe004001,0x0,0xe004001,0x0,0x0,0x0,0x0,0x0,0xe004001,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x3ffff,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x10010,0x0,0x10810,0x400,0x16ce,0x100,0x4001,0x22020,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x8000,0x277ef,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x1000000,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0xd0000000,0x20000000,0x20000000,0x0,0x4001,0xe000000,0x0,0x0,0x0,0x0,0x0,0xe004001,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0xd0000000,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0xe004001,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,}; final private int[] jj_la1_6 = 
{0x0,0x8000,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x10000,0x0,0x0,0x40000,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x10000,0x0,0x10000,0x20,0x10000,0x0,0x0,0x0,0x0,0x20,0x20,0x0,0x0,0x0,0x0,0xf0,0x40000,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0xf0,0x0,0x0,0x0,0xf0,0x0,0x110fe,0x40000,0x110fe,0x40000,0x0,0x40000,0x0,0x30,0x11cfe,0x40000,0x0,0x40000,0x0,0x0,0x100f0,0x0,0xf0,0x40000,0x100f0,0x0,0x40000,0x0,0x40000,0x0,0x40000,0x40000,0x30,0x40000,0x30,0x30,0x40000,0x40000,0xf0,0x0,0x2,0x0,0x0,0x2,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x140,0x10000,0x10000,0x0,0x0,0x0,0x40000,0x10000,0x10000,0x0,0x0,0x0,0x10000,0x0,0x0,0x0,0x0,0x0,0x2,0x0,0x0,0x0,0x2,0x2,0x2,0x0,0x0,0x2,0x40000,0x40000,0x0,0x2,0x0,0x0,0x2,0x0,0x0,0x8,0x0,0x0,0xf4,0x11000,0xfc,0x0,0x3,0x0,0x0,0x0,0x2,0x0,0x2,0x0,0x0,0x20,0x0,0x0,0x0,0x8,0x20,0x0,0x40000,0x110fe,0x0,0xf0,0xf0,0x30,0x30,0x30,0x30,0x30,0xf0,0xf0,0x30,0xf0,0xf0,0x30,0x20,}; final private JJCalls[] jj_2_rtns = new JJCalls[17]; private boolean jj_rescan = false; private int jj_gc = 0; public SQL(java.io.InputStream stream) { jj_input_stream = new SimpleCharStream(stream, 1, 1); token_source = new SQLTokenManager(jj_input_stream); token = new Token(); jj_ntk = -1; jj_gen = 0; for (int i = 0; i < 189; i++) jj_la1[i] = -1; for (int i = 0; i < jj_2_rtns.length; i++) jj_2_rtns[i] = new JJCalls(); } public void ReInit(java.io.InputStream stream) { jj_input_stream.ReInit(stream, 1, 1); token_source.ReInit(jj_input_stream); token = new Token(); jj_ntk = -1; jj_gen = 0; for (int i = 0; i < 189; i++) jj_la1[i] = -1; for (int i = 0; i < jj_2_rtns.length; i++) jj_2_rtns[i] = new JJCalls(); } public SQL(java.io.Reader stream) { jj_input_stream = new SimpleCharStream(stream, 1, 1); token_source = new SQLTokenManager(jj_input_stream); token = new Token(); jj_ntk = -1; jj_gen = 0; for (int i = 0; i < 189; i++) jj_la1[i] = -1; for (int i = 0; i < jj_2_rtns.length; i++) jj_2_rtns[i] = new JJCalls(); } public void ReInit(java.io.Reader 
stream) { jj_input_stream.ReInit(stream, 1, 1); token_source.ReInit(jj_input_stream); token = new Token(); jj_ntk = -1; jj_gen = 0; for (int i = 0; i < 189; i++) jj_la1[i] = -1; for (int i = 0; i < jj_2_rtns.length; i++) jj_2_rtns[i] = new JJCalls(); } public SQL(SQLTokenManager tm) { token_source = tm; token = new Token(); jj_ntk = -1; jj_gen = 0; for (int i = 0; i < 189; i++) jj_la1[i] = -1; for (int i = 0; i < jj_2_rtns.length; i++) jj_2_rtns[i] = new JJCalls(); } public void ReInit(SQLTokenManager tm) { token_source = tm; token = new Token(); jj_ntk = -1; jj_gen = 0; for (int i = 0; i < 189; i++) jj_la1[i] = -1; for (int i = 0; i < jj_2_rtns.length; i++) jj_2_rtns[i] = new JJCalls(); } final private Token jj_consume_token(int kind) throws ParseException { Token oldToken; if ((oldToken = token).next != null) token = token.next; else token = token.next = token_source.getNextToken(); jj_ntk = -1; if (token.kind == kind) { jj_gen++; if (++jj_gc > 100) { jj_gc = 0; for (int i = 0; i < jj_2_rtns.length; i++) { JJCalls c = jj_2_rtns[i]; while (c != null) { if (c.gen < jj_gen) c.first = null; c = c.next; } } } return token; } token = oldToken; jj_kind = kind; throw generateParseException(); } final private boolean jj_scan_token(int kind) { if (jj_scanpos == jj_lastpos) { jj_la--; if (jj_scanpos.next == null) { jj_lastpos = jj_scanpos = jj_scanpos.next = token_source.getNextToken(); } else { jj_lastpos = jj_scanpos = jj_scanpos.next; } } else { jj_scanpos = jj_scanpos.next; } if (jj_rescan) { int i = 0; Token tok = token; while (tok != null && tok != jj_scanpos) { i++; tok = tok.next; } if (tok != null) jj_add_error_token(kind, i); } return (jj_scanpos.kind != kind); } final public Token getNextToken() { if (token.next != null) token = token.next; else token = token.next = token_source.getNextToken(); jj_ntk = -1; jj_gen++; return token; } final public Token getToken(int index) { Token t = lookingAhead ? 
jj_scanpos : token; for (int i = 0; i < index; i++) { if (t.next != null) t = t.next; else t = t.next = token_source.getNextToken(); } return t; } final private int jj_ntk() { if ((jj_nt=token.next) == null) return (jj_ntk = (token.next=token_source.getNextToken()).kind); else return (jj_ntk = jj_nt.kind); } private java.util.Vector jj_expentries = new java.util.Vector(); private int[] jj_expentry; private int jj_kind = -1; private int[] jj_lasttokens = new int[100]; private int jj_endpos; private void jj_add_error_token(int kind, int pos) { if (pos >= 100) return; if (pos == jj_endpos + 1) { jj_lasttokens[jj_endpos++] = kind; } else if (jj_endpos != 0) { jj_expentry = new int[jj_endpos]; for (int i = 0; i < jj_endpos; i++) { jj_expentry[i] = jj_lasttokens[i]; } boolean exists = false; for (java.util.Enumeration en = jj_expentries.elements(); en.hasMoreElements();) { int[] oldentry = (int[])(en.nextElement()); if (oldentry.length == jj_expentry.length) { exists = true; for (int i = 0; i < jj_expentry.length; i++) { if (oldentry[i] != jj_expentry[i]) { exists = false; break; } } if (exists) break; } } if (!exists) jj_expentries.addElement(jj_expentry); if (pos != 0) jj_lasttokens[(jj_endpos = pos) - 1] = kind; } } final public ParseException generateParseException() { jj_expentries.removeAllElements(); boolean[] la1tokens = new boolean[211]; for (int i = 0; i < 211; i++) { la1tokens[i] = false; } if (jj_kind >= 0) { la1tokens[jj_kind] = true; jj_kind = -1; } for (int i = 0; i < 189; i++) { if (jj_la1[i] == jj_gen) { for (int j = 0; j < 32; j++) { if ((jj_la1_0[i] & (1< jj_gen) { jj_la = p.arg; jj_lastpos = jj_scanpos = p.first; switch (i) { case 0: jj_3_1(); break; case 1: jj_3_2(); break; case 2: jj_3_3(); break; case 3: jj_3_4(); break; case 4: jj_3_5(); break; case 5: jj_3_6(); break; case 6: jj_3_7(); break; case 7: jj_3_8(); break; case 8: jj_3_9(); break; case 9: jj_3_10(); break; case 10: jj_3_11(); break; case 11: jj_3_12(); break; case 12: jj_3_13(); break; 
case 13: jj_3_14(); break; case 14: jj_3_15(); break; case 15: jj_3_16(); break; case 16: jj_3_17(); break; } } p = p.next; } while (p != null); } jj_rescan = false; } final private void jj_save(int index, int xla) { JJCalls p = jj_2_rtns[index]; while (p.gen > jj_gen) { if (p.next == null) { p = p.next = new JJCalls(); break; } p = p.next; } p.gen = jj_gen + xla - jj_la; p.first = token; p.arg = xla; } static final class JJCalls { int gen; Token first; int arg; JJCalls next; } } mckoisqldb-1.0.6/src/main/java/com/mckoi/database/sql/SQL.jj000066400000000000000000001731331330501023400235100ustar00rootroot00000000000000/** * SQL Grammar (JavaCC) * * Mckoi SQL Database ( http://www.mckoi.com/database ) * Copyright (C) 2000, 2001, 2002 Diehl and Associates, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * Version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License Version 2 for more details. * * You should have received a copy of the GNU General Public License * Version 2 along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
* * Change Log: * * */ options { STATIC = false; UNICODE_INPUT = true; OPTIMIZE_TOKEN_MANAGER = true; // IGNORE_CASE = true; DEBUG_PARSER = false; } PARSER_BEGIN(SQL) package com.mckoi.database.sql; import com.mckoi.database.TType; import com.mckoi.database.Assignment; import com.mckoi.database.JoiningSet; import com.mckoi.database.Expression; import com.mckoi.database.Variable; import com.mckoi.database.FunctionDef; import com.mckoi.database.FunctionFactory; import com.mckoi.database.Operator; import com.mckoi.database.StatementTree; import com.mckoi.database.ParameterSubstitution; import com.mckoi.database.global.*; import com.mckoi.database.interpret.*; import com.mckoi.database.TObject; import java.util.ArrayList; import java.util.Stack; public class SQL { // State variables for the parser, /** * Set to true if the SQL identifiers are converted to upper case. * NOTE: Purely experimental feature! */ private boolean case_insensitive_identifiers = false; /** * The parameter id. */ private int parameter_id = 0; /** * Resets the parameter id. This MUST be called before a parser is used * to parse a statement. */ public void reset() { parameter_id = 0; } /** * Creates and returns a parameter substitution. This is called when the * parser comes across a '?' style object. This object is used to mark an * expression with a place mark that can be substituted for a value later. */ public ParameterSubstitution createSubstitution(String image) { ParameterSubstitution ps = new ParameterSubstitution(parameter_id); ++parameter_id; return ps; } /** * If the parser has been defined as case insensitive then this * returns the uppercase version of the given string. * * NOTE: This actually doesn't do anything because the case is now resolved * outside the parser. */ public String caseCheck(String identif) { // if (case_insensitive_identifiers) { // return identif.toUpperCase(); // } return identif; } /** * Helper for expression parsing. 
* Called when an end parenthese has been found. */ public void expEndParen(Expression exp, Stack stack) { Operator op = (Operator) stack.pop(); while (!op.is("(")) { addOperatorToExpression(exp, op); op = (Operator) stack.pop(); } } /** * Helper for expression parsing. * Called when an operator has been read in. This needs to check precedence * and add the operator to the expression as appropriate. */ public void expOperator(Expression exp, Stack stack, Operator op) { int precedence = op.precedence(); flushOperatorStack(exp, stack, precedence); stack.push(op); } /** * Flush the operator stack until the stack is either empty or the top * element is either a "(" or of a precedence lower than the given * precedence. */ public void flushOperatorStack(Expression exp, Stack stack, int precedence) { if (!stack.empty()) { Operator top_op = (Operator) stack.pop(); while (!top_op.is("(") && top_op.precedence() >= precedence) { addOperatorToExpression(exp, top_op); if (stack.empty()) { return; } top_op = (Operator) stack.pop(); } stack.push(top_op); } } /** * Helper for expression parsing. * Called when an entire expression has been read in. We need to empty * the stack. */ public void expEnd(Expression exp, Stack stack) { while (!stack.empty()) { Operator op = (Operator) stack.pop(); addOperatorToExpression(exp, op); } } /** * Helper for expression parsing. * Adds an operator to the given expression. 
*/ public void addOperatorToExpression(Expression exp, Operator op) { if (op.is("not")) { exp.addElement(null); } exp.addOperator(op); } public static void main(String args[]) throws ParseException { SQL parser = new SQL(System.in); parser.Test(); } } PARSER_END(SQL) SKIP : { " " | "\t" | "\n" | "\r" | <"//" (~["\n","\r"])* ("\n" | "\r" | "\r\n")> | <"--" (~["\n","\r"])* ("\n" | "\r" | "\r\n")> //| <"/*" (~["*"])* "*" ("*" | ~["*","/"] (~["*"])* "*")* "/"> } TOKEN: { | " > | | =" > | " > | | | | } TOKEN [IGNORE_CASE] : { | // NOTE: Handling regex literals is a horrible horrible hack. The // token starts with 'regex /' and the regex string follows. // The reason for this hack is because / clashes with | } TOKEN [IGNORE_CASE] : { /* KEYWORDS */ | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | //| | | | | | | | | | | | | | | | | | | | | | | | // Collate strengths, | | | | // Collate decomposition levels, | | | // Data types, | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | // Current date/time/timestamp literals | | | //| // NOTE: OPERATOR doesn't match '*' or '=' because we use * and = to mean different things // " | "<" | ">=" | "<=" | "!=" | "<>" | "/" | "+" | "-" | // "like" | "not like" | "regex" | "and" | "or" ) > | //| | | | | //| | } TOKEN : { | // | // | | } TOKEN : { /* IDENTIFIERS */ ( | )* > | ( "." )* > | ( "." )* > | "[]" > | > | ".*" > | ".*" > | | <#LETTER: ["a"-"z", "A"-"Z", "_"] > | <#DIGIT: ["0"-"9"]> } void Test() : { } { ( parseExpression() ";" ) { } } // Parses a single expression. Useed in 'com.mckoi.database.Expression.parse' method. 
Expression parseExpression() : { Expression exp; } { exp = DoExpression() { return exp; } } // Statement that ends with a ';' StatementTree Statement() : { StatementTree ob; } { ( ( ob=Select() | ob=Update() | ob=Alter() | ob=Compact() | ob=Create() | ob=Drop() | ob=Delete() | ob=Insert() | ob=Describe() | ob=Show() | ob=Call() | ob=Grant() | ob=Revoke() | ob=CompleteTransaction() // Either 'commit' or 'rollback' | ob=Set() | ob=ShutDown() ) ( ";" | ) ) { return ob; } } // All statements that start with StatementTree Create() : { StatementTree ob; } { ( ( ob=CreateTable() | ob=CreateTrigger() | ob=CreateFunction() | ob=CreateIndex() | ob=CreateSchema() | ob=CreateSequence() | ob=CreateUser() | ob=CreateView() ) ) { return ob; } } // All statements that start with StatementTree Drop() : { StatementTree ob; } { ( ( ob=DropTable() | ob=DropTrigger() | ob=DropFunction() | ob=DropIndex() | ob=DropSchema() | ob=DropSequence() | ob=DropUser() | ob=DropView() ) ) { return ob; } } StatementTree Select() : { StatementTree cmd = new StatementTree("com.mckoi.database.interpret.Select"); TableSelectExpression table_expr; ArrayList order_by = new ArrayList(); } { ( table_expr = GetTableSelectExpression() [ SelectOrderByList(order_by) ] ) { cmd.putObject("table_expression", table_expr); cmd.putObject("order_by", order_by); return cmd; } } StatementTree Update() : { StatementTree cmd = new StatementTree("com.mckoi.database.interpret.UpdateTable"); String table_name; ArrayList assignments = new ArrayList(); SearchExpression where_clause = new SearchExpression(); int limit = -1; } { ( table_name = TableName() AssignmentList(assignments) [ ConditionsExpression(where_clause) ] [ limit = PositiveIntegerConstant() ] ) { cmd.putObject("table_name", table_name); cmd.putObject("assignments", assignments); cmd.putObject("where_clause", where_clause); cmd.putInt("limit", limit); return cmd; } } StatementTree Alter() : { StatementTree cmd; } { ( ( cmd=AlterTable() | cmd=AlterUser() ) ) { 
return cmd; } } StatementTree AlterTable() : { StatementTree cmd = new StatementTree("com.mckoi.database.interpret.AlterTable"); String table_name; AlterTableAction action; StatementTree create_statement; } { ( table_name=TableName() action=GetAlterTableAction() { cmd.putObject("table_name", table_name); cmd.putObject("alter_action", action); } | create_statement = CreateTable() { cmd.putObject("create_statement", create_statement); } ) { return cmd; } } StatementTree Compact() : { StatementTree cmd = new StatementTree("com.mckoi.database.interpret.Compact"); String table_name; } { (
table_name=TableName() ) { cmd.putObject("table_name", table_name); return cmd; } } StatementTree CreateTable() : { StatementTree cmd = new StatementTree("com.mckoi.database.interpret.CreateTable"); boolean temporary = false; boolean only_if_not_exists = false; String table_name; ArrayList column_list = new ArrayList(); ArrayList constraint_list = new ArrayList(); Expression check_expression; } { ( [ { temporary = true; } ]
[ { only_if_not_exists = true; } ] table_name = TableName() ColumnDeclarationList(column_list, constraint_list) [ check_expression = DoExpression() { ConstraintDef check_constraint = new ConstraintDef(); check_constraint.setCheck(check_expression); constraint_list.add(check_constraint); } ] // [ CreateOptions(statement) ] // [ statement.select = Select() ] ) { cmd.putBoolean("temporary", temporary); cmd.putBoolean("only_if_not_exists", only_if_not_exists); cmd.putObject("table_name", table_name); cmd.putObject("column_list", column_list); cmd.putObject("constraint_list", constraint_list); return cmd; } } StatementTree CreateTrigger() : { StatementTree cmd = new StatementTree("com.mckoi.database.interpret.CreateTrigger"); boolean callback; String trigger_name; ArrayList trigger_types = new ArrayList(); String table_name; String before_after; String procedure_name; Expression[] procedure_args; } { ( ( trigger_name = TriggerName() TriggerTypes(trigger_types) table_name = TableName() ) { cmd.putObject("type", "callback_trigger"); } | ( trigger_name = TriggerName() before_after = BeforeOrAfter() TriggerTypes(trigger_types) table_name = TableName() procedure_name = FunctionName() "(" procedure_args = ExpressionList() ")" ) { cmd.putObject("type", "procedure_trigger"); cmd.putObject("before_after", before_after); cmd.putObject("procedure_name", procedure_name); cmd.putObject("procedure_args", procedure_args); } ) { cmd.putObject("trigger_name", trigger_name); cmd.putObject("trigger_types", trigger_types); cmd.putObject("table_name", table_name); return cmd; } } StatementTree DropTrigger() : { StatementTree cmd = new StatementTree("com.mckoi.database.interpret.DropTrigger"); String trigger_name; String type = null; } { ( trigger_name = TriggerName() { type = "callback_trigger"; } ) | ( trigger_name = TriggerName() { type = "procedure_trigger"; } ) { cmd.putObject("trigger_name", trigger_name); cmd.putObject("type", type); return cmd; } } StatementTree CreateFunction() : { 
StatementTree cmd = new StatementTree("com.mckoi.database.interpret.Function"); cmd.putObject("type", "create"); String function_name; ArrayList arg_names = new ArrayList(); ArrayList arg_types = new ArrayList(); Token loc_name; TType return_type = null; } { ( function_name = FunctionName() "(" ProcParameterList(arg_names, arg_types) ")" [ return_type = GetTType() ] loc_name = ) { cmd.putObject("function_name", function_name); cmd.putObject("arg_names", arg_names); cmd.putObject("arg_types", arg_types); // Note that 'location_name' will be a TObject cmd.putObject("location_name", Util.toParamObject(loc_name, case_insensitive_identifiers)); cmd.putObject("return_type", return_type); return cmd; } } StatementTree DropFunction() : { StatementTree cmd = new StatementTree("com.mckoi.database.interpret.Function"); cmd.putObject("type", "drop"); String function_name; } { ( function_name = FunctionName() ) { cmd.putObject("function_name", function_name); return cmd; } } StatementTree CreateSchema() : { StatementTree cmd = new StatementTree("com.mckoi.database.interpret.Schema"); cmd.putObject("type", "create"); String schema_name; } { ( schema_name = SchemaName() ) { cmd.putObject("schema_name", schema_name); return cmd; } } StatementTree DropSchema() : { StatementTree cmd = new StatementTree("com.mckoi.database.interpret.Schema"); cmd.putObject("type", "drop"); String schema_name; } { ( schema_name = SchemaName() ) { cmd.putObject("schema_name", schema_name); return cmd; } } StatementTree CreateView() : { StatementTree cmd = new StatementTree("com.mckoi.database.interpret.ViewManager"); String view_name; TableSelectExpression select_cmd; ArrayList col_list = new ArrayList(); } { ( view_name = TableName() [ "(" BasicColumnList(col_list) ")" ] select_cmd = GetTableSelectExpression() ) { cmd.putObject("type", "create"); cmd.putObject("view_name", view_name); cmd.putObject("column_list", col_list); cmd.putObject("select_expression", select_cmd); return cmd; } } StatementTree 
DropView() : { StatementTree cmd = new StatementTree("com.mckoi.database.interpret.ViewManager"); String view_name; } { ( view_name = TableName() ) { cmd.putObject("type", "drop"); cmd.putObject("view_name", view_name); return cmd; } } StatementTree CreateIndex() : { StatementTree cmd = new StatementTree("com.mckoi.database.interpret.NoOp"); } { ( [] IndexName() TableName() "(" BasicColumnList(new ArrayList()) ")" ) { return cmd; } } StatementTree DropTable() : { StatementTree cmd = new StatementTree("com.mckoi.database.interpret.DropTable"); boolean only_if_exists = false; String table_name; ArrayList table_list = new ArrayList(); } { (
[ { only_if_exists = true; } ] table_name = TableName() { table_list.add(table_name); } ( "," table_name = TableName() { table_list.add(table_name); } )* ) { cmd.putBoolean("only_if_exists", only_if_exists); cmd.putObject("table_list", table_list); return cmd; } } StatementTree DropIndex() : { StatementTree cmd = new StatementTree("com.mckoi.database.interpret.NoOp"); } { ( IndexName() TableName() ) { return cmd; } } StatementTree Call() : { StatementTree cmd = new StatementTree("com.mckoi.database.interpret.Call"); String proc_name; Expression[] args = null; } { proc_name = ProcedureName() "(" args=ExpressionList() ")" { cmd.putObject("proc_name", proc_name); cmd.putObject("args", args); return cmd; } } StatementTree CreateSequence() : { StatementTree cmd = new StatementTree("com.mckoi.database.interpret.Sequence"); cmd.putObject("type", "create"); String seq_name; Expression v; } { seq_name=SequenceName() { cmd.putObject("seq_name", seq_name); } [ v = DoExpression() { cmd.putObject("increment", v); } ] [ v = DoExpression() { cmd.putObject("min_value", v); } ] [ v = DoExpression() { cmd.putObject("max_value", v); } ] [ v = DoExpression() { cmd.putObject("start", v); } ] [ v = DoExpression() { cmd.putObject("cache", v); } ] [ { cmd.putObject("cycle", "yes"); } ] { return cmd; } } StatementTree DropSequence() : { StatementTree cmd = new StatementTree("com.mckoi.database.interpret.Sequence"); cmd.putObject("type", "drop"); String seq_name; } { seq_name=SequenceName() { cmd.putObject("seq_name", seq_name); } { return cmd; } } StatementTree CreateUser() : { StatementTree cmd = new StatementTree("com.mckoi.database.interpret.UserManager"); cmd.putObject("type", "CREATE USER"); } { UserManagerCommand(cmd) { return cmd; } } StatementTree AlterUser() : { StatementTree cmd = new StatementTree("com.mckoi.database.interpret.UserManager"); cmd.putObject("type", "ALTER USER"); } { UserManagerCommand(cmd) { return cmd; } } StatementTree DropUser() : { StatementTree cmd = new 
StatementTree("com.mckoi.database.interpret.UserManager"); cmd.putObject("type", "DROP USER"); String username; } { username = UserName() { cmd.putObject("username", username); return cmd; } } void UserManagerCommand(StatementTree cmd) : { String username; Expression password_exp; Expression[] groups_list = null; String lock_status = null; } { ( username = UserName() password_exp=DoExpression() [ LOOKAHEAD(2) groups_list=ExpressionList() ] [ ( { lock_status="LOCK"; } | { lock_status="UNLOCK"; } ) ] ) { cmd.putObject("username", username); cmd.putObject("password_expression", password_exp); cmd.putObject("groups_list", groups_list); cmd.putObject("lock_status", lock_status); } } StatementTree Delete() : { StatementTree cmd = new StatementTree("com.mckoi.database.interpret.Delete"); String table_name; SearchExpression where_clause = new SearchExpression(); int limit = -1; } { ( table_name = TableName() [ ConditionsExpression(where_clause) ] [ limit = PositiveIntegerConstant() ] ) { cmd.putObject("table_name", table_name); cmd.putObject("where_clause", where_clause); cmd.putInt("limit", limit); return cmd; } } StatementTree Insert() : { StatementTree cmd = new StatementTree("com.mckoi.database.interpret.Insert"); String table_name; ArrayList col_list = new ArrayList(); ArrayList data_list = new ArrayList(); // ( Array of Expression[] ) StatementTree select = null; ArrayList assignments = new ArrayList(); String type; } { ( [ ] table_name = TableName() ( [ "(" BasicColumnList(col_list) ")" ] ( InsertDataList(data_list) { type = "from_values"; } | select = Select() { type = "from_select"; } ) | AssignmentList(assignments) { type = "from_set"; } ) ) { cmd.putObject("table_name", table_name); cmd.putObject("col_list", col_list); cmd.putObject("data_list", data_list); cmd.putObject("select", select); cmd.putObject("assignments", assignments); cmd.putObject("type", type); return cmd; } } StatementTree Describe() : { StatementTree cmd = new 
StatementTree("com.mckoi.database.interpret.Show"); cmd.putObject("show", "describe_table"); String table_name; } { ( table_name = TableName() ) { cmd.putObject("table_name", table_name); cmd.putObject("where_clause", new SearchExpression()); return cmd; } } StatementTree Show() : { StatementTree cmd = new StatementTree("com.mckoi.database.interpret.Show"); Expression[] args = null; SearchExpression where_clause = new SearchExpression(); Token t; } { ( ( t= | t= ) [ "(" args=ExpressionList() ")" ] [ ConditionsExpression(where_clause) ] ) { cmd.putObject("show", t.image); cmd.putObject("args", args); cmd.putObject("where_clause", where_clause); return cmd; } } StatementTree Grant() : { StatementTree cmd = new StatementTree("com.mckoi.database.interpret.PrivManager"); ArrayList priv_list = new ArrayList(); String priv_object; ArrayList grant_to; boolean grant_option = false; } { ( PrivList(priv_list) priv_object=PrivObject() grant_to=UserNameList(new ArrayList()) [
] table_name=TableName() { return "T:" + table_name; } | schema_name=SchemaName() { return "S:" + schema_name; } ) } // A list of privs ArrayList PrivList(ArrayList list) : { } { PrivListItem(list) ( "," PrivListItem(list) )* { return list; } } // Adds an item in a priv list void PrivListItem(ArrayList list) : { Token t; } { ( t= [ table_expr.distinct = SetQuantifier() ] SelectColumnList(table_expr.columns) [ SelectTableList(table_expr.from_clause) ] [ ConditionsExpression(table_expr.where_clause) ] [ SelectGroupByList(table_expr.group_by) [ table_expr.group_max = GroupMaxColumn() ] [ ConditionsExpression(table_expr.having_clause) ] ] [ composite = GetComposite() [ { is_all = true; } ] next_composite_expression = GetTableSelectExpression() { table_expr.chainComposite(next_composite_expression, composite, is_all); } ] ) { return table_expr; } } AlterTableAction GetAlterTableAction() : { String col_name, con_name; ColumnDef column_def; ConstraintDef constraint_def; Expression default_exp; AlterTableAction action = new AlterTableAction(); } { ( ( [ ] column_def=ColumnDefinition() { action.setAction("ADD"); action.addElement(column_def); } | constraint_def=TableConstraintDefinition() { action.setAction("ADD_CONSTRAINT"); action.addElement(constraint_def); } ) | [ ] col_name=ColumnName() ( default_exp=DoExpression() { action.setAction("ALTERSET"); action.addElement(col_name); action.addElement(default_exp); } | { action.setAction("DROPDEFAULT"); action.addElement(col_name); } ) | ( [ ] col_name=ColumnName() { action.setAction("DROP"); action.addElement(col_name); } | con_name=ConstraintName() { action.setAction("DROP_CONSTRAINT"); action.addElement(con_name); } | { action.setAction("DROP_CONSTRAINT_PRIMARY_KEY"); } ) ) { return action; } } // An element to insert, either an expression or DEFAULT for the default // element. 
Object InsertElement() : { Expression e; } { ( { return "DEFAULT"; } | e = DoExpression() { return e; } ) } ArrayList InsertExpressionList() : { ArrayList list = new ArrayList(); Object elem; } { [ elem = InsertElement() { list.add(elem); } ( "," elem = InsertElement() { list.add(elem); } )* ] { return list; } } // The list of columns to insert formatted as; eg. (9, 4), (3, 2), (9, 9), .... void InsertDataList(ArrayList data_list) : { ArrayList insert_vals; } { "(" insert_vals = InsertExpressionList() ")" { data_list.add(insert_vals); } ( "," "(" insert_vals = InsertExpressionList() ")" { data_list.add(insert_vals); } )* } // Returning true means distinct, false means all. boolean SetQuantifier() : {} { ( { return true; } | { return false; } ) } void SelectColumnList(ArrayList list) : { SelectColumn col; } { col = SelectColumn() { list.add(col); } ( "," col = SelectColumn() { list.add(col); } )* } SelectColumn SelectColumn() : { SelectColumn col = new SelectColumn(); String aliased_name; Token t; } { ( col.expression = DoExpression() [ ] [ col.alias=TableAliasName() ] | { col.glob_name = "*"; } | t = { col.glob_name = caseCheck(t.image); } | t = { col.glob_name = caseCheck(Util.asNonQuotedRef(t)); } ) { return col; } } void SelectGroupByList(ArrayList list) : { ByColumn col; Expression exp; } { exp = DoExpression() { col = new ByColumn(); col.exp = exp; list.add(col); } ( "," exp = DoExpression() { col = new ByColumn(); col.exp = exp; list.add(col); } )* } /** * NOTE: This is an extension, allows for us to specify a column to return the * max value for each row representing a group. 
*/ Variable GroupMaxColumn() : { Variable var; } { var = ColumnNameVariable() { return var; } } void SelectOrderByList(ArrayList list) : { ByColumn col; Expression exp; boolean ascending = true; } { exp = DoExpression() [ ascending=OrderingSpec() ] { col = new ByColumn(); col.exp = exp; col.ascending = ascending; list.add(col); } ( "," exp = DoExpression() { ascending=true; } [ ascending=OrderingSpec() ] { col = new ByColumn(); col.exp = exp; col.ascending = ascending; list.add(col); } )* } boolean OrderingSpec() : {} { ( { return true; } | { return false; } ) { return true; } } void TableDeclaration(FromClause from_clause) : { String table=null, declare_as = null; TableSelectExpression select_stmt = null; } { ( ( table=TableName() | "(" select_stmt=GetTableSelectExpression() ")" ) [ [ ] declare_as=TableName() ] ) { from_clause.addTableDeclaration(table, select_stmt, declare_as); } } void SelectTableList(FromClause from_clause) : {} { TableDeclaration(from_clause) [ FromClauseJoin(from_clause) ] } void FromClauseJoin(FromClause from_clause) : { Expression on_expression; } { ( ( "," { from_clause.addJoin(JoiningSet.INNER_JOIN);} ) [ SelectTableList(from_clause) ] | ( [ ] TableDeclaration(from_clause) on_expression=DoExpression() { from_clause.addPreviousJoin(JoiningSet.INNER_JOIN, on_expression); } ) [ FromClauseJoin(from_clause) ] | ( [] TableDeclaration(from_clause) on_expression=DoExpression() { from_clause.addPreviousJoin(JoiningSet.LEFT_OUTER_JOIN, on_expression); } ) [ FromClauseJoin(from_clause) ] | ( [] TableDeclaration(from_clause) on_expression=DoExpression() { from_clause.addPreviousJoin(JoiningSet.RIGHT_OUTER_JOIN, on_expression); } ) [ FromClauseJoin(from_clause) ] ) } // A list of parameters in a function or procedure declaration. For example, // ' p1 NUMERIC, p2 NUMERIC, s1 CHARACTER VARYING ' // First array contains parameter names, and second contains TType representing // the type. 
void ProcParameterList(ArrayList decl_names, ArrayList decl_types) : { String name; TType type; } { [ { name = null; } ( [ name = ProcArgumentName() ] type = GetTType() ) { decl_names.add(name); decl_types.add(type); } ( ( "," { name = null; } [ name = ProcArgumentName() ] type = GetTType() ) { decl_names.add(name); decl_types.add(type); } )* ] } // The ' set a = (a * 9), b = concat(b, "aa") ' part of the 'update', 'insert' statement void AssignmentList(ArrayList assignment_list) : { String column; Expression exp; } { ( column=ColumnName() exp=DoExpression() { assignment_list.add(new Assignment(Variable.resolve(column), exp)); } [ "," AssignmentList(assignment_list) ] ) } // Parses a list of column declarations. eg. ' id NUMERIC(5, 20), number VARCHAR(90), ... ' // and also any constraints. void ColumnDeclarationList(ArrayList column_list, ArrayList constraint_list) : { } { "(" ColumnOrConstraintDefinition(column_list, constraint_list) ( "," ColumnOrConstraintDefinition(column_list, constraint_list) )* ")" } void ColumnOrConstraintDefinition(ArrayList column_list, ArrayList constraint_list) : { ColumnDef coldef = null; ConstraintDef condef = null; } { ( coldef = ColumnDefinition() { column_list.add(coldef); } | condef = TableConstraintDefinition() { constraint_list.add(condef); } ) } ColumnDef ColumnDefinition() : { ColumnDef column = new ColumnDef(); Token t; Token col_constraint; Expression default_exp; String col_name; } { ( col_name = ColumnName() { column.setName(col_name); } ColumnDataType(column) [ default_exp = DoExpression() { column.setDefaultExpression(default_exp); } ] ( ColumnConstraint(column) )* [ ( t= | t= ) { column.setIndex(t); } ] ) { return column; } } // Constraint on a column, eg. 'NOT NULL', 'NULL', 'PRIMARY KEY', 'UNIQUE', etc. 
void ColumnConstraint(ColumnDef column) : { Token t; String table_name; ArrayList col_list = new ArrayList(); } { ( { column.addConstraint("NOT NULL"); } | { column.addConstraint("NULL"); } | { column.addConstraint("PRIMARY"); } | { column.addConstraint("UNIQUE"); } ) } int GetCollateStrength() : { } { ( { return java.text.Collator.PRIMARY; } | { return java.text.Collator.SECONDARY; } | { return java.text.Collator.TERTIARY; } | { return java.text.Collator.IDENTICAL; } ) } int GetCollateDecomposition() : { } { ( { return java.text.Collator.NO_DECOMPOSITION; } | { return java.text.Collator.CANONICAL_DECOMPOSITION; } | { return java.text.Collator.FULL_DECOMPOSITION; } ) } int GetStringSQLType() : { } { LOOKAHEAD(2) ( ) { return SQLTypes.VARCHAR; } | LOOKAHEAD(3) ( ) { return SQLTypes.LONGVARCHAR; } | ( | | ) { return SQLTypes.LONGVARCHAR; } | ( | ) { return SQLTypes.CHAR; } | { return SQLTypes.VARCHAR; } | { return SQLTypes.CLOB; } } int GetNumericSQLType() : { } { ( | ) { return SQLTypes.INTEGER; } | { return SQLTypes.TINYINT; } | { return SQLTypes.SMALLINT; } | { return SQLTypes.BIGINT; } | { return SQLTypes.FLOAT; } | { return SQLTypes.REAL; } | { return SQLTypes.DOUBLE; } | { return SQLTypes.NUMERIC; } | { return SQLTypes.DECIMAL; } } int GetBooleanSQLType() : { } { ( | ) { return SQLTypes.BOOLEAN; } } int GetDateSQLType() : { } { { return SQLTypes.TIMESTAMP; } |