PageRenderTime 45ms CodeModel.GetById 12ms RepoModel.GetById 0ms app.codeStats 1ms

/derby-10.6.2.1/db-derby-10.6.2.1-src/java/engine/org/apache/derby/impl/sql/execute/DeleteCascadeResultSet.java

#
Java | 488 lines | 319 code | 79 blank | 90 comment | 48 complexity | 1ed17aba7146175aa6e7c296124c36a7 MD5 | raw file
Possible License(s): Apache-2.0
/*

   Derby - Class org.apache.derby.impl.sql.execute.DeleteCascadeResultSet

   Licensed to the Apache Software Foundation (ASF) under one or more
   contributor license agreements.  See the NOTICE file distributed with
   this work for additional information regarding copyright ownership.
   The ASF licenses this file to you under the Apache License, Version 2.0
   (the "License"); you may not use this file except in compliance with
   the License.  You may obtain a copy of the License at

      http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.

 */
  16. package org.apache.derby.impl.sql.execute;
  17. import org.apache.derby.iapi.services.sanity.SanityManager;
  18. import org.apache.derby.iapi.error.StandardException;
  19. import org.apache.derby.iapi.sql.execute.ConstantAction;
  20. import org.apache.derby.iapi.sql.execute.CursorResultSet;
  21. import org.apache.derby.iapi.sql.execute.RowChanger;
  22. import org.apache.derby.iapi.sql.execute.NoPutResultSet;
  23. import org.apache.derby.iapi.sql.Activation;
  24. import org.apache.derby.iapi.sql.ResultDescription;
  25. import org.apache.derby.iapi.types.DataValueDescriptor;
  26. import org.apache.derby.iapi.sql.ResultSet;
  27. import org.apache.derby.iapi.store.access.ConglomerateController;
  28. import org.apache.derby.iapi.store.access.TransactionController;
  29. import org.apache.derby.iapi.sql.execute.ExecRow;
  30. import org.apache.derby.iapi.sql.execute.TemporaryRowHolder;
  31. import org.apache.derby.iapi.reference.SQLState;
  32. import java.util.Vector;
  33. import java.util.Hashtable;
  34. import java.util.Enumeration;
  35. /**
  36. * Delete the rows from the specified base table and executes delete/update
  37. * on dependent tables depending on the referential actions specified.
  38. * Note:(beetle:5197) Dependent Resultsets of DeleteCascade Resultset can in
  39. * any one of the multiple resultsets generated for the same table because of
  40. * multiple foreign key relationship to the same table. At the bind time ,
  41. * dependents are binded only once per table.
  42. * We can not depend on mainNodeTable Flag to fire actions on dependents,
  43. * it should be done based on whether the resultset has dependent resultsets or not.
  44. *
  45. */
  46. public class DeleteCascadeResultSet extends DeleteResultSet
  47. {
  48. public ResultSet[] dependentResultSets;
  49. private int noDependents =0;
  50. private CursorResultSet parentSource;
  51. private FKInfo parentFKInfo;
  52. private long fkIndexConglomNumber;
  53. private String resultSetId;
  54. private boolean mainNodeForTable = true;
  55. private boolean affectedRows = false;
  56. private int tempRowHolderId; //this result sets temporary row holder id
  57. /*
  58. * class interface
  59. * @exception StandardException Thrown on error
  60. */
  61. public DeleteCascadeResultSet
  62. (
  63. NoPutResultSet source,
  64. Activation activation,
  65. int constantActionItem,
  66. ResultSet[] dependentResultSets,
  67. String resultSetId
  68. )
  69. throws StandardException
  70. {
  71. super(source,
  72. ((constantActionItem == -1) ?activation.getConstantAction() :
  73. (ConstantAction)activation.getPreparedStatement().getSavedObject(constantActionItem)),
  74. activation);
  75. ConstantAction passedInConstantAction;
  76. if(constantActionItem == -1)
  77. passedInConstantAction = activation.getConstantAction(); //root table
  78. else
  79. {
  80. passedInConstantAction =
  81. (ConstantAction) activation.getPreparedStatement().getSavedObject(constantActionItem);
  82. resultDescription = constants.resultDescription;
  83. }
  84. cascadeDelete = true;
  85. this.resultSetId = resultSetId;
  86. if(dependentResultSets != null)
  87. {
  88. noDependents = dependentResultSets.length;
  89. this.dependentResultSets = dependentResultSets;
  90. }
  91. }
  92. /**
  93. @exception StandardException Standard Derby error policy
  94. */
  95. public void open() throws StandardException
  96. {
  97. try{
  98. setup();
  99. if(isMultipleDeletePathsExist())
  100. {
  101. setRowHoldersTypeToUniqueStream();
  102. //collect until there are no more rows to found
  103. while(collectAffectedRows(false));
  104. }else
  105. {
  106. collectAffectedRows(false);
  107. }
  108. if (! affectedRows)
  109. {
  110. activation.addWarning(
  111. StandardException.newWarning(
  112. SQLState.LANG_NO_ROW_FOUND));
  113. }
  114. runFkChecker(true); //check for only RESTRICT referential action rule violations
  115. Hashtable mntHashTable = new Hashtable(); //Hash Table to identify mutiple node for same table cases.
  116. mergeRowHolders(mntHashTable);
  117. fireBeforeTriggers(mntHashTable);
  118. deleteDeferredRows();
  119. runFkChecker(false); //check for all constraint violations
  120. rowChangerFinish();
  121. fireAfterTriggers();
  122. }finally
  123. {
  124. cleanUp();
  125. //clear the parent result sets hash table
  126. activation.clearParentResultSets();
  127. }
  128. endTime = getCurrentTimeMillis();
  129. }
  130. /**
  131. *Gathers the rows that needs to be deleted/updated
  132. *and creates a temporary resulsets that will be passed
  133. *as source to its dependent result sets.
  134. */
  135. void setup() throws StandardException
  136. {
  137. /* Cache query plan text for source, before it gets blown away */
  138. if (lcc.getRunTimeStatisticsMode())
  139. {
  140. /* savedSource nulled after run time statistics generation */
  141. savedSource = source;
  142. }
  143. super.setup();
  144. activation.setParentResultSet(rowHolder, resultSetId);
  145. Vector sVector = (Vector) activation.getParentResultSet(resultSetId);
  146. tempRowHolderId = sVector.size() -1;
  147. for(int i =0 ; i < noDependents; i++)
  148. {
  149. if(dependentResultSets[i] instanceof UpdateResultSet)
  150. {
  151. ((UpdateResultSet) dependentResultSets[i]).setup();
  152. }else
  153. {
  154. ((DeleteCascadeResultSet) dependentResultSets[i]).setup();
  155. }
  156. }
  157. }
  158. boolean collectAffectedRows(boolean rowsFound) throws StandardException
  159. {
  160. if(super.collectAffectedRows())
  161. {
  162. affectedRows = true;
  163. rowsFound = true;
  164. }
  165. for(int i =0 ; i < noDependents; i++)
  166. {
  167. if(dependentResultSets[i] instanceof UpdateResultSet)
  168. {
  169. if(((UpdateResultSet)dependentResultSets[i]).collectAffectedRows())
  170. rowsFound = true;
  171. }else
  172. {
  173. if(((DeleteCascadeResultSet)
  174. dependentResultSets[i]).collectAffectedRows(rowsFound))
  175. rowsFound = true;
  176. }
  177. }
  178. return rowsFound;
  179. }
  180. void fireBeforeTriggers(Hashtable msht) throws StandardException
  181. {
  182. if(!mainNodeForTable)
  183. {
  184. /*to handle case where no table node had qualified rows, in which case no node for
  185. * the table get marked as mainNodeFor table , one way to identify
  186. * such case is to look at the mutinode hash table and see if the result id exist ,
  187. *if it does not means none of the table nodes resulsets got marked
  188. * as main node for table. If that is the case we mark this
  189. * resultset as mainNodeTable and put entry in the hash table.
  190. */
  191. if(!msht.containsKey(resultSetId))
  192. {
  193. mainNodeForTable = true;
  194. msht.put(resultSetId, resultSetId);
  195. }
  196. }
  197. //execute the before triggers on the dependents
  198. //Defect 5743: Before enabling BEFORE triggers, check DB2 behavior.
  199. for(int i =0 ; i < noDependents; i++)
  200. {
  201. if(dependentResultSets[i] instanceof UpdateResultSet)
  202. {
  203. ((UpdateResultSet) dependentResultSets[i]).fireBeforeTriggers();
  204. }
  205. else{
  206. ((DeleteCascadeResultSet)dependentResultSets[i]).fireBeforeTriggers(msht);
  207. }
  208. }
  209. //If there is more than one node for the same table
  210. //only one node fires the triggers
  211. if(mainNodeForTable && constants.deferred)
  212. super.fireBeforeTriggers();
  213. }
  214. void fireAfterTriggers() throws StandardException
  215. {
  216. //fire the After Triggers on the dependent tables, if any rows changed
  217. for(int i=0 ; i<noDependents && affectedRows; i++){
  218. if(dependentResultSets[i] instanceof UpdateResultSet)
  219. {
  220. ((UpdateResultSet) dependentResultSets[i]).fireAfterTriggers();
  221. }
  222. else{
  223. ((DeleteCascadeResultSet)dependentResultSets[i]).fireAfterTriggers();
  224. }
  225. }
  226. //If there is more than one node for the same table
  227. //, we let only one node fire the triggers.
  228. if(mainNodeForTable && constants.deferred)
  229. super.fireAfterTriggers();
  230. }
  231. void deleteDeferredRows() throws StandardException
  232. {
  233. //delete the rows in the dependents tables
  234. for(int i =0 ; i < noDependents; i++)
  235. {
  236. if(dependentResultSets[i] instanceof UpdateResultSet)
  237. {
  238. ((UpdateResultSet) dependentResultSets[i]).updateDeferredRows();
  239. }
  240. else{
  241. ((DeleteCascadeResultSet)dependentResultSets[i]).deleteDeferredRows();
  242. }
  243. }
  244. //If there is more than one node for the same table
  245. //only one node deletes all the rows.
  246. if(mainNodeForTable)
  247. super.deleteDeferredRows();
  248. }
  249. void runFkChecker(boolean restrictCheckOnly) throws StandardException
  250. {
  251. //run the Foreign key or primary key Checker on the dependent tables
  252. for(int i =0 ; i < noDependents; i++)
  253. {
  254. if(dependentResultSets[i] instanceof UpdateResultSet)
  255. {
  256. ((UpdateResultSet) dependentResultSets[i]).runChecker(restrictCheckOnly);
  257. }
  258. else{
  259. ((DeleteCascadeResultSet)dependentResultSets[i]).runFkChecker(restrictCheckOnly);
  260. }
  261. }
  262. //If there is more than one node for the same table
  263. //only one node does all foreign key checks.
  264. if(mainNodeForTable)
  265. super.runFkChecker(restrictCheckOnly);
  266. }
  267. public void cleanUp() throws StandardException
  268. {
  269. super.cleanUp();
  270. for(int i =0 ; i < noDependents; i++)
  271. {
  272. if(dependentResultSets[i] instanceof UpdateResultSet)
  273. {
  274. ((UpdateResultSet) dependentResultSets[i]).cleanUp();
  275. }else
  276. {
  277. ((DeleteCascadeResultSet) dependentResultSets[i]).cleanUp();
  278. }
  279. }
  280. endTime = getCurrentTimeMillis();
  281. }
  282. private void rowChangerFinish() throws StandardException
  283. {
  284. rc.finish();
  285. for(int i =0 ; i < noDependents; i++)
  286. {
  287. if(dependentResultSets[i] instanceof UpdateResultSet)
  288. {
  289. ((UpdateResultSet) dependentResultSets[i]).rowChangerFinish();
  290. }else
  291. {
  292. ((DeleteCascadeResultSet) dependentResultSets[i]).rowChangerFinish();
  293. }
  294. }
  295. }
  296. //if there is more than one node for the same table, copy the rows
  297. // into one node , so that we don't fire trigger more than once.
  298. private void mergeRowHolders(Hashtable msht) throws StandardException
  299. {
  300. if(msht.containsKey(resultSetId) || rowCount ==0)
  301. {
  302. //there is already another resultset node that is marked as main
  303. //node for this table or this resultset has no rows qualified.
  304. //when none of the resultset nodes for the table has any rows then
  305. //we mark them as one them as main node in fireBeforeTriggers().
  306. mainNodeForTable = false;
  307. }else
  308. {
  309. mergeResultSets();
  310. mainNodeForTable = true;
  311. msht.put(resultSetId, resultSetId);
  312. }
  313. for(int i =0 ; i < noDependents; i++)
  314. {
  315. if(dependentResultSets[i] instanceof UpdateResultSet)
  316. {
  317. return;
  318. }
  319. else{
  320. ((DeleteCascadeResultSet)dependentResultSets[i]).mergeRowHolders(msht);
  321. }
  322. }
  323. }
  324. private void mergeResultSets() throws StandardException
  325. {
  326. Vector sVector = (Vector) activation.getParentResultSet(resultSetId);
  327. int size = sVector.size();
  328. // if there is more than one source, we need to merge them into onc
  329. // temporary result set.
  330. if(size > 1)
  331. {
  332. ExecRow row = null;
  333. int rowHolderId = 0 ;
  334. //copy all the vallues in the result set to the current resultset row holder
  335. while(rowHolderId < size)
  336. {
  337. if(rowHolderId == tempRowHolderId )
  338. {
  339. //skipping the row holder that we are copying the rows into.
  340. rowHolderId++;
  341. continue;
  342. }
  343. TemporaryRowHolder currentRowHolder = (TemporaryRowHolder)sVector.elementAt(rowHolderId);
  344. CursorResultSet rs = currentRowHolder.getResultSet();
  345. rs.open();
  346. while ((row = rs.getNextRow()) != null)
  347. {
  348. rowHolder.insert(row);
  349. }
  350. rs.close();
  351. rowHolderId++;
  352. }
  353. }
  354. }
  355. public void finish() throws StandardException {
  356. super.finish();
  357. //clear the parent result sets hash table
  358. //This is necessary in case if we hit any error conditions
  359. activation.clearParentResultSets();
  360. }
  361. /* check whether we have mutiple path delete scenario, if
  362. ** find any retun true. Multiple delete paths exist if we find more than
  363. ** one parent source resultset for a table involved in the delete cascade
  364. **/
  365. private boolean isMultipleDeletePathsExist()
  366. {
  367. Hashtable parentResultSets = activation.getParentResultSets();
  368. for (Enumeration e = parentResultSets.keys() ; e.hasMoreElements() ;)
  369. {
  370. String rsId = (String) e.nextElement();
  371. Vector sVector = (Vector) activation.getParentResultSet(rsId);
  372. int size = sVector.size();
  373. if(size > 1)
  374. {
  375. return true;
  376. }
  377. }
  378. return false;
  379. }
  380. /*
  381. **Incases where we have multiple paths we could get the same
  382. **rows to be deleted mutiple time and also in case of cycles
  383. **there might be new rows getting added to the row holders through
  384. **multiple iterations. To handle these case we set the temporary row holders
  385. ** to be 'uniqStream' type.
  386. **/
  387. private void setRowHoldersTypeToUniqueStream()
  388. {
  389. Hashtable parentResultSets = activation.getParentResultSets();
  390. for (Enumeration e = parentResultSets.keys() ; e.hasMoreElements() ;)
  391. {
  392. String rsId = (String) e.nextElement();
  393. Vector sVector = (Vector) activation.getParentResultSet(rsId);
  394. int size = sVector.size();
  395. int rowHolderId = 0 ;
  396. while(rowHolderId < size)
  397. {
  398. TemporaryRowHolder currentRowHolder = (TemporaryRowHolder)sVector.elementAt(rowHolderId);
  399. currentRowHolder.setRowHolderTypeToUniqueStream();
  400. rowHolderId++;
  401. }
  402. }
  403. }
  404. }