
/tags/release-0.1-rc2/hive/external/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java

/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hive.ql.exec;

import static org.apache.commons.lang.StringUtils.join;
import static org.apache.hadoop.util.StringUtils.stringifyException;

import java.io.BufferedWriter;
import java.io.DataOutput;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.io.Serializable;
import java.io.Writer;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.SortedSet;
import java.util.TreeSet;
import java.util.Map.Entry;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
import org.apache.hadoop.hive.metastore.MetaStoreUtils;
import org.apache.hadoop.hive.metastore.ProtectMode;
import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
import org.apache.hadoop.hive.metastore.api.HiveObjectType;
import org.apache.hadoop.hive.metastore.api.Index;
import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.metastore.api.Order;
import org.apache.hadoop.hive.metastore.api.PrincipalType;
import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo;
import org.apache.hadoop.hive.metastore.api.Role;
import org.apache.hadoop.hive.ql.Context;
import org.apache.hadoop.hive.ql.DriverContext;
import org.apache.hadoop.hive.ql.QueryPlan;
import org.apache.hadoop.hive.ql.hooks.ReadEntity;
import org.apache.hadoop.hive.ql.hooks.WriteEntity;
import org.apache.hadoop.hive.ql.lockmgr.HiveLock;
import org.apache.hadoop.hive.ql.lockmgr.HiveLockManager;
import org.apache.hadoop.hive.ql.lockmgr.HiveLockMode;
import org.apache.hadoop.hive.ql.lockmgr.HiveLockObject;
import org.apache.hadoop.hive.ql.lockmgr.HiveLockObject.HiveLockObjectData;
import org.apache.hadoop.hive.ql.metadata.CheckResult;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.HiveMetaStoreChecker;
import org.apache.hadoop.hive.ql.metadata.HiveStorageHandler;
import org.apache.hadoop.hive.ql.metadata.InvalidTableException;
import org.apache.hadoop.hive.ql.metadata.MetaDataFormatUtils;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hadoop.hive.ql.plan.AddPartitionDesc;
import org.apache.hadoop.hive.ql.plan.AlterDatabaseDesc;
import org.apache.hadoop.hive.ql.plan.AlterIndexDesc;
import org.apache.hadoop.hive.ql.plan.AlterTableDesc;
import org.apache.hadoop.hive.ql.plan.AlterTableSimpleDesc;
import org.apache.hadoop.hive.ql.plan.CreateDatabaseDesc;
import org.apache.hadoop.hive.ql.plan.CreateIndexDesc;
import org.apache.hadoop.hive.ql.plan.CreateTableDesc;
import org.apache.hadoop.hive.ql.plan.CreateTableLikeDesc;
import org.apache.hadoop.hive.ql.plan.CreateViewDesc;
import org.apache.hadoop.hive.ql.plan.DDLWork;
import org.apache.hadoop.hive.ql.plan.DescDatabaseDesc;
import org.apache.hadoop.hive.ql.plan.DescFunctionDesc;
import org.apache.hadoop.hive.ql.plan.DescTableDesc;
import org.apache.hadoop.hive.ql.plan.DropDatabaseDesc;
import org.apache.hadoop.hive.ql.plan.DropIndexDesc;
import org.apache.hadoop.hive.ql.plan.DropTableDesc;
import org.apache.hadoop.hive.ql.plan.GrantDesc;
import org.apache.hadoop.hive.ql.plan.GrantRevokeRoleDDL;
import org.apache.hadoop.hive.ql.plan.LockTableDesc;
import org.apache.hadoop.hive.ql.plan.MsckDesc;
import org.apache.hadoop.hive.ql.plan.PrincipalDesc;
import org.apache.hadoop.hive.ql.plan.PrivilegeDesc;
import org.apache.hadoop.hive.ql.plan.PrivilegeObjectDesc;
import org.apache.hadoop.hive.ql.plan.RevokeDesc;
import org.apache.hadoop.hive.ql.plan.RoleDDLDesc;
import org.apache.hadoop.hive.ql.plan.ShowDatabasesDesc;
import org.apache.hadoop.hive.ql.plan.ShowFunctionsDesc;
import org.apache.hadoop.hive.ql.plan.ShowGrantDesc;
import org.apache.hadoop.hive.ql.plan.ShowIndexesDesc;
import org.apache.hadoop.hive.ql.plan.ShowLocksDesc;
import org.apache.hadoop.hive.ql.plan.ShowPartitionsDesc;
import org.apache.hadoop.hive.ql.plan.ShowTableStatusDesc;
import org.apache.hadoop.hive.ql.plan.ShowTablesDesc;
import org.apache.hadoop.hive.ql.plan.SwitchDatabaseDesc;
import org.apache.hadoop.hive.ql.plan.UnlockTableDesc;
import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes;
import org.apache.hadoop.hive.ql.plan.api.StageType;
import org.apache.hadoop.hive.ql.security.authorization.Privilege;
import org.apache.hadoop.hive.serde.Constants;
import org.apache.hadoop.hive.serde2.Deserializer;
import org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe;
import org.apache.hadoop.hive.serde2.SerDeException;
import org.apache.hadoop.hive.serde2.SerDeUtils;
import org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe;
import org.apache.hadoop.hive.serde2.dynamic_type.DynamicSerDe;
import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
import org.apache.hadoop.hive.shims.HadoopShims;
import org.apache.hadoop.hive.shims.ShimLoader;
import org.apache.hadoop.util.ToolRunner;

/**
 * DDLTask implementation.
 *
 **/
public class DDLTask extends Task<DDLWork> implements Serializable {
  private static final long serialVersionUID = 1L;
  private static final Log LOG = LogFactory.getLog("hive.ql.exec.DDLTask");

  transient HiveConf conf;
  private static final int separator = Utilities.tabCode;
  private static final int terminator = Utilities.newLineCode;

  // These are suffixes attached to intermediate directory names used in the
  // archiving / un-archiving process.
  private static String INTERMEDIATE_ARCHIVED_DIR_SUFFIX;
  private static String INTERMEDIATE_ORIGINAL_DIR_SUFFIX;
  private static String INTERMEDIATE_EXTRACTED_DIR_SUFFIX;

  public DDLTask() {
    super();
  }

  @Override
  public void initialize(HiveConf conf, QueryPlan queryPlan, DriverContext ctx) {
    super.initialize(conf, queryPlan, ctx);
    this.conf = conf;

    INTERMEDIATE_ARCHIVED_DIR_SUFFIX =
      HiveConf.getVar(conf, ConfVars.METASTORE_INT_ARCHIVED);
    INTERMEDIATE_ORIGINAL_DIR_SUFFIX =
      HiveConf.getVar(conf, ConfVars.METASTORE_INT_ORIGINAL);
    INTERMEDIATE_EXTRACTED_DIR_SUFFIX =
      HiveConf.getVar(conf, ConfVars.METASTORE_INT_EXTRACTED);
  }
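
  // execute() dispatches on the first non-null descriptor found in the DDLWork
  // and delegates to the matching helper method, returning its status code. If
  // no descriptor is set, the assert at the end of the method flags it as a bug.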
  @Override
  public int execute(DriverContext driverContext) {
    // Create the db
    Hive db;
    try {
      db = Hive.get(conf);

      CreateDatabaseDesc createDatabaseDesc = work.getCreateDatabaseDesc();
      if (null != createDatabaseDesc) {
        return createDatabase(db, createDatabaseDesc);
      }
      DropDatabaseDesc dropDatabaseDesc = work.getDropDatabaseDesc();
      if (dropDatabaseDesc != null) {
        return dropDatabase(db, dropDatabaseDesc);
      }
      SwitchDatabaseDesc switchDatabaseDesc = work.getSwitchDatabaseDesc();
      if (switchDatabaseDesc != null) {
        return switchDatabase(db, switchDatabaseDesc);
      }
      DescDatabaseDesc descDatabaseDesc = work.getDescDatabaseDesc();
      if (descDatabaseDesc != null) {
        return descDatabase(descDatabaseDesc);
      }
      AlterDatabaseDesc alterDatabaseDesc = work.getAlterDatabaseDesc();
      if (alterDatabaseDesc != null) {
        return alterDatabase(alterDatabaseDesc);
      }
      CreateTableDesc crtTbl = work.getCreateTblDesc();
      if (crtTbl != null) {
        return createTable(db, crtTbl);
      }
      CreateIndexDesc crtIndex = work.getCreateIndexDesc();
      if (crtIndex != null) {
        return createIndex(db, crtIndex);
      }
      AlterIndexDesc alterIndex = work.getAlterIndexDesc();
      if (alterIndex != null) {
        return alterIndex(db, alterIndex);
      }
      DropIndexDesc dropIdx = work.getDropIdxDesc();
      if (dropIdx != null) {
        return dropIndex(db, dropIdx);
      }
      CreateTableLikeDesc crtTblLike = work.getCreateTblLikeDesc();
      if (crtTblLike != null) {
        return createTableLike(db, crtTblLike);
      }
      DropTableDesc dropTbl = work.getDropTblDesc();
      if (dropTbl != null) {
        return dropTable(db, dropTbl);
      }
      AlterTableDesc alterTbl = work.getAlterTblDesc();
      if (alterTbl != null) {
        return alterTable(db, alterTbl);
      }
      CreateViewDesc crtView = work.getCreateViewDesc();
      if (crtView != null) {
        return createView(db, crtView);
      }
      AddPartitionDesc addPartitionDesc = work.getAddPartitionDesc();
      if (addPartitionDesc != null) {
        return addPartition(db, addPartitionDesc);
      }
      AlterTableSimpleDesc simpleDesc = work.getAlterTblSimpleDesc();
      if (simpleDesc != null) {
        if (simpleDesc.getType() == AlterTableTypes.TOUCH) {
          return touch(db, simpleDesc);
        } else if (simpleDesc.getType() == AlterTableTypes.ARCHIVE) {
          return archive(db, simpleDesc, driverContext);
        } else if (simpleDesc.getType() == AlterTableTypes.UNARCHIVE) {
          return unarchive(db, simpleDesc);
        }
      }
      MsckDesc msckDesc = work.getMsckDesc();
      if (msckDesc != null) {
        return msck(db, msckDesc);
      }
      DescTableDesc descTbl = work.getDescTblDesc();
      if (descTbl != null) {
        return describeTable(db, descTbl);
      }
      DescFunctionDesc descFunc = work.getDescFunctionDesc();
      if (descFunc != null) {
        return describeFunction(descFunc);
      }
      ShowDatabasesDesc showDatabases = work.getShowDatabasesDesc();
      if (showDatabases != null) {
        return showDatabases(db, showDatabases);
      }
      ShowTablesDesc showTbls = work.getShowTblsDesc();
      if (showTbls != null) {
        return showTables(db, showTbls);
      }
      ShowTableStatusDesc showTblStatus = work.getShowTblStatusDesc();
      if (showTblStatus != null) {
        return showTableStatus(db, showTblStatus);
      }
      ShowFunctionsDesc showFuncs = work.getShowFuncsDesc();
      if (showFuncs != null) {
        return showFunctions(showFuncs);
      }
      ShowLocksDesc showLocks = work.getShowLocksDesc();
      if (showLocks != null) {
        return showLocks(showLocks);
      }
      LockTableDesc lockTbl = work.getLockTblDesc();
      if (lockTbl != null) {
        return lockTable(lockTbl);
      }
      UnlockTableDesc unlockTbl = work.getUnlockTblDesc();
      if (unlockTbl != null) {
        return unlockTable(unlockTbl);
      }
      ShowPartitionsDesc showParts = work.getShowPartsDesc();
      if (showParts != null) {
        return showPartitions(db, showParts);
      }
      RoleDDLDesc roleDDLDesc = work.getRoleDDLDesc();
      if (roleDDLDesc != null) {
        return roleDDL(roleDDLDesc);
      }
      GrantDesc grantDesc = work.getGrantDesc();
      if (grantDesc != null) {
        return grantOrRevokePrivileges(grantDesc.getPrincipals(), grantDesc
            .getPrivileges(), grantDesc.getPrivilegeSubjectDesc(), grantDesc.getGrantor(),
            grantDesc.getGrantorType(), grantDesc.isGrantOption(), true);
      }
      RevokeDesc revokeDesc = work.getRevokeDesc();
      if (revokeDesc != null) {
        return grantOrRevokePrivileges(revokeDesc.getPrincipals(), revokeDesc
            .getPrivileges(), revokeDesc.getPrivilegeSubjectDesc(), null, null, false, false);
      }
      ShowGrantDesc showGrantDesc = work.getShowGrantDesc();
      if (showGrantDesc != null) {
        return showGrants(showGrantDesc);
      }
      GrantRevokeRoleDDL grantOrRevokeRoleDDL = work.getGrantRevokeRoleDDL();
      if (grantOrRevokeRoleDDL != null) {
        return grantOrRevokeRole(grantOrRevokeRoleDDL);
      }
      ShowIndexesDesc showIndexes = work.getShowIndexesDesc();
      if (showIndexes != null) {
        return showIndexes(db, showIndexes);
      }
    } catch (InvalidTableException e) {
      console.printError("Table " + e.getTableName() + " does not exist");
      LOG.debug(stringifyException(e));
      return 1;
    } catch (HiveException e) {
      console.printError("FAILED: Error in metadata: " + e.getMessage(), "\n"
          + stringifyException(e));
      LOG.debug(stringifyException(e));
      return 1;
    } catch (Exception e) {
      console.printError("Failed with exception " + e.getMessage(), "\n"
          + stringifyException(e));
      return (1);
    }
    assert false;
    return 0;
  }
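
  /**
   * Grants or revokes the listed roles to/from each principal, depending on
   * the grant flag carried by the GrantRevokeRoleDDL descriptor.
   */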
  private int grantOrRevokeRole(GrantRevokeRoleDDL grantOrRevokeRoleDDL)
      throws HiveException {
    try {
      boolean grantRole = grantOrRevokeRoleDDL.getGrant();
      List<PrincipalDesc> principals = grantOrRevokeRoleDDL.getPrincipalDesc();
      List<String> roles = grantOrRevokeRoleDDL.getRoles();
      for (PrincipalDesc principal : principals) {
        String userName = principal.getName();
        for (String roleName : roles) {
          if (grantRole) {
            db.grantRole(roleName, userName, principal.getType(),
                grantOrRevokeRoleDDL.getGrantor(), grantOrRevokeRoleDDL
                .getGrantorType(), grantOrRevokeRoleDDL.isGrantOption());
          } else {
            db.revokeRole(roleName, userName, principal.getType());
          }
        }
      }
    } catch (Exception e) {
      throw new HiveException(e);
    }
    return 0;
  }
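
  /**
   * Writes the privileges granted to a principal (globally, or on a database,
   * table, partition, or column) to the result file named in the descriptor.
   */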
  private int showGrants(ShowGrantDesc showGrantDesc) throws HiveException {
    try {
      Path resFile = new Path(showGrantDesc.getResFile());
      FileSystem fs = resFile.getFileSystem(conf);
      DataOutput outStream = fs.create(resFile);
      PrincipalDesc principalDesc = showGrantDesc.getPrincipalDesc();
      PrivilegeObjectDesc hiveObjectDesc = showGrantDesc.getHiveObj();
      String principalName = principalDesc.getName();
      if (hiveObjectDesc == null) {
        List<HiveObjectPrivilege> users = db.showPrivilegeGrant(
            HiveObjectType.GLOBAL, principalName, principalDesc.getType(),
            null, null, null, null);
        if (users != null && users.size() > 0) {
          boolean first = true;
          for (HiveObjectPrivilege usr : users) {
            if (!first) {
              outStream.write(terminator);
            } else {
              first = false;
            }
            writeGrantInfo(outStream, principalDesc.getType(), principalName,
                null, null, null, null, usr.getGrantInfo());
          }
        }
      } else {
        String obj = hiveObjectDesc.getObject();
        boolean notFound = true;
        String dbName = null;
        String tableName = null;
        Table tableObj = null;
        Database dbObj = null;
        if (hiveObjectDesc.getTable()) {
          String[] dbTab = obj.split("\\.");
          if (dbTab.length == 2) {
            dbName = dbTab[0];
            tableName = dbTab[1];
          } else {
            dbName = db.getCurrentDatabase();
            tableName = obj;
          }
          dbObj = db.getDatabase(dbName);
          tableObj = db.getTable(dbName, tableName);
          notFound = (dbObj == null || tableObj == null);
        } else {
          dbName = hiveObjectDesc.getObject();
          dbObj = db.getDatabase(dbName);
          notFound = (dbObj == null);
        }
        if (notFound) {
          throw new HiveException(obj + " can not be found");
        }

        String partName = null;
        List<String> partValues = null;
        if (hiveObjectDesc.getPartSpec() != null) {
          partName = Warehouse
              .makePartName(hiveObjectDesc.getPartSpec(), false);
          partValues = Warehouse.getPartValuesFromPartName(partName);
        }

        if (!hiveObjectDesc.getTable()) {
          // show database level privileges
          List<HiveObjectPrivilege> dbs = db.showPrivilegeGrant(HiveObjectType.DATABASE, principalName,
              principalDesc.getType(), dbName, null, null, null);
          if (dbs != null && dbs.size() > 0) {
            boolean first = true;
            for (HiveObjectPrivilege db : dbs) {
              if (!first) {
                outStream.write(terminator);
              } else {
                first = false;
              }
              writeGrantInfo(outStream, principalDesc.getType(), principalName,
                  dbName, null, null, null, db.getGrantInfo());
            }
          }
        } else {
          if (showGrantDesc.getColumns() != null) {
            // show column level privileges
            for (String columnName : showGrantDesc.getColumns()) {
              List<HiveObjectPrivilege> columnss = db.showPrivilegeGrant(
                  HiveObjectType.COLUMN, principalName,
                  principalDesc.getType(), dbName, tableName, partValues,
                  columnName);
              if (columnss != null && columnss.size() > 0) {
                boolean first = true;
                for (HiveObjectPrivilege col : columnss) {
                  if (!first) {
                    outStream.write(terminator);
                  } else {
                    first = false;
                  }
                  writeGrantInfo(outStream, principalDesc.getType(),
                      principalName, dbName, tableName, partName, columnName,
                      col.getGrantInfo());
                }
              }
            }
          } else if (hiveObjectDesc.getPartSpec() != null) {
            // show partition level privileges
            List<HiveObjectPrivilege> parts = db.showPrivilegeGrant(
                HiveObjectType.PARTITION, principalName, principalDesc
                .getType(), dbName, tableName, partValues, null);
            if (parts != null && parts.size() > 0) {
              boolean first = true;
              for (HiveObjectPrivilege part : parts) {
                if (!first) {
                  outStream.write(terminator);
                } else {
                  first = false;
                }
                writeGrantInfo(outStream, principalDesc.getType(),
                    principalName, dbName, tableName, partName, null, part.getGrantInfo());
              }
            }
          } else {
            // show table level privileges
            List<HiveObjectPrivilege> tbls = db.showPrivilegeGrant(
                HiveObjectType.TABLE, principalName, principalDesc.getType(),
                dbName, tableName, null, null);
            if (tbls != null && tbls.size() > 0) {
              boolean first = true;
              for (HiveObjectPrivilege tbl : tbls) {
                if (!first) {
                  outStream.write(terminator);
                } else {
                  first = false;
                }
                writeGrantInfo(outStream, principalDesc.getType(),
                    principalName, dbName, tableName, null, null, tbl.getGrantInfo());
              }
            }
          }
        }
      }
      ((FSDataOutputStream) outStream).close();
    } catch (FileNotFoundException e) {
      LOG.info("show grants: " + stringifyException(e));
      return 1;
    } catch (IOException e) {
      LOG.info("show grants: " + stringifyException(e));
      return 1;
    } catch (Exception e) {
      e.printStackTrace();
      throw new HiveException(e);
    }
    return 0;
  }
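
  /**
   * Shared implementation for GRANT and REVOKE: builds a PrivilegeBag scoped
   * to the requested object (global, database, table, partition, or column)
   * and applies it for each principal.
   */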
  private int grantOrRevokePrivileges(List<PrincipalDesc> principals,
      List<PrivilegeDesc> privileges, PrivilegeObjectDesc privSubjectDesc,
      String grantor, PrincipalType grantorType, boolean grantOption, boolean isGrant) {
    if (privileges == null || privileges.size() == 0) {
      console.printError("No privilege found.");
      return 1;
    }

    String dbName = null;
    String tableName = null;
    Table tableObj = null;
    Database dbObj = null;

    try {
      if (privSubjectDesc != null) {
        if (privSubjectDesc.getPartSpec() != null && isGrant) {
          throw new HiveException("Grant does not support partition level.");
        }
        String obj = privSubjectDesc.getObject();
        boolean notFound = true;
        if (privSubjectDesc.getTable()) {
          String[] dbTab = obj.split("\\.");
          if (dbTab.length == 2) {
            dbName = dbTab[0];
            tableName = dbTab[1];
          } else {
            dbName = db.getCurrentDatabase();
            tableName = obj;
          }
          dbObj = db.getDatabase(dbName);
          tableObj = db.getTable(dbName, tableName);
          notFound = (dbObj == null || tableObj == null);
        } else {
          dbName = privSubjectDesc.getObject();
          dbObj = db.getDatabase(dbName);
          notFound = (dbObj == null);
        }
        if (notFound) {
          throw new HiveException(obj + " can not be found");
        }
      }

      PrivilegeBag privBag = new PrivilegeBag();
      if (privSubjectDesc == null) {
        for (int idx = 0; idx < privileges.size(); idx++) {
          Privilege priv = privileges.get(idx).getPrivilege();
          if (privileges.get(idx).getColumns() != null
              && privileges.get(idx).getColumns().size() > 0) {
            throw new HiveException(
                "For user-level privileges, column sets should be null. columns="
                + privileges.get(idx).getColumns().toString());
          }
          privBag.addToPrivileges(new HiveObjectPrivilege(new HiveObjectRef(
              HiveObjectType.GLOBAL, null, null, null, null), null, null,
              new PrivilegeGrantInfo(priv.toString(), 0, grantor, grantorType,
                  grantOption)));
        }
      } else {
        org.apache.hadoop.hive.metastore.api.Partition partObj = null;
        List<String> partValues = null;
        if (tableObj != null) {
          if ((!tableObj.isPartitioned())
              && privSubjectDesc.getPartSpec() != null) {
            throw new HiveException(
                "Table is not partitioned, but partition name is present: partSpec="
                + privSubjectDesc.getPartSpec().toString());
          }
          if (privSubjectDesc.getPartSpec() != null) {
            partObj = db.getPartition(tableObj, privSubjectDesc.getPartSpec(),
                false).getTPartition();
            partValues = partObj.getValues();
          }
        }

        for (PrivilegeDesc privDesc : privileges) {
          List<String> columns = privDesc.getColumns();
          Privilege priv = privDesc.getPrivilege();
          if (columns != null && columns.size() > 0) {
            if (!priv.supportColumnLevel()) {
              throw new HiveException(priv.toString()
                  + " does not support column level.");
            }
            if (privSubjectDesc == null || tableName == null) {
              throw new HiveException(
                  "For user-level/database-level privileges, column sets should be null. columns="
                  + columns);
            }
            for (int i = 0; i < columns.size(); i++) {
              privBag.addToPrivileges(new HiveObjectPrivilege(
                  new HiveObjectRef(HiveObjectType.COLUMN, dbName, tableName,
                      partValues, columns.get(i)), null, null,
                  new PrivilegeGrantInfo(priv.toString(), 0, grantor, grantorType, grantOption)));
            }
          } else {
            if (privSubjectDesc.getTable()) {
              if (privSubjectDesc.getPartSpec() != null) {
                privBag.addToPrivileges(new HiveObjectPrivilege(
                    new HiveObjectRef(HiveObjectType.PARTITION, dbName,
                        tableName, partValues, null), null, null,
                    new PrivilegeGrantInfo(priv.toString(), 0, grantor, grantorType, grantOption)));
              } else {
                privBag.addToPrivileges(new HiveObjectPrivilege(
                    new HiveObjectRef(HiveObjectType.TABLE, dbName,
                        tableName, null, null), null, null,
                    new PrivilegeGrantInfo(priv.toString(), 0, grantor, grantorType, grantOption)));
              }
            } else {
              privBag.addToPrivileges(new HiveObjectPrivilege(
                  new HiveObjectRef(HiveObjectType.DATABASE, dbName, null,
                      null, null), null, null,
                  new PrivilegeGrantInfo(priv.toString(), 0, grantor, grantorType, grantOption)));
            }
          }
        }
      }

      for (PrincipalDesc principal : principals) {
        for (int i = 0; i < privBag.getPrivileges().size(); i++) {
          HiveObjectPrivilege objPrivs = privBag.getPrivileges().get(i);
          objPrivs.setPrincipalName(principal.getName());
          objPrivs.setPrincipalType(principal.getType());
        }
        if (isGrant) {
          db.grantPrivileges(privBag);
        } else {
          db.revokePrivileges(privBag);
        }
      }
    } catch (Exception e) {
      console.printError("Error: " + e.getMessage());
      return 1;
    }
    return 0;
  }
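
  /**
   * Handles CREATE ROLE, DROP ROLE, and SHOW ROLE GRANT; the latter writes
   * the role names to the descriptor's result file.
   */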
  private int roleDDL(RoleDDLDesc roleDDLDesc) {
    RoleDDLDesc.RoleOperation operation = roleDDLDesc.getOperation();
    try {
      if (operation.equals(RoleDDLDesc.RoleOperation.CREATE_ROLE)) {
        db.createRole(roleDDLDesc.getName(), roleDDLDesc.getRoleOwnerName());
      } else if (operation.equals(RoleDDLDesc.RoleOperation.DROP_ROLE)) {
        db.dropRole(roleDDLDesc.getName());
      } else if (operation.equals(RoleDDLDesc.RoleOperation.SHOW_ROLE_GRANT)) {
        List<Role> roles = db.showRoleGrant(roleDDLDesc.getName(), roleDDLDesc
            .getPrincipalType());
        if (roles != null && roles.size() > 0) {
          Path resFile = new Path(roleDDLDesc.getResFile());
          FileSystem fs = resFile.getFileSystem(conf);
          DataOutput outStream = fs.create(resFile);
          for (Role role : roles) {
            outStream.writeBytes("role name:" + role.getRoleName());
            outStream.write(terminator);
          }
          ((FSDataOutputStream) outStream).close();
        }
      } else {
        throw new HiveException("Unknown role operation "
            + operation.getOperationName());
      }
    } catch (HiveException e) {
      console.printError("Error in role operation "
          + operation.getOperationName() + " on role name "
          + roleDDLDesc.getName() + ", error message " + e.getMessage());
      return 1;
    } catch (IOException e) {
      LOG.info("role ddl exception: " + stringifyException(e));
      return 1;
    }
    return 0;
  }
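
  /**
   * Merges the new properties from the AlterDatabaseDesc into the database's
   * existing parameters and saves the database back to the metastore.
   */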
  private int alterDatabase(AlterDatabaseDesc alterDbDesc) throws HiveException {
    String dbName = alterDbDesc.getDatabaseName();
    Database database = db.getDatabase(dbName);
    Map<String, String> newParams = alterDbDesc.getDatabaseProperties();

    if (database != null) {
      Map<String, String> params = database.getParameters();
      // if both old and new params are not null, merge them
      if (params != null && newParams != null) {
        params.putAll(newParams);
        database.setParameters(params);
      } else { // if one of them is null, replace the old params with the new one
        database.setParameters(newParams);
      }
      db.alterDatabase(database.getName(), database);
    } else {
      throw new HiveException("ERROR: The database " + dbName + " does not exist.");
    }
    return 0;
  }
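
  // Drops the named index from the given table, resolved against the current database.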
  private int dropIndex(Hive db, DropIndexDesc dropIdx) throws HiveException {
    db.dropIndex(db.getCurrentDatabase(), dropIdx.getTableName(),
        dropIdx.getIndexName(), true);
    return 0;
  }
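
  // Creates an index from the CreateIndexDesc, validating any custom SerDe
  // named in the statement before handing everything to Hive.createIndex().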
  private int createIndex(Hive db, CreateIndexDesc crtIndex) throws HiveException {
    if (crtIndex.getSerde() != null) {
      validateSerDe(crtIndex.getSerde());
    }
    db.createIndex(
        crtIndex.getTableName(), crtIndex.getIndexName(), crtIndex.getIndexTypeHandlerClass(),
        crtIndex.getIndexedCols(), crtIndex.getIndexTableName(), crtIndex.getDeferredRebuild(),
        crtIndex.getInputFormat(), crtIndex.getOutputFormat(), crtIndex.getSerde(),
        crtIndex.getStorageHandler(), crtIndex.getLocation(), crtIndex.getIdxProps(), crtIndex.getTblProps(),
        crtIndex.getSerdeProps(), crtIndex.getCollItemDelim(), crtIndex.getFieldDelim(), crtIndex.getFieldEscape(),
        crtIndex.getLineDelim(), crtIndex.getMapKeyDelim(), crtIndex.getIndexComment());
    return 0;
  }
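
  // Currently only the ADDPROPS operation is supported: the new index
  // properties are merged in and the last-modified parameters are updated
  // before the index is altered in the metastore.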
  private int alterIndex(Hive db, AlterIndexDesc alterIndex) throws HiveException {
    String dbName = alterIndex.getDbName();
    String baseTableName = alterIndex.getBaseTableName();
    String indexName = alterIndex.getIndexName();
    Index idx = db.getIndex(dbName, baseTableName, indexName);

    if (alterIndex.getOp() == AlterIndexDesc.AlterIndexTypes.ADDPROPS) {
      idx.getParameters().putAll(alterIndex.getProps());
    } else {
      console.printError("Unsupported Alter command");
      return 1;
    }

    // set last modified by properties
    if (!updateModifiedParameters(idx.getParameters(), conf)) {
      return 1;
    }

    try {
      db.alterIndex(dbName, baseTableName, indexName, idx);
    } catch (InvalidOperationException e) {
      console.printError("Invalid alter operation: " + e.getMessage());
      LOG.info("alter index: " + stringifyException(e));
      return 1;
    } catch (HiveException e) {
      console.printError("Invalid alter operation: " + e.getMessage());
      return 1;
    }
    return 0;
  }

  /**
   * Add a partition to a table.
   *
   * @param db
   *          Database to add the partition to.
   * @param addPartitionDesc
   *          Add this partition.
   * @return Returns 0 when execution succeeds and above 0 if it fails.
   * @throws HiveException
   */
  private int addPartition(Hive db, AddPartitionDesc addPartitionDesc) throws HiveException {
    Table tbl = db.getTable(addPartitionDesc.getDbName(), addPartitionDesc.getTableName());
    validateAlterTableType(tbl, AlterTableDesc.AlterTableTypes.ADDPARTITION);

    // If the add partition was created with IF NOT EXISTS, then we should
    // not throw an error if the specified part does exist.
    Partition checkPart = db.getPartition(tbl, addPartitionDesc.getPartSpec(), false);
    if (checkPart != null && addPartitionDesc.getIfNotExists()) {
      return 0;
    }

    if (addPartitionDesc.getLocation() == null) {
      db.createPartition(tbl, addPartitionDesc.getPartSpec());
    } else {
      // set partition path relative to table
      db.createPartition(tbl, addPartitionDesc.getPartSpec(), new Path(tbl
          .getPath(), addPartitionDesc.getLocation()));
    }

    Partition part = db
        .getPartition(tbl, addPartitionDesc.getPartSpec(), false);
    work.getOutputs().add(new WriteEntity(part));
    return 0;
  }

  /**
   * Rewrite the partition's metadata and force the pre/post execute hooks to
   * be fired.
   *
   * @param db
   * @param touchDesc
   * @return
   * @throws HiveException
   */
  private int touch(Hive db, AlterTableSimpleDesc touchDesc)
      throws HiveException {
    String dbName = touchDesc.getDbName();
    String tblName = touchDesc.getTableName();

    Table tbl = db.getTable(dbName, tblName);
    validateAlterTableType(tbl, AlterTableDesc.AlterTableTypes.TOUCH);

    if (touchDesc.getPartSpec() == null) {
      try {
        db.alterTable(tblName, tbl);
      } catch (InvalidOperationException e) {
        throw new HiveException("Unable to update table");
      }
      work.getInputs().add(new ReadEntity(tbl));
      work.getOutputs().add(new WriteEntity(tbl));
    } else {
      Partition part = db.getPartition(tbl, touchDesc.getPartSpec(), false);
      if (part == null) {
        throw new HiveException("Specified partition does not exist");
      }
      try {
        db.alterPartition(tblName, part);
      } catch (InvalidOperationException e) {
        throw new HiveException(e);
      }
      work.getInputs().add(new ReadEntity(part));
      work.getOutputs().add(new WriteEntity(part));
    }
    return 0;
  }

  /**
   * Determines whether a partition has been archived
   *
   * @param p
   * @return
   */
  private boolean isArchived(Partition p) {
    Map<String, String> params = p.getParameters();
    if ("true".equalsIgnoreCase(params.get(
        org.apache.hadoop.hive.metastore.api.Constants.IS_ARCHIVED))) {
      return true;
    } else {
      return false;
    }
  }

  private void setIsArchived(Partition p, boolean state) {
    Map<String, String> params = p.getParameters();
    if (state) {
      params.put(org.apache.hadoop.hive.metastore.api.Constants.IS_ARCHIVED,
          "true");
    } else {
      params.remove(org.apache.hadoop.hive.metastore.api.Constants.IS_ARCHIVED);
    }
  }

  private String getOriginalLocation(Partition p) {
    Map<String, String> params = p.getParameters();
    return params.get(
        org.apache.hadoop.hive.metastore.api.Constants.ORIGINAL_LOCATION);
  }

  private void setOriginalLocation(Partition p, String loc) {
    Map<String, String> params = p.getParameters();
    if (loc == null) {
      params.remove(org.apache.hadoop.hive.metastore.api.Constants.ORIGINAL_LOCATION);
    } else {
      params.put(org.apache.hadoop.hive.metastore.api.Constants.ORIGINAL_LOCATION, loc);
    }
  }

  // Returns only the path component of the URI
  private String getArchiveDirOnly(Path parentDir, String archiveName) {
    URI parentUri = parentDir.toUri();
    Path harDir = new Path(parentUri.getPath(), archiveName);
    return harDir.toString();
  }

  /**
   * Sets the appropriate attributes in the supplied Partition object to mark
   * it as archived. Note that the metastore is not touched - a separate
   * call to alter_partition is needed.
   *
   * @param p - the partition object to modify
   * @param parentDir - the parent directory of the archive, which is the
   * original directory that the partition's files resided in
   * @param dirInArchive - the directory within the archive file that contains
   * the partition's files
   * @param archiveName - the name of the archive
   * @throws URISyntaxException
   */
  private void setArchived(Partition p, Path parentDir, String dirInArchive, String archiveName)
      throws URISyntaxException {
    assert(isArchived(p) == false);
    Map<String, String> params = p.getParameters();

    URI parentUri = parentDir.toUri();
    String parentHost = parentUri.getHost();
    String harHost = null;
    if (parentHost == null) {
      harHost = "";
    } else {
      harHost = parentUri.getScheme() + "-" + parentHost;
    }

    // harUri is used to access the partition's files, which are in the archive
    // The format of the URI is something like:
    // har://underlyingfsscheme-host:port/archivepath
    URI harUri = null;
    if (dirInArchive.length() == 0) {
      harUri = new URI("har", parentUri.getUserInfo(), harHost, parentUri.getPort(),
          getArchiveDirOnly(parentDir, archiveName),
          parentUri.getQuery(), parentUri.getFragment());
    } else {
      harUri = new URI("har", parentUri.getUserInfo(), harHost, parentUri.getPort(),
          new Path(getArchiveDirOnly(parentDir, archiveName), dirInArchive).toUri().getPath(),
          parentUri.getQuery(), parentUri.getFragment());
    }
    setIsArchived(p, true);
    setOriginalLocation(p, parentDir.toString());
    p.setLocation(harUri.toString());
  }

  /**
   * Sets the appropriate attributes in the supplied Partition object to mark
   * it as not archived. Note that the metastore is not touched - a separate
   * call to alter_partition is needed.
   *
   * @param p - the partition to modify
   */
  private void setUnArchived(Partition p) {
    assert(isArchived(p) == true);
    String parentDir = getOriginalLocation(p);
    setIsArchived(p, false);
    setOriginalLocation(p, null);
    assert(parentDir != null);
    p.setLocation(parentDir);
  }
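
  // Small filesystem helpers used by the archive / unarchive logic below; each
  // wraps the underlying IOException or MetaException in a HiveException.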
  private boolean pathExists(Path p) throws HiveException {
    try {
      FileSystem fs = p.getFileSystem(conf);
      return fs.exists(p);
    } catch (IOException e) {
      throw new HiveException(e);
    }
  }

  private void moveDir(FileSystem fs, Path from, Path to) throws HiveException {
    try {
      if (!fs.rename(from, to)) {
        throw new HiveException("Moving " + from + " to " + to + " failed!");
      }
    } catch (IOException e) {
      throw new HiveException(e);
    }
  }

  private void deleteDir(Path dir) throws HiveException {
    try {
      Warehouse wh = new Warehouse(conf);
      wh.deleteDir(dir, true);
    } catch (MetaException e) {
      throw new HiveException(e);
    }
  }
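
  /**
   * ALTER TABLE ... ARCHIVE PARTITION: packs the files of a single partition
   * of a managed table into a Hadoop archive (data.har) and points the
   * partition's metadata at the har:// location. The intermediate-directory
   * dance below is what makes the operation re-runnable after a failure.
   */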
  private int archive(Hive db, AlterTableSimpleDesc simpleDesc, DriverContext driverContext)
      throws HiveException {
    String dbName = simpleDesc.getDbName();
    String tblName = simpleDesc.getTableName();

    Table tbl = db.getTable(dbName, tblName);
    validateAlterTableType(tbl, AlterTableDesc.AlterTableTypes.ARCHIVE);

    Map<String, String> partSpec = simpleDesc.getPartSpec();
    Partition p = db.getPartition(tbl, partSpec, false);

    if (tbl.getTableType() != TableType.MANAGED_TABLE) {
      throw new HiveException("ARCHIVE can only be performed on managed tables");
    }

    if (p == null) {
      throw new HiveException("Specified partition does not exist");
    }

    if (isArchived(p)) {
      // If there was a failure right after the metadata was updated in an
      // archiving operation, it's possible that the original, unarchived files
      // weren't deleted.
      Path originalDir = new Path(getOriginalLocation(p));
      Path leftOverIntermediateOriginal = new Path(originalDir.getParent(),
          originalDir.getName() + INTERMEDIATE_ORIGINAL_DIR_SUFFIX);

      if (pathExists(leftOverIntermediateOriginal)) {
        console.printInfo("Deleting " + leftOverIntermediateOriginal +
            " left over from a previous archiving operation");
        deleteDir(leftOverIntermediateOriginal);
      }

      throw new HiveException("Specified partition is already archived");
    }

    Path originalDir = p.getPartitionPath();
    Path intermediateArchivedDir = new Path(originalDir.getParent(),
        originalDir.getName() + INTERMEDIATE_ARCHIVED_DIR_SUFFIX);
    Path intermediateOriginalDir = new Path(originalDir.getParent(),
        originalDir.getName() + INTERMEDIATE_ORIGINAL_DIR_SUFFIX);
    String archiveName = "data.har";
    FileSystem fs = null;
    try {
      fs = originalDir.getFileSystem(conf);
    } catch (IOException e) {
      throw new HiveException(e);
    }

    // The following steps seem roundabout, but they are meant to aid in
    // recovery if a failure occurs and to keep a consistent state in the FS
    // Steps:
    // 1. Create the archive in a temporary folder
    // 2. Move the archive dir to an intermediate dir that is at the same
    //    level as the original partition dir. Call the new dir
    //    intermediate-archive.
    // 3. Rename the original partition dir to an intermediate dir. Call the
    //    renamed dir intermediate-original
    // 4. Rename intermediate-archive to the original partition dir
    // 5. Change the metadata
    // 6. Delete the original partition files in intermediate-original
    // The original partition files are deleted after the metadata change
    // because the presence of those files is used to indicate whether
    // the original partition directory contains archived or unarchived files.

    // Create an archived version of the partition in a directory ending in
    // INTERMEDIATE_ARCHIVED_DIR_SUFFIX that's at the same level as the partition,
    // if it does not already exist. If it does exist, we assume the dir is good
    // to use as the move operation that created it is atomic.
    if (!pathExists(intermediateArchivedDir) &&
        !pathExists(intermediateOriginalDir)) {

      // First create the archive in a tmp dir so that if the job fails, the
      // bad files don't pollute the filesystem
      Path tmpDir = new Path(driverContext.getCtx().getExternalTmpFileURI(originalDir.toUri()), "partlevel");

      console.printInfo("Creating " + archiveName + " for " + originalDir.toString());
      console.printInfo("in " + tmpDir);
      console.printInfo("Please wait... (this may take a while)");

      // Create the Hadoop archive
      HadoopShims shim = ShimLoader.getHadoopShims();
      int ret = 0;
      try {
        ret = shim.createHadoopArchive(conf, originalDir, tmpDir, archiveName);
      } catch (Exception e) {
        throw new HiveException(e);
      }
      if (ret != 0) {
        throw new HiveException("Error while creating HAR");
      }

      // Move from the tmp dir to an intermediate directory, in the same level as
      // the partition directory. e.g. .../hr=12-intermediate-archived
      try {
        console.printInfo("Moving " + tmpDir + " to " + intermediateArchivedDir);
        if (pathExists(intermediateArchivedDir)) {
          throw new HiveException("The intermediate archive directory already exists.");
        }
        fs.rename(tmpDir, intermediateArchivedDir);
      } catch (IOException e) {
        throw new HiveException("Error while moving tmp directory");
      }
    } else {
      if (pathExists(intermediateArchivedDir)) {
        console.printInfo("Intermediate archive directory " + intermediateArchivedDir +
            " already exists. Assuming it contains an archived version of the partition");
      }
    }

    // If we get to here, we know that we've archived the partition files, but
    // they may be in the original partition location, or in the intermediate
    // original dir.

    // Move the original parent directory to the intermediate original directory
    // if the move hasn't been made already
    if (!pathExists(intermediateOriginalDir)) {
      console.printInfo("Moving " + originalDir + " to " +
          intermediateOriginalDir);
      moveDir(fs, originalDir, intermediateOriginalDir);
    } else {
      console.printInfo(intermediateOriginalDir + " already exists. " +
          "Assuming it contains the original files in the partition");
    }

    // If there's a failure from here to when the metadata is updated,
    // there will be no data in the partition, or an error while trying to read
    // the partition (if the archive files have been moved to the original
    // partition directory.) But re-running the archive command will allow
    // recovery

    // Move the intermediate archived directory to the original parent directory
    if (!pathExists(originalDir)) {
      console.printInfo("Moving " + intermediateArchivedDir + " to " +
          originalDir);
      moveDir(fs, intermediateArchivedDir, originalDir);
    } else {
      console.printInfo(originalDir + " already exists. " +
          "Assuming it contains the archived version of the partition");
    }

    // Record this change in the metastore
    try {
      boolean parentSettable =
          conf.getBoolVar(HiveConf.ConfVars.HIVEHARPARENTDIRSETTABLE);

      // dirInArchive is the directory within the archive that has all the files
      // for this partition. With older versions of Hadoop, archiving a
      // directory would produce the same directory structure
      // in the archive. So if you created myArchive.har of /tmp/myDir, the
      // files in /tmp/myDir would be located under myArchive.har/tmp/myDir/*
      // In this case, dirInArchive should be tmp/myDir

      // With newer versions of Hadoop, the parent directory could be specified.
      // Assuming the parent directory was set to /tmp/myDir when creating the
      // archive, the files can be found under myArchive.har/*
      // In this case, dirInArchive should be empty
      String dirInArchive = "";
      if (!parentSettable) {
        dirInArchive = originalDir.toUri().getPath();
        if (dirInArchive.length() > 1 && dirInArchive.charAt(0) == '/') {
          dirInArchive = dirInArchive.substring(1);
        }
      }
      setArchived(p, originalDir, dirInArchive, archiveName);
      db.alterPartition(tblName, p);
    } catch (Exception e) {
      throw new HiveException("Unable to change the partition info for HAR", e);
    }

    // If a failure occurs here, the directory containing the original files
    // will not be deleted. The user will run ARCHIVE again to clear this up
    deleteDir(intermediateOriginalDir);

    return 0;
  }
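
  /**
   * ALTER TABLE ... UNARCHIVE PARTITION: the reverse of archive(). Extracts the
   * files out of the partition's HAR back into a plain directory and restores
   * the partition's original location in the metastore, again using
   * intermediate directories so the operation can be re-run after a failure.
   */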
  private int unarchive(Hive db, AlterTableSimpleDesc simpleDesc)
      throws HiveException {
    String dbName = simpleDesc.getDbName();
    String tblName = simpleDesc.getTableName();

    Table tbl = db.getTable(dbName, tblName);
    validateAlterTableType(tbl, AlterTableDesc.AlterTableTypes.UNARCHIVE);

    // Means user specified a table, not a partition
    if (simpleDesc.getPartSpec() == null) {
      throw new HiveException("UNARCHIVE is for partitions only");
    }

    Map<String, String> partSpec = simpleDesc.getPartSpec();
    Partition p = db.getPartition(tbl, partSpec, false);

    if (tbl.getTableType() != TableType.MANAGED_TABLE) {
      throw new HiveException("UNARCHIVE can only be performed on managed tables");
    }

    if (p == null) {
      throw new HiveException("Specified partition does not exist");
    }

    if (!isArchived(p)) {
      Path location = new Path(p.getLocation());
      Path leftOverArchiveDir = new Path(location.getParent(),
          location.getName() + INTERMEDIATE_ARCHIVED_DIR_SUFFIX);

      if (pathExists(leftOverArchiveDir)) {
        console.printInfo("Deleting " + leftOverArchiveDir + " left over " +
            "from a previous unarchiving operation");
        deleteDir(leftOverArchiveDir);
      }
      throw new HiveException("Specified partition is not archived");
    }

    Path originalLocation = new Path(getOriginalLocation(p));
    Path sourceDir = new Path(p.getLocation());
    Path intermediateArchiveDir = new Path(originalLocation.getParent(),
        originalLocation.getName() + INTERMEDIATE_ARCHIVED_DIR_SUFFIX);
    Path intermediateExtractedDir = new Path(originalLocation.getParent(),
        originalLocation.getName() + INTERMEDIATE_EXTRACTED_DIR_SUFFIX);

    Path tmpDir = new Path(driverContext
        .getCtx()
        .getExternalTmpFileURI(originalLocation.toUri()));

    FileSystem fs = null;
    try {
      fs = tmpDir.getFileSystem(conf);
      // Verify that there are no files in the tmp dir, because if there are,
      // they would be copied to the partition
      FileStatus[] filesInTmpDir = fs.listStatus(tmpDir);
      if (filesInTmpDir != null && filesInTmpDir.length != 0) {
        for (FileStatus file : filesInTmpDir) {
          console.printInfo(file.getPath().toString());
        }
        throw new HiveException("Temporary directory " + tmpDir + " is not empty");
      }
    } catch (IOException e) {
      throw new HiveException(e);
    }

    // Some sanity checks
    if (originalLocation == null) {
      throw new HiveException("Missing archive data in the partition");
    }
    if (!"har".equals(sourceDir.toUri().getScheme())) {
      throw new HiveException("Location should refer to a HAR");
    }

    // Clarification of terms:
    // - The originalLocation directory represents the original directory of the
    //   partition's files. They now contain an archived version of those files
    //   eg. hdfs:/warehouse/myTable/ds=1/
    // - The source directory is the directory containing all the files that
    //   should be in the partition. e.g. har:/warehouse/myTable/ds=1/myTable.har/
    //   Note the har:/ scheme

    // Steps:
    // 1. Extract the archive in a temporary folder
    // 2. Move the archive dir to an intermediate dir that is at the same
    //    level as originalLocation. Call the new dir intermediate-extracted.
    // 3. Rename the original partition dir to an intermediate dir. Call the
    //    renamed dir intermediate-archive
    // 4. Rename intermediate-extracted to the original partition dir
    // 5. Change the metadata
    // 6. Delete the archived partition files in intermediate-archive

    if (!pathExists(intermediateExtractedDir) &&
        !pathExists(intermediateArchiveDir)) {
      try {

        // Copy the files out of the archive into the temporary directory
        String copySource = (new Path(sourceDir, "*")).toString();
        String copyDest = tmpDir.toString();
        List<String> args = new ArrayList<String>();
        args.add("-cp");
        args.add(copySource);
        args.add(copyDest);

        console.printInfo("Copying " + copySource + " to " + copyDest);
        FsShell fss = new FsShell(conf);
        int ret = 0;
        try {
          ret = ToolRunner.run(fss, args.toArray(new String[0]));
        } catch (Exception e) {
          throw new HiveException(e);
        }
        if (ret != 0) {
          throw new HiveException("Error while copying files from archive");
        }

        console.printInfo("Moving " + tmpDir + " to " + intermediateExtractedDir);
        if (fs.exists(intermediateExtractedDir)) {
          throw new HiveException("Invalid state: the intermediate extracted " +
              "directory already exists.");
        }
        fs.rename(tmpDir, intermediateExtractedDir);
      } catch (Exception e) {
        throw new HiveException(e);
      }
    }

    // At this point, we know that the extracted files are in the intermediate
    // extracted dir, or in the original directory.

    if (!pathExists(intermediateArchiveDir)) {
      try {
        console.printInfo("Moving " + originalLocation + " to " + intermediateArchiveDir);
        fs.rename(originalLocation, intermediateArchiveDir);
      } catch (IOException e) {
        throw new HiveException(e);
      }
    } else {
      console.printInfo(intermediateArchiveDir + " already exists. " +
          "Assuming it contains the archived version of the partition");
    }

    // If there is a failure from here until the metadata is changed,
    // the partition will be empty or throw errors on read.
    //
    // If the original location exists here, then it must be the extracted files
    // because in the previous step, we moved the previous original location
    // (containing the archived version of the files) to intermediateArchiveDir
    if (!pathExists(originalLocation)) {
      try {
        console.printInfo("Moving " + intermediateExtractedDir + " to " + originalLocation);
        fs.rename(intermediateExtractedDir, originalLocation);
      } catch (IOException e) {
        throw new HiveException(e);
      }
    } else {
      console.printInfo(originalLocation + " already exists. " +
          "Assuming it contains the extracted files in the partition");
    }

    setUnArchived(p);
    try {
      db.alterPartition(tblName, p);
    } catch (InvalidOperationException e) {
      throw new HiveException(e);
    }

    // If a failure happens here, the intermediate archive files won't be
    // deleted. The user will need to call unarchive again to clear those up.
    deleteDir(intermediateArchiveDir);

    return 0;
  }
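
  // Rejects ALTER TABLE variants that are not allowed on views (only ADDPROPS
  // is) and any ALTER TABLE on a non-native, storage-handler-backed table.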
  private void validateAlterTableType(
      Table tbl, AlterTableDesc.AlterTableTypes alterType) throws HiveException {

    if (tbl.isView()) {
      switch (alterType) {
      case ADDPROPS:
        // allow this form
        break;
      default:
        throw new HiveException(
            "Cannot use this form of ALTER TABLE on a view");
      }
    }

    if (tbl.isNonNative()) {
      throw new HiveException("Cannot use ALTER TABLE on a non-native table");
    }
  }

  /**
   * MetastoreCheck, see if the data in the metastore matches what is on the
   * dfs. Current version checks for tables and partitions that are either
   * missing on disk or in the metastore.
   *
   * @param db
   *          The database in question.
   * @param msckDesc
   *          Information about the tables and partitions we want to check for.
   * @return Returns 0 when execution succeeds and above 0 if it fails.
   */
  private int msck(Hive db, MsckDesc msckDesc) {
    CheckResult result = new CheckResult();
    List<String> repairOutput = new ArrayList<String>();
    try {
      HiveMetaStoreChecker checker = new HiveMetaStoreChecker(db);
      Table t = db.newTable(msckDesc.getTableName());
      checker.checkMetastore(t.getDbName(), t.getTableName(), msckDesc.getPartSpecs(), result);
      if (msckDesc.isRepairPartitions()) {
        Table table = db.getTable(msckDesc.getTableName());
        for (CheckResult.PartitionResult part : result.getPartitionsNotInMs()) {
          try {
            db.createPartition(table, Warehouse.makeSpecFromName(part
                .getPartitionName()));
            repairOutput.add("Repair: Added partition to metastore "
                + msckDesc.getTableName() + ':' + part.getPartitionName());
          } catch (Exception e) {
            LOG.warn("Repair error, could not add partition to metastore: ", e);
          }
        }
      }
    } catch (HiveException e) {
      LOG.warn("Failed to run metacheck: ", e);
      return 1;
    } catch (IOException e) {
      LOG.warn("Failed to run metacheck: ", e);
      return 1;
    } finally {
      BufferedWriter resultOut = null;
      try {
        Path resFile = new Path(msckDesc.getResFile());
        FileSystem fs = resFile.getFileSystem(conf);
        resultOut = new BufferedWriter(new OutputStreamWriter(fs
            .create(resFile)));

        boolean firstWritten = false;
        firstWritten |= writeMsckResult(result.getTablesNotInMs(),
            "Tables not in metastore:", resultOut, firstWritten);
        firstWritten |= writeMsckResult(result.getTablesNotOnFs(),
            "Tables missing on filesystem:", resultOut, firstWritten);
        firstWritten |= writeMsckResult(result.getPartitionsNotInMs(),
            "Partitions not in metastore:", resultOut, firstWritten);
        firstWritten |= writeMsckResult(result.getPartitionsNotOnFs(),
            "Partitions missing from filesystem:", resultOut, firstWritten);
        for (String rout : repairOutput) {
          if (firstWritten) {
            resultOut.write(terminator);
          } else {
            firstWritten = true;
          }
          resultOut.write(rout);
        }
      } catch (IOException e) {
        LOG.warn("Failed to save metacheck output: ", e);
        return 1;
      } finally {
        if (resultOut != null) {
          try {
            resultOut.close();
          } catch (IOException e) {
            LOG.warn("Failed to close output file: ", e);
            return 1;
          }
        }
      }
    }
    return 0;
  }
  1319. /**
  1320. * Write the result of msck to a writer.
  1321. *
  1322. * @param result
  1323. * The result we're going to write
  1324. * @param msg
  1325. * Message to write.
  1326. * @param out
  1327. * Writer to write to
  1328. * @param wrote
  1329. * if any previous call wrote data
  1330. * @return true if something was written
  1331. * @throws IOException
  1332. * In case the writing fails
  1333. */
  1334. private boolean writeMsckResult(List<? extends Object> result, String msg,
  1335. Writer out, boolean wrote) throws IOException {
  1336. if (!result.isEmpty()) {
  1337. if (wrote) {
  1338. out.write(terminator);
  1339. }
  1340. out.write(msg);
  1341. for (Object entry : result) {
  1342. out.write(separator);
  1343. out.write(entry.toString());
  1344. }
  1345. return true;
  1346. }
  1347. return false;
  1348. }
  1349. /**
  1350. * Write a list of partitions to a file.
  1351. *
  1352. * @param db
  1353. * The database in question.
  1354. * @param showParts
  1355. * These are the partitions we're interested in.
  1356. * @return Returns 0 when execution succeeds and above 0 if it fails.
  1357. * @throws HiveException
  1358. * Throws this exception if an unexpected error occurs.
  1359. */
  1360. private int showPartitions(Hive db, ShowPartitionsDesc showParts) throws HiveException {
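// Illustrative HiveQL (placeholder names):
//   SHOW PARTITIONS page_view;
//   SHOW PARTITIONS page_view PARTITION (ds='2010-08-08');   -- partial-spec filter handled below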
  1361. // get the partitions for the table and populate the output
  1362. String tabName = showParts.getTabName();
  1363. Table tbl = null;
  1364. List<String> parts = null;
  1365. tbl = db.getTable(tabName);
  1366. if (!tbl.isPartitioned()) {
  1367. console.printError("Table " + tabName + " is not a partitioned table");
  1368. return 1;
  1369. }
  1370. if (showParts.getPartSpec() != null) {
  1371. parts = db.getPartitionNames(tbl.getDbName(),
  1372. tbl.getTableName(), showParts.getPartSpec(), (short) -1);
  1373. } else {
  1374. parts = db.getPartitionNames(tbl.getDbName(), tbl.getTableName(), (short) -1);
  1375. }
  1376. // write the results in the file
  1377. try {
  1378. Path resFile = new Path(showParts.getResFile());
  1379. FileSystem fs = resFile.getFileSystem(conf);
  1380. DataOutput outStream = fs.create(resFile);
  1381. Iterator<String> iterParts = parts.iterator();
  1382. while (iterParts.hasNext()) {
  1383. // create a row per partition name
  1384. outStream.writeBytes(iterParts.next());
  1385. outStream.write(terminator);
  1386. }
  1387. ((FSDataOutputStream) outStream).close();
  1388. } catch (FileNotFoundException e) {
  1389. LOG.info("show partitions: " + stringifyException(e));
  1390. throw new HiveException(e.toString());
  1391. } catch (IOException e) {
  1392. LOG.info("show partitions: " + stringifyException(e));
  1393. throw new HiveException(e.toString());
  1394. } catch (Exception e) {
  1395. throw new HiveException(e.toString());
  1396. }
  1397. return 0;
  1398. }
  1399. /**
  1400. * Write a list of indexes to a file.
  1401. *
  1402. * @param db
  1403. * The database in question.
  1404. * @param showIndexes
  1405. * These are the indexes we're interested in.
  1406. * @return Returns 0 when execution succeeds and above 0 if it fails.
  1407. * @throws HiveException
  1408. * Throws this exception if an unexpected error occurs.
  1409. */
  1410. private int showIndexes(Hive db, ShowIndexesDesc showIndexes) throws HiveException {
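// Illustrative HiveQL (placeholder name):
//   SHOW INDEXES ON page_view;
//   SHOW FORMATTED INDEXES ON page_view;   -- adds the column headers written below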
  1411. // get the indexes for the table and populate the output
  1412. String tableName = showIndexes.getTableName();
  1413. Table tbl = null;
  1414. List<Index> indexes = null;
  1415. tbl = db.getTable(tableName);
  1416. indexes = db.getIndexes(tbl.getDbName(), tbl.getTableName(), (short) -1);
  1417. // write the results in the file
  1418. try {
  1419. Path resFile = new Path(showIndexes.getResFile());
  1420. FileSystem fs = resFile.getFileSystem(conf);
  1421. DataOutput outStream = fs.create(resFile);
  1422. if (showIndexes.isFormatted()) {
  1423. // column headers
  1424. outStream.writeBytes(MetaDataFormatUtils.getIndexColumnsHeader());
  1425. outStream.write(terminator);
  1426. outStream.write(terminator);
  1427. }
  1428. for (Index index : indexes)
  1429. {
  1430. outStream.writeBytes(MetaDataFormatUtils.getAllColumnsInformation(index));
  1431. }
  1432. ((FSDataOutputStream) outStream).close();
  1433. } catch (FileNotFoundException e) {
  1434. LOG.info("show indexes: " + stringifyException(e));
  1435. throw new HiveException(e.toString());
  1436. } catch (IOException e) {
  1437. LOG.info("show indexes: " + stringifyException(e));
  1438. throw new HiveException(e.toString());
  1439. } catch (Exception e) {
  1440. throw new HiveException(e.toString());
  1441. }
  1442. return 0;
  1443. }
  1444. /**
  1445. * Write a list of the available databases to a file.
  1446. *
1447. * @param showDatabasesDesc
  1448. * These are the databases we're interested in.
  1449. * @return Returns 0 when execution succeeds and above 0 if it fails.
  1450. * @throws HiveException
  1451. * Throws this exception if an unexpected error occurs.
  1452. */
  1453. private int showDatabases(Hive db, ShowDatabasesDesc showDatabasesDesc) throws HiveException {
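// Illustrative HiveQL:
//   SHOW DATABASES;
//   SHOW DATABASES LIKE 'prod*';   -- pattern form handled by getPattern() below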
  1454. // get the databases for the desired pattern - populate the output stream
  1455. List<String> databases = null;
  1456. if (showDatabasesDesc.getPattern() != null) {
  1457. LOG.info("pattern: " + showDatabasesDesc.getPattern());
  1458. databases = db.getDatabasesByPattern(showDatabasesDesc.getPattern());
  1459. } else {
  1460. databases = db.getAllDatabases();
  1461. }
  1462. LOG.info("results : " + databases.size());
  1463. // write the results in the file
  1464. try {
  1465. Path resFile = new Path(showDatabasesDesc.getResFile());
  1466. FileSystem fs = resFile.getFileSystem(conf);
  1467. DataOutput outStream = fs.create(resFile);
  1468. for (String database : databases) {
  1469. // create a row per database name
  1470. outStream.writeBytes(database);
  1471. outStream.write(terminator);
  1472. }
  1473. ((FSDataOutputStream) outStream).close();
  1474. } catch (FileNotFoundException e) {
  1475. LOG.warn("show databases: " + stringifyException(e));
  1476. return 1;
  1477. } catch (IOException e) {
  1478. LOG.warn("show databases: " + stringifyException(e));
  1479. return 1;
  1480. } catch (Exception e) {
  1481. throw new HiveException(e.toString());
  1482. }
  1483. return 0;
  1484. }
  1485. /**
  1486. * Write a list of the tables in the database to a file.
  1487. *
  1488. * @param db
  1489. * The database in question.
  1490. * @param showTbls
  1491. * These are the tables we're interested in.
  1492. * @return Returns 0 when execution succeeds and above 0 if it fails.
  1493. * @throws HiveException
  1494. * Throws this exception if an unexpected error occurs.
  1495. */
  1496. private int showTables(Hive db, ShowTablesDesc showTbls) throws HiveException {
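// Illustrative HiveQL (database name and pattern are placeholders):
//   SHOW TABLES;
//   SHOW TABLES IN mydb;
//   SHOW TABLES 'page.*';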
1497. // get the tables for the desired pattern - populate the output stream
  1498. List<String> tbls = null;
  1499. String dbName = showTbls.getDbName();
  1500. if (!db.databaseExists(dbName)) {
  1501. throw new HiveException("ERROR: The database " + dbName + " does not exist.");
  1502. }
  1503. if (showTbls.getPattern() != null) {
  1504. LOG.info("pattern: " + showTbls.getPattern());
  1505. tbls = db.getTablesByPattern(dbName, showTbls.getPattern());
  1506. LOG.info("results : " + tbls.size());
  1507. } else {
  1508. tbls = db.getAllTables(dbName);
  1509. }
  1510. // write the results in the file
  1511. try {
  1512. Path resFile = new Path(showTbls.getResFile());
  1513. FileSystem fs = resFile.getFileSystem(conf);
  1514. DataOutput outStream = fs.create(resFile);
  1515. SortedSet<String> sortedTbls = new TreeSet<String>(tbls);
  1516. Iterator<String> iterTbls = sortedTbls.iterator();
  1517. while (iterTbls.hasNext()) {
  1518. // create a row per table name
  1519. outStream.writeBytes(iterTbls.next());
  1520. outStream.write(terminator);
  1521. }
  1522. ((FSDataOutputStream) outStream).close();
  1523. } catch (FileNotFoundException e) {
  1524. LOG.warn("show table: " + stringifyException(e));
  1525. return 1;
  1526. } catch (IOException e) {
  1527. LOG.warn("show table: " + stringifyException(e));
  1528. return 1;
  1529. } catch (Exception e) {
  1530. throw new HiveException(e.toString());
  1531. }
  1532. return 0;
  1533. }
  1534. /**
  1535. * Write a list of the user defined functions to a file.
  1536. *
  1537. * @param showFuncs
1538. * These are the functions we're interested in.
  1539. * @return Returns 0 when execution succeeds and above 0 if it fails.
  1540. * @throws HiveException
  1541. * Throws this exception if an unexpected error occurs.
  1542. */
  1543. private int showFunctions(ShowFunctionsDesc showFuncs) throws HiveException {
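// Illustrative HiveQL:
//   SHOW FUNCTIONS;
//   SHOW FUNCTIONS 'xpath.*';   -- pattern form handled by getPattern() below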
1544. // get the functions for the desired pattern - populate the output stream
  1545. Set<String> funcs = null;
  1546. if (showFuncs.getPattern() != null) {
  1547. LOG.info("pattern: " + showFuncs.getPattern());
  1548. funcs = FunctionRegistry.getFunctionNames(showFuncs.getPattern());
  1549. LOG.info("results : " + funcs.size());
  1550. } else {
  1551. funcs = FunctionRegistry.getFunctionNames();
  1552. }
  1553. // write the results in the file
  1554. try {
  1555. Path resFile = new Path(showFuncs.getResFile());
  1556. FileSystem fs = resFile.getFileSystem(conf);
  1557. DataOutput outStream = fs.create(resFile);
  1558. SortedSet<String> sortedFuncs = new TreeSet<String>(funcs);
  1559. Iterator<String> iterFuncs = sortedFuncs.iterator();
  1560. while (iterFuncs.hasNext()) {
1561. // create a row per function name
  1562. outStream.writeBytes(iterFuncs.next());
  1563. outStream.write(terminator);
  1564. }
  1565. ((FSDataOutputStream) outStream).close();
  1566. } catch (FileNotFoundException e) {
  1567. LOG.warn("show function: " + stringifyException(e));
  1568. return 1;
  1569. } catch (IOException e) {
  1570. LOG.warn("show function: " + stringifyException(e));
  1571. return 1;
  1572. } catch (Exception e) {
  1573. throw new HiveException(e.toString());
  1574. }
  1575. return 0;
  1576. }
  1577. /**
  1578. * Write a list of the current locks to a file.
  1579. *
  1580. * @param showLocks
  1581. * the locks we're interested in.
  1582. * @return Returns 0 when execution succeeds and above 0 if it fails.
  1583. * @throws HiveException
  1584. * Throws this exception if an unexpected error occurs.
  1585. */
  1586. private int showLocks(ShowLocksDesc showLocks) throws HiveException {
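// Illustrative HiveQL (placeholder name):
//   SHOW LOCKS;
//   SHOW LOCKS page_view EXTENDED;   -- EXTENDED adds the query id/time/mode lines below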
  1587. Context ctx = driverContext.getCtx();
  1588. HiveLockManager lockMgr = ctx.getHiveLockMgr();
  1589. boolean isExt = showLocks.isExt();
  1590. if (lockMgr == null) {
  1591. throw new HiveException("show Locks LockManager not specified");
  1592. }
  1593. // write the results in the file
  1594. try {
  1595. Path resFile = new Path(showLocks.getResFile());
  1596. FileSystem fs = resFile.getFileSystem(conf);
  1597. DataOutput outStream = fs.create(resFile);
  1598. List<HiveLock> locks = null;
  1599. if (showLocks.getTableName() == null) {
  1600. locks = lockMgr.getLocks(false, isExt);
  1601. }
  1602. else {
  1603. locks = lockMgr.getLocks(getHiveObject(showLocks.getTableName(),
  1604. showLocks.getPartSpec()),
  1605. true, isExt);
  1606. }
  1607. Collections.sort(locks, new Comparator<HiveLock>() {
  1608. @Override
  1609. public int compare(HiveLock o1, HiveLock o2) {
  1610. int cmp = o1.getHiveLockObject().getName().compareTo(o2.getHiveLockObject().getName());
  1611. if (cmp == 0) {
  1612. if (o1.getHiveLockMode() == o2.getHiveLockMode()) {
  1613. return cmp;
  1614. }
  1615. // EXCLUSIVE locks occur before SHARED locks
  1616. if (o1.getHiveLockMode() == HiveLockMode.EXCLUSIVE) {
  1617. return -1;
  1618. }
  1619. return +1;
  1620. }
  1621. return cmp;
  1622. }
  1623. });
  1624. Iterator<HiveLock> locksIter = locks.iterator();
  1625. while (locksIter.hasNext()) {
  1626. HiveLock lock = locksIter.next();
  1627. outStream.writeBytes(lock.getHiveLockObject().getDisplayName());
  1628. outStream.write(separator);
  1629. outStream.writeBytes(lock.getHiveLockMode().toString());
  1630. if (isExt) {
  1631. outStream.write(terminator);
  1632. HiveLockObjectData lockData = lock.getHiveLockObject().getData();
  1633. if (lockData != null) {
  1634. outStream.writeBytes("LOCK_QUERYID:" + lockData.getQueryId() + " ");
  1635. outStream.writeBytes("LOCK_TIME:" + lockData.getLockTime() + " ");
  1636. outStream.writeBytes("LOCK_MODE:" + lockData.getLockMode() + " ");
  1637. }
  1638. }
  1639. outStream.write(terminator);
  1640. }
  1641. ((FSDataOutputStream) outStream).close();
  1642. } catch (FileNotFoundException e) {
  1643. LOG.warn("show function: " + stringifyException(e));
  1644. return 1;
  1645. } catch (IOException e) {
  1646. LOG.warn("show function: " + stringifyException(e));
  1647. return 1;
  1648. } catch (Exception e) {
  1649. throw new HiveException(e.toString());
  1650. }
  1651. return 0;
  1652. }
  1653. /**
  1654. * Lock the table/partition specified
  1655. *
  1656. * @param lockTbl
  1657. * the table/partition to be locked along with the mode
  1658. * @return Returns 0 when execution succeeds and above 0 if it fails.
  1659. * @throws HiveException
  1660. * Throws this exception if an unexpected error occurs.
  1661. */
  1662. private int lockTable(LockTableDesc lockTbl) throws HiveException {
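// Illustrative HiveQL (placeholder names; mode is SHARED or EXCLUSIVE):
//   LOCK TABLE page_view SHARED;
//   LOCK TABLE page_view PARTITION (ds='2010-08-08') EXCLUSIVE;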
  1663. Context ctx = driverContext.getCtx();
  1664. HiveLockManager lockMgr = ctx.getHiveLockMgr();
  1665. if (lockMgr == null) {
  1666. throw new HiveException("lock Table LockManager not specified");
  1667. }
  1668. HiveLockMode mode = HiveLockMode.valueOf(lockTbl.getMode());
  1669. String tabName = lockTbl.getTableName();
  1670. Table tbl = db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tabName);
  1671. if (tbl == null) {
  1672. throw new HiveException("Table " + tabName + " does not exist ");
  1673. }
  1674. Map<String, String> partSpec = lockTbl.getPartSpec();
  1675. HiveLockObjectData lockData =
  1676. new HiveLockObjectData(lockTbl.getQueryId(),
  1677. String.valueOf(System.currentTimeMillis()),
  1678. "EXPLICIT");
  1679. if (partSpec == null) {
  1680. HiveLock lck = lockMgr.lock(new HiveLockObject(tbl, lockData), mode, true, 0, 0);
  1681. if (lck == null) {
  1682. return 1;
  1683. }
  1684. return 0;
  1685. }
  1686. Partition par = db.getPartition(tbl, partSpec, false);
  1687. if (par == null) {
  1688. throw new HiveException("Partition " + partSpec + " for table " + tabName + " does not exist");
  1689. }
  1690. HiveLock lck = lockMgr.lock(new HiveLockObject(par, lockData), mode, true, 0, 0);
  1691. if (lck == null) {
  1692. return 1;
  1693. }
  1694. return 0;
  1695. }
  1696. private HiveLockObject getHiveObject(String tabName,
  1697. Map<String, String> partSpec) throws HiveException {
  1698. Table tbl = db.getTable(tabName);
  1699. if (tbl == null) {
  1700. throw new HiveException("Table " + tabName + " does not exist ");
  1701. }
  1702. HiveLockObject obj = null;
  1703. if (partSpec == null) {
  1704. obj = new HiveLockObject(tbl, null);
  1705. }
  1706. else {
  1707. Partition par = db.getPartition(tbl, partSpec, false);
  1708. if (par == null) {
  1709. throw new HiveException("Partition " + partSpec + " for table " + tabName + " does not exist");
  1710. }
  1711. obj = new HiveLockObject(par, null);
  1712. }
  1713. return obj;
  1714. }
  1715. /**
  1716. * Unlock the table/partition specified
  1717. *
  1718. * @param unlockTbl
  1719. * the table/partition to be unlocked
  1720. * @return Returns 0 when execution succeeds and above 0 if it fails.
  1721. * @throws HiveException
  1722. * Throws this exception if an unexpected error occurs.
  1723. */
  1724. private int unlockTable(UnlockTableDesc unlockTbl) throws HiveException {
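// Illustrative HiveQL (placeholder name):
//   UNLOCK TABLE page_view;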
  1725. Context ctx = driverContext.getCtx();
  1726. HiveLockManager lockMgr = ctx.getHiveLockMgr();
  1727. if (lockMgr == null) {
  1728. throw new HiveException("unlock Table LockManager not specified");
  1729. }
  1730. String tabName = unlockTbl.getTableName();
  1731. HiveLockObject obj = getHiveObject(tabName, unlockTbl.getPartSpec());
  1732. List<HiveLock> locks = lockMgr.getLocks(obj, false, false);
  1733. if ((locks == null) || (locks.isEmpty())) {
  1734. throw new HiveException("Table " + tabName + " is not locked ");
  1735. }
  1736. Iterator<HiveLock> locksIter = locks.iterator();
  1737. while (locksIter.hasNext()) {
  1738. HiveLock lock = locksIter.next();
  1739. lockMgr.unlock(lock);
  1740. }
  1741. return 0;
  1742. }
  1743. /**
  1744. * Shows a description of a function.
  1745. *
  1746. * @param descFunc
  1747. * is the function we are describing
  1748. * @throws HiveException
  1749. */
  1750. private int describeFunction(DescFunctionDesc descFunc) throws HiveException {
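// Illustrative HiveQL:
//   DESCRIBE FUNCTION concat;
//   DESCRIBE FUNCTION EXTENDED concat;   -- adds synonyms and the extended doc below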
  1751. String funcName = descFunc.getName();
  1752. // write the results in the file
  1753. try {
  1754. Path resFile = new Path(descFunc.getResFile());
  1755. FileSystem fs = resFile.getFileSystem(conf);
  1756. DataOutput outStream = fs.create(resFile);
  1757. // get the function documentation
  1758. Description desc = null;
  1759. Class<?> funcClass = null;
  1760. FunctionInfo functionInfo = FunctionRegistry.getFunctionInfo(funcName);
  1761. if (functionInfo != null) {
  1762. funcClass = functionInfo.getFunctionClass();
  1763. }
  1764. if (funcClass != null) {
  1765. desc = funcClass.getAnnotation(Description.class);
  1766. }
  1767. if (desc != null) {
  1768. outStream.writeBytes(desc.value().replace("_FUNC_", funcName));
  1769. if (descFunc.isExtended()) {
  1770. Set<String> synonyms = FunctionRegistry.getFunctionSynonyms(funcName);
  1771. if (synonyms.size() > 0) {
  1772. outStream.writeBytes("\nSynonyms: " + join(synonyms, ", "));
  1773. }
  1774. if (desc.extended().length() > 0) {
  1775. outStream.writeBytes("\n"
  1776. + desc.extended().replace("_FUNC_", funcName));
  1777. }
  1778. }
  1779. } else {
  1780. if (funcClass != null) {
  1781. outStream.writeBytes("There is no documentation for function '"
  1782. + funcName + "'");
  1783. } else {
  1784. outStream.writeBytes("Function '" + funcName + "' does not exist.");
  1785. }
  1786. }
  1787. outStream.write(terminator);
  1788. ((FSDataOutputStream) outStream).close();
  1789. } catch (FileNotFoundException e) {
  1790. LOG.warn("describe function: " + stringifyException(e));
  1791. return 1;
  1792. } catch (IOException e) {
  1793. LOG.warn("describe function: " + stringifyException(e));
  1794. return 1;
  1795. } catch (Exception e) {
  1796. throw new HiveException(e.toString());
  1797. }
  1798. return 0;
  1799. }
  1800. private int descDatabase(DescDatabaseDesc descDatabase) throws HiveException {
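// Illustrative HiveQL (placeholder name):
//   DESCRIBE DATABASE mydb;
//   DESCRIBE DATABASE EXTENDED mydb;   -- also prints the database parameters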
  1801. try {
  1802. Path resFile = new Path(descDatabase.getResFile());
  1803. FileSystem fs = resFile.getFileSystem(conf);
  1804. DataOutput outStream = fs.create(resFile);
  1805. Database database = db.getDatabase(descDatabase.getDatabaseName());
  1806. if (database != null) {
  1807. outStream.writeBytes(database.getName());
  1808. outStream.write(separator);
  1809. if (database.getDescription() != null) {
  1810. outStream.writeBytes(database.getDescription());
  1811. }
  1812. outStream.write(separator);
  1813. if (database.getLocationUri() != null) {
  1814. outStream.writeBytes(database.getLocationUri());
  1815. }
  1816. outStream.write(separator);
  1817. if (descDatabase.isExt() && database.getParametersSize() > 0) {
  1818. Map<String, String> params = database.getParameters();
  1819. outStream.writeBytes(params.toString());
  1820. }
  1821. } else {
  1822. outStream.writeBytes("No such database: " + descDatabase.getDatabaseName());
  1823. }
  1824. outStream.write(terminator);
  1825. ((FSDataOutputStream) outStream).close();
  1826. } catch (FileNotFoundException e) {
  1827. LOG.warn("describe database: " + stringifyException(e));
  1828. return 1;
  1829. } catch (IOException e) {
  1830. LOG.warn("describe database: " + stringifyException(e));
  1831. return 1;
  1832. } catch (Exception e) {
  1833. throw new HiveException(e.toString());
  1834. }
  1835. return 0;
  1836. }
  1837. /**
  1838. * Write the status of tables to a file.
  1839. *
  1840. * @param db
  1841. * The database in question.
  1842. * @param showTblStatus
  1843. * tables we are interested in
  1844. * @return Return 0 when execution succeeds and above 0 if it fails.
  1845. */
  1846. private int showTableStatus(Hive db, ShowTableStatusDesc showTblStatus) throws HiveException {
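// Illustrative HiveQL (placeholder names):
//   SHOW TABLE EXTENDED LIKE 'page_view';
//   SHOW TABLE EXTENDED LIKE 'page_view' PARTITION (ds='2010-08-08');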
1847. // get the tables for the desired pattern - populate the output stream
  1848. List<Table> tbls = new ArrayList<Table>();
  1849. Map<String, String> part = showTblStatus.getPartSpec();
  1850. Partition par = null;
  1851. if (part != null) {
  1852. Table tbl = db.getTable(showTblStatus.getDbName(), showTblStatus.getPattern());
  1853. par = db.getPartition(tbl, part, false);
  1854. if (par == null) {
  1855. throw new HiveException("Partition " + part + " for table "
  1856. + showTblStatus.getPattern() + " does not exist.");
  1857. }
  1858. tbls.add(tbl);
  1859. } else {
  1860. LOG.info("pattern: " + showTblStatus.getPattern());
  1861. List<String> tblStr = db.getTablesForDb(showTblStatus.getDbName(),
  1862. showTblStatus.getPattern());
  1863. SortedSet<String> sortedTbls = new TreeSet<String>(tblStr);
  1864. Iterator<String> iterTbls = sortedTbls.iterator();
  1865. while (iterTbls.hasNext()) {
1866. // fetch each matching table by name
  1867. String tblName = iterTbls.next();
  1868. Table tbl = db.getTable(showTblStatus.getDbName(), tblName);
  1869. tbls.add(tbl);
  1870. }
  1871. LOG.info("results : " + tblStr.size());
  1872. }
  1873. // write the results in the file
  1874. try {
  1875. Path resFile = new Path(showTblStatus.getResFile());
  1876. FileSystem fs = resFile.getFileSystem(conf);
  1877. DataOutput outStream = fs.create(resFile);
  1878. Iterator<Table> iterTables = tbls.iterator();
  1879. while (iterTables.hasNext()) {
  1880. // create a row per table name
  1881. Table tbl = iterTables.next();
  1882. String tableName = tbl.getTableName();
  1883. String tblLoc = null;
  1884. String inputFormattCls = null;
  1885. String outputFormattCls = null;
  1886. if (part != null) {
  1887. if (par != null) {
  1888. tblLoc = par.getDataLocation().toString();
  1889. inputFormattCls = par.getInputFormatClass().getName();
  1890. outputFormattCls = par.getOutputFormatClass().getName();
  1891. }
  1892. } else {
  1893. tblLoc = tbl.getDataLocation().toString();
  1894. inputFormattCls = tbl.getInputFormatClass().getName();
  1895. outputFormattCls = tbl.getOutputFormatClass().getName();
  1896. }
  1897. String owner = tbl.getOwner();
  1898. List<FieldSchema> cols = tbl.getCols();
  1899. String ddlCols = MetaStoreUtils.getDDLFromFieldSchema("columns", cols);
  1900. boolean isPartitioned = tbl.isPartitioned();
  1901. String partitionCols = "";
  1902. if (isPartitioned) {
  1903. partitionCols = MetaStoreUtils.getDDLFromFieldSchema(
  1904. "partition_columns", tbl.getPartCols());
  1905. }
  1906. outStream.writeBytes("tableName:" + tableName);
  1907. outStream.write(terminator);
  1908. outStream.writeBytes("owner:" + owner);
  1909. outStream.write(terminator);
  1910. outStream.writeBytes("location:" + tblLoc);
  1911. outStream.write(terminator);
  1912. outStream.writeBytes("inputformat:" + inputFormattCls);
  1913. outStream.write(terminator);
  1914. outStream.writeBytes("outputformat:" + outputFormattCls);
  1915. outStream.write(terminator);
  1916. outStream.writeBytes("columns:" + ddlCols);
  1917. outStream.write(terminator);
  1918. outStream.writeBytes("partitioned:" + isPartitioned);
  1919. outStream.write(terminator);
  1920. outStream.writeBytes("partitionColumns:" + partitionCols);
  1921. outStream.write(terminator);
  1922. // output file system information
  1923. Path tablLoc = tbl.getPath();
  1924. List<Path> locations = new ArrayList<Path>();
  1925. if (isPartitioned) {
  1926. if (par == null) {
  1927. for (Partition curPart : db.getPartitions(tbl)) {
  1928. locations.add(new Path(curPart.getTPartition().getSd()
  1929. .getLocation()));
  1930. }
  1931. } else {
  1932. locations.add(new Path(par.getTPartition().getSd().getLocation()));
  1933. }
  1934. } else {
  1935. locations.add(tablLoc);
  1936. }
  1937. writeFileSystemStats(outStream, locations, tablLoc, false, 0);
  1938. outStream.write(terminator);
  1939. }
  1940. ((FSDataOutputStream) outStream).close();
  1941. } catch (FileNotFoundException e) {
  1942. LOG.info("show table status: " + stringifyException(e));
  1943. return 1;
  1944. } catch (IOException e) {
  1945. LOG.info("show table status: " + stringifyException(e));
  1946. return 1;
  1947. } catch (Exception e) {
  1948. throw new HiveException(e);
  1949. }
  1950. return 0;
  1951. }
  1952. /**
  1953. * Write the description of a table to a file.
  1954. *
  1955. * @param db
  1956. * The database in question.
  1957. * @param descTbl
  1958. * This is the table we're interested in.
  1959. * @return Returns 0 when execution succeeds and above 0 if it fails.
  1960. * @throws HiveException
  1961. * Throws this exception if an unexpected error occurs.
  1962. */
  1963. private int describeTable(Hive db, DescTableDesc descTbl) throws HiveException {
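// Illustrative HiveQL (placeholder names):
//   DESCRIBE page_view;
//   DESCRIBE EXTENDED page_view PARTITION (ds='2010-08-08');
//   DESCRIBE FORMATTED page_view;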
  1964. String colPath = descTbl.getTableName();
  1965. String tableName = colPath.substring(0,
  1966. colPath.indexOf('.') == -1 ? colPath.length() : colPath.indexOf('.'));
  1967. // describe the table - populate the output stream
  1968. Table tbl = db.getTable(tableName, false);
  1969. Partition part = null;
  1970. try {
  1971. Path resFile = new Path(descTbl.getResFile());
  1972. if (tbl == null) {
  1973. FileSystem fs = resFile.getFileSystem(conf);
1974. DataOutput outStream = fs.create(resFile);
  1975. String errMsg = "Table " + tableName + " does not exist";
  1976. outStream.write(errMsg.getBytes("UTF-8"));
  1977. ((FSDataOutputStream) outStream).close();
  1978. return 0;
  1979. }
  1980. if (descTbl.getPartSpec() != null) {
  1981. part = db.getPartition(tbl, descTbl.getPartSpec(), false);
  1982. if (part == null) {
  1983. FileSystem fs = resFile.getFileSystem(conf);
1984. DataOutput outStream = fs.create(resFile);
  1985. String errMsg = "Partition " + descTbl.getPartSpec() + " for table "
  1986. + tableName + " does not exist";
  1987. outStream.write(errMsg.getBytes("UTF-8"));
  1988. ((FSDataOutputStream) outStream).close();
  1989. return 0;
  1990. }
  1991. tbl = part.getTable();
  1992. }
  1993. } catch (FileNotFoundException e) {
  1994. LOG.info("describe table: " + stringifyException(e));
  1995. return 1;
  1996. } catch (IOException e) {
  1997. LOG.info("describe table: " + stringifyException(e));
  1998. return 1;
  1999. }
  2000. try {
  2001. LOG.info("DDLTask: got data for " + tbl.getTableName());
  2002. Path resFile = new Path(descTbl.getResFile());
  2003. FileSystem fs = resFile.getFileSystem(conf);
  2004. DataOutput outStream = fs.create(resFile);
  2005. if (colPath.equals(tableName)) {
  2006. if (!descTbl.isFormatted()) {
  2007. List<FieldSchema> cols = tbl.getCols();
  2008. if (tableName.equals(colPath)) {
  2009. cols.addAll(tbl.getPartCols());
  2010. }
  2011. outStream.writeBytes(MetaDataFormatUtils.displayColsUnformatted(cols));
  2012. } else {
  2013. outStream.writeBytes(MetaDataFormatUtils.getAllColumnsInformation(tbl));
  2014. }
  2015. } else {
  2016. List<FieldSchema> cols = Hive.getFieldsFromDeserializer(colPath, tbl.getDeserializer());
  2017. if (descTbl.isFormatted()) {
  2018. outStream.writeBytes(MetaDataFormatUtils.getAllColumnsInformation(cols));
  2019. } else {
  2020. outStream.writeBytes(MetaDataFormatUtils.displayColsUnformatted(cols));
  2021. }
  2022. }
  2023. if (tableName.equals(colPath)) {
  2024. if (descTbl.isFormatted()) {
  2025. if (part != null) {
  2026. outStream.writeBytes(MetaDataFormatUtils.getPartitionInformation(part));
  2027. } else {
  2028. outStream.writeBytes(MetaDataFormatUtils.getTableInformation(tbl));
  2029. }
  2030. }
  2031. // if extended desc table then show the complete details of the table
  2032. if (descTbl.isExt()) {
  2033. // add empty line
  2034. outStream.write(terminator);
  2035. if (part != null) {
  2036. // show partition information
  2037. outStream.writeBytes("Detailed Partition Information");
  2038. outStream.write(separator);
  2039. outStream.writeBytes(part.getTPartition().toString());
  2040. outStream.write(separator);
  2041. // comment column is empty
  2042. outStream.write(terminator);
  2043. } else {
  2044. // show table information
  2045. outStream.writeBytes("Detailed Table Information");
  2046. outStream.write(separator);
  2047. outStream.writeBytes(tbl.getTTable().toString());
  2048. outStream.write(separator);
  2049. outStream.write(terminator);
  2050. }
  2051. }
  2052. }
  2053. LOG.info("DDLTask: written data for " + tbl.getTableName());
  2054. ((FSDataOutputStream) outStream).close();
  2055. } catch (FileNotFoundException e) {
  2056. LOG.info("describe table: " + stringifyException(e));
  2057. return 1;
  2058. } catch (IOException e) {
  2059. LOG.info("describe table: " + stringifyException(e));
  2060. return 1;
  2061. } catch (Exception e) {
  2062. throw new HiveException(e);
  2063. }
  2064. return 0;
  2065. }
  2066. public static void writeGrantInfo(DataOutput outStream,
  2067. PrincipalType principalType, String principalName, String dbName,
  2068. String tableName, String partName, String columnName,
  2069. PrivilegeGrantInfo grantInfo) throws IOException {
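// Formats one grant entry as key/value pairs; used when writing the output of
// statements such as (illustrative, placeholder names):
//   SHOW GRANT USER hive_user ON TABLE page_view;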
  2070. String privilege = grantInfo.getPrivilege();
  2071. int createTime = grantInfo.getCreateTime();
  2072. String grantor = grantInfo.getGrantor();
  2073. if (dbName != null) {
  2074. writeKeyValuePair(outStream, "database", dbName);
  2075. }
  2076. if (tableName != null) {
  2077. writeKeyValuePair(outStream, "table", tableName);
  2078. }
  2079. if (partName != null) {
  2080. writeKeyValuePair(outStream, "partition", partName);
  2081. }
  2082. if (columnName != null) {
  2083. writeKeyValuePair(outStream, "columnName", columnName);
  2084. }
  2085. writeKeyValuePair(outStream, "principalName", principalName);
  2086. writeKeyValuePair(outStream, "principalType", "" + principalType);
  2087. writeKeyValuePair(outStream, "privilege", privilege);
  2088. writeKeyValuePair(outStream, "grantTime", "" + createTime);
  2089. if (grantor != null) {
  2090. writeKeyValuePair(outStream, "grantor", grantor);
  2091. }
  2092. }
  2093. private static void writeKeyValuePair(DataOutput outStream, String key,
  2094. String value) throws IOException {
  2095. outStream.write(terminator);
  2096. outStream.writeBytes(key);
  2097. outStream.write(separator);
  2098. outStream.writeBytes(value);
  2099. outStream.write(separator);
  2100. }
  2101. private void writeFileSystemStats(DataOutput outStream, List<Path> locations,
  2102. Path tabLoc, boolean partSpecified, int indent) throws IOException {
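// Aggregates file-system statistics (file count, total/max/min file size, last
// access and last update times) across the given locations and writes them to
// outStream; values are reported as "unknown" if the file system is unreachable.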
  2103. long totalFileSize = 0;
  2104. long maxFileSize = 0;
  2105. long minFileSize = Long.MAX_VALUE;
  2106. long lastAccessTime = 0;
  2107. long lastUpdateTime = 0;
  2108. int numOfFiles = 0;
  2109. boolean unknown = false;
  2110. FileSystem fs = tabLoc.getFileSystem(conf);
2111. // the table/partition location may not exist on the file system at all
  2112. try {
  2113. FileStatus tmpStatus = fs.getFileStatus(tabLoc);
  2114. lastAccessTime = ShimLoader.getHadoopShims().getAccessTime(tmpStatus);
  2115. lastUpdateTime = tmpStatus.getModificationTime();
  2116. if (partSpecified) {
  2117. // check whether the part exists or not in fs
  2118. tmpStatus = fs.getFileStatus(locations.get(0));
  2119. }
  2120. } catch (IOException e) {
  2121. LOG.warn(
  2122. "Cannot access File System. File System status will be unknown: ", e);
  2123. unknown = true;
  2124. }
  2125. if (!unknown) {
  2126. for (Path loc : locations) {
  2127. try {
2128. FileStatus status = fs.getFileStatus(loc);
  2129. FileStatus[] files = fs.listStatus(loc);
  2130. long accessTime = ShimLoader.getHadoopShims().getAccessTime(status);
  2131. long updateTime = status.getModificationTime();
2132. // whether loc is the table location or a partition location, it must be
2133. // a directory.
  2134. if (!status.isDir()) {
  2135. continue;
  2136. }
  2137. if (accessTime > lastAccessTime) {
  2138. lastAccessTime = accessTime;
  2139. }
  2140. if (updateTime > lastUpdateTime) {
  2141. lastUpdateTime = updateTime;
  2142. }
  2143. for (FileStatus currentStatus : files) {
  2144. if (currentStatus.isDir()) {
  2145. continue;
  2146. }
  2147. numOfFiles++;
  2148. long fileLen = currentStatus.getLen();
  2149. totalFileSize += fileLen;
  2150. if (fileLen > maxFileSize) {
  2151. maxFileSize = fileLen;
  2152. }
  2153. if (fileLen < minFileSize) {
  2154. minFileSize = fileLen;
  2155. }
  2156. accessTime = ShimLoader.getHadoopShims().getAccessTime(
  2157. currentStatus);
  2158. updateTime = currentStatus.getModificationTime();
  2159. if (accessTime > lastAccessTime) {
  2160. lastAccessTime = accessTime;
  2161. }
  2162. if (updateTime > lastUpdateTime) {
  2163. lastUpdateTime = updateTime;
  2164. }
  2165. }
  2166. } catch (IOException e) {
  2167. // ignore
  2168. }
  2169. }
  2170. }
  2171. String unknownString = "unknown";
  2172. for (int k = 0; k < indent; k++) {
  2173. outStream.writeBytes(Utilities.INDENT);
  2174. }
  2175. outStream.writeBytes("totalNumberFiles:");
  2176. outStream.writeBytes(unknown ? unknownString : "" + numOfFiles);
  2177. outStream.write(terminator);
  2178. for (int k = 0; k < indent; k++) {
  2179. outStream.writeBytes(Utilities.INDENT);
  2180. }
  2181. outStream.writeBytes("totalFileSize:");
  2182. outStream.writeBytes(unknown ? unknownString : "" + totalFileSize);
  2183. outStream.write(terminator);
  2184. for (int k = 0; k < indent; k++) {
  2185. outStream.writeBytes(Utilities.INDENT);
  2186. }
  2187. outStream.writeBytes("maxFileSize:");
  2188. outStream.writeBytes(unknown ? unknownString : "" + maxFileSize);
  2189. outStream.write(terminator);
  2190. for (int k = 0; k < indent; k++) {
  2191. outStream.writeBytes(Utilities.INDENT);
  2192. }
  2193. outStream.writeBytes("minFileSize:");
  2194. if (numOfFiles > 0) {
  2195. outStream.writeBytes(unknown ? unknownString : "" + minFileSize);
  2196. } else {
  2197. outStream.writeBytes(unknown ? unknownString : "" + 0);
  2198. }
  2199. outStream.write(terminator);
  2200. for (int k = 0; k < indent; k++) {
  2201. outStream.writeBytes(Utilities.INDENT);
  2202. }
  2203. outStream.writeBytes("lastAccessTime:");
  2204. outStream.writeBytes((unknown || lastAccessTime < 0) ? unknownString : ""
  2205. + lastAccessTime);
  2206. outStream.write(terminator);
  2207. for (int k = 0; k < indent; k++) {
  2208. outStream.writeBytes(Utilities.INDENT);
  2209. }
  2210. outStream.writeBytes("lastUpdateTime:");
  2211. outStream.writeBytes(unknown ? unknownString : "" + lastUpdateTime);
  2212. outStream.write(terminator);
  2213. }
  2214. /**
  2215. * Alter a given table.
  2216. *
  2217. * @param db
  2218. * The database in question.
  2219. * @param alterTbl
  2220. * This is the table we're altering.
  2221. * @return Returns 0 when execution succeeds and above 0 if it fails.
  2222. * @throws HiveException
  2223. * Throws this exception if an unexpected error occurs.
  2224. */
  2225. private int alterTable(Hive db, AlterTableDesc alterTbl) throws HiveException {
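// Illustrative ALTER TABLE forms dispatched below (placeholder names):
//   ALTER TABLE page_view RENAME TO page_view2;
//   ALTER TABLE page_view ADD COLUMNS (referrer STRING COMMENT 'referrer url');
//   ALTER TABLE page_view CHANGE COLUMN ip client_ip STRING AFTER referrer;
//   ALTER TABLE page_view SET TBLPROPERTIES ('comment'='new comment');
//   ALTER TABLE page_view SET FILEFORMAT SEQUENCEFILE;
//   ALTER TABLE page_view SET LOCATION 'hdfs://namenode:8020/user/hive/page_view';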
  2226. // alter the table
  2227. Table tbl = db.getTable(alterTbl.getOldName());
  2228. Partition part = null;
  2229. if(alterTbl.getPartSpec() != null) {
  2230. part = db.getPartition(tbl, alterTbl.getPartSpec(), false);
  2231. if(part == null) {
  2232. console.printError("Partition : " + alterTbl.getPartSpec().toString()
  2233. + " does not exist.");
  2234. return 1;
  2235. }
  2236. }
  2237. validateAlterTableType(tbl, alterTbl.getOp());
  2238. if (tbl.isView()) {
  2239. if (!alterTbl.getExpectView()) {
  2240. throw new HiveException("Cannot alter a view with ALTER TABLE");
  2241. }
  2242. } else {
  2243. if (alterTbl.getExpectView()) {
  2244. throw new HiveException("Cannot alter a base table with ALTER VIEW");
  2245. }
  2246. }
  2247. Table oldTbl = tbl.copy();
  2248. if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.RENAME) {
  2249. tbl.setTableName(alterTbl.getNewName());
  2250. } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDCOLS) {
  2251. List<FieldSchema> newCols = alterTbl.getNewCols();
  2252. List<FieldSchema> oldCols = tbl.getCols();
  2253. if (tbl.getSerializationLib().equals(
  2254. "org.apache.hadoop.hive.serde.thrift.columnsetSerDe")) {
  2255. console
  2256. .printInfo("Replacing columns for columnsetSerDe and changing to LazySimpleSerDe");
  2257. tbl.setSerializationLib(LazySimpleSerDe.class.getName());
  2258. tbl.getTTable().getSd().setCols(newCols);
  2259. } else {
  2260. // make sure the columns does not already exist
  2261. Iterator<FieldSchema> iterNewCols = newCols.iterator();
  2262. while (iterNewCols.hasNext()) {
  2263. FieldSchema newCol = iterNewCols.next();
  2264. String newColName = newCol.getName();
  2265. Iterator<FieldSchema> iterOldCols = oldCols.iterator();
  2266. while (iterOldCols.hasNext()) {
  2267. String oldColName = iterOldCols.next().getName();
  2268. if (oldColName.equalsIgnoreCase(newColName)) {
  2269. console.printError("Column '" + newColName + "' exists");
  2270. return 1;
  2271. }
  2272. }
  2273. oldCols.add(newCol);
  2274. }
  2275. tbl.getTTable().getSd().setCols(oldCols);
  2276. }
  2277. } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.RENAMECOLUMN) {
  2278. List<FieldSchema> oldCols = tbl.getCols();
  2279. List<FieldSchema> newCols = new ArrayList<FieldSchema>();
  2280. Iterator<FieldSchema> iterOldCols = oldCols.iterator();
  2281. String oldName = alterTbl.getOldColName();
  2282. String newName = alterTbl.getNewColName();
  2283. String type = alterTbl.getNewColType();
  2284. String comment = alterTbl.getNewColComment();
  2285. boolean first = alterTbl.getFirst();
  2286. String afterCol = alterTbl.getAfterCol();
  2287. FieldSchema column = null;
  2288. boolean found = false;
  2289. int position = -1;
  2290. if (first) {
  2291. position = 0;
  2292. }
  2293. int i = 1;
  2294. while (iterOldCols.hasNext()) {
  2295. FieldSchema col = iterOldCols.next();
  2296. String oldColName = col.getName();
  2297. if (oldColName.equalsIgnoreCase(newName)
  2298. && !oldColName.equalsIgnoreCase(oldName)) {
  2299. console.printError("Column '" + newName + "' exists");
  2300. return 1;
  2301. } else if (oldColName.equalsIgnoreCase(oldName)) {
  2302. col.setName(newName);
  2303. if (type != null && !type.trim().equals("")) {
  2304. col.setType(type);
  2305. }
  2306. if (comment != null) {
  2307. col.setComment(comment);
  2308. }
  2309. found = true;
  2310. if (first || (afterCol != null && !afterCol.trim().equals(""))) {
  2311. column = col;
  2312. continue;
  2313. }
  2314. }
  2315. if (afterCol != null && !afterCol.trim().equals("")
  2316. && oldColName.equalsIgnoreCase(afterCol)) {
  2317. position = i;
  2318. }
  2319. i++;
  2320. newCols.add(col);
  2321. }
  2322. // did not find the column
  2323. if (!found) {
  2324. console.printError("Column '" + oldName + "' does not exist");
  2325. return 1;
  2326. }
  2327. // after column is not null, but we did not find it.
  2328. if ((afterCol != null && !afterCol.trim().equals("")) && position < 0) {
  2329. console.printError("Column '" + afterCol + "' does not exist");
  2330. return 1;
  2331. }
  2332. if (position >= 0) {
  2333. newCols.add(position, column);
  2334. }
  2335. tbl.getTTable().getSd().setCols(newCols);
  2336. } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.REPLACECOLS) {
  2337. // change SerDe to LazySimpleSerDe if it is columnsetSerDe
  2338. if (tbl.getSerializationLib().equals(
  2339. "org.apache.hadoop.hive.serde.thrift.columnsetSerDe")) {
  2340. console
  2341. .printInfo("Replacing columns for columnsetSerDe and changing to LazySimpleSerDe");
  2342. tbl.setSerializationLib(LazySimpleSerDe.class.getName());
  2343. } else if (!tbl.getSerializationLib().equals(
  2344. MetadataTypedColumnsetSerDe.class.getName())
  2345. && !tbl.getSerializationLib().equals(LazySimpleSerDe.class.getName())
  2346. && !tbl.getSerializationLib().equals(ColumnarSerDe.class.getName())
  2347. && !tbl.getSerializationLib().equals(DynamicSerDe.class.getName())) {
  2348. console.printError("Replace columns is not supported for this table. "
  2349. + "SerDe may be incompatible.");
  2350. return 1;
  2351. }
  2352. tbl.getTTable().getSd().setCols(alterTbl.getNewCols());
  2353. } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDPROPS) {
  2354. tbl.getTTable().getParameters().putAll(alterTbl.getProps());
  2355. } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDSERDEPROPS) {
  2356. tbl.getTTable().getSd().getSerdeInfo().getParameters().putAll(
  2357. alterTbl.getProps());
  2358. } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDSERDE) {
  2359. tbl.setSerializationLib(alterTbl.getSerdeName());
  2360. if ((alterTbl.getProps() != null) && (alterTbl.getProps().size() > 0)) {
  2361. tbl.getTTable().getSd().getSerdeInfo().getParameters().putAll(
  2362. alterTbl.getProps());
  2363. }
  2364. tbl.setFields(Hive.getFieldsFromDeserializer(tbl.getTableName(), tbl
  2365. .getDeserializer()));
  2366. } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDFILEFORMAT) {
  2367. if(part != null) {
  2368. part.getTPartition().getSd().setInputFormat(alterTbl.getInputFormat());
  2369. part.getTPartition().getSd().setOutputFormat(alterTbl.getOutputFormat());
  2370. if (alterTbl.getSerdeName() != null) {
  2371. part.getTPartition().getSd().getSerdeInfo().setSerializationLib(
  2372. alterTbl.getSerdeName());
  2373. }
  2374. } else {
  2375. tbl.getTTable().getSd().setInputFormat(alterTbl.getInputFormat());
  2376. tbl.getTTable().getSd().setOutputFormat(alterTbl.getOutputFormat());
  2377. if (alterTbl.getSerdeName() != null) {
  2378. tbl.setSerializationLib(alterTbl.getSerdeName());
  2379. }
  2380. }
  2381. } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ALTERPROTECTMODE) {
  2382. boolean protectModeEnable = alterTbl.isProtectModeEnable();
  2383. AlterTableDesc.ProtectModeType protectMode = alterTbl.getProtectModeType();
  2384. ProtectMode mode = null;
  2385. if(part != null) {
  2386. mode = part.getProtectMode();
  2387. } else {
  2388. mode = tbl.getProtectMode();
  2389. }
  2390. if (protectModeEnable
  2391. && protectMode == AlterTableDesc.ProtectModeType.OFFLINE) {
  2392. mode.offline = true;
  2393. } else if (protectModeEnable
  2394. && protectMode == AlterTableDesc.ProtectModeType.NO_DROP) {
  2395. mode.noDrop = true;
  2396. } else if (!protectModeEnable
  2397. && protectMode == AlterTableDesc.ProtectModeType.OFFLINE) {
  2398. mode.offline = false;
  2399. } else if (!protectModeEnable
  2400. && protectMode == AlterTableDesc.ProtectModeType.NO_DROP) {
  2401. mode.noDrop = false;
  2402. }
  2403. if (part != null) {
  2404. part.setProtectMode(mode);
  2405. } else {
  2406. tbl.setProtectMode(mode);
  2407. }
  2408. } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDCLUSTERSORTCOLUMN) {
  2409. // validate sort columns and bucket columns
  2410. List<String> columns = Utilities.getColumnNamesFromFieldSchema(tbl
  2411. .getCols());
  2412. Utilities.validateColumnNames(columns, alterTbl.getBucketColumns());
  2413. if (alterTbl.getSortColumns() != null) {
  2414. Utilities.validateColumnNames(columns, Utilities
  2415. .getColumnNamesFromSortCols(alterTbl.getSortColumns()));
  2416. }
  2417. int numBuckets = -1;
  2418. ArrayList<String> bucketCols = null;
  2419. ArrayList<Order> sortCols = null;
  2420. // -1 buckets means to turn off bucketing
  2421. if (alterTbl.getNumberBuckets() == -1) {
  2422. bucketCols = new ArrayList<String>();
  2423. sortCols = new ArrayList<Order>();
  2424. numBuckets = -1;
  2425. } else {
  2426. bucketCols = alterTbl.getBucketColumns();
  2427. sortCols = alterTbl.getSortColumns();
  2428. numBuckets = alterTbl.getNumberBuckets();
  2429. }
  2430. tbl.getTTable().getSd().setBucketCols(bucketCols);
  2431. tbl.getTTable().getSd().setNumBuckets(numBuckets);
  2432. tbl.getTTable().getSd().setSortCols(sortCols);
  2433. } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ALTERLOCATION) {
  2434. String newLocation = alterTbl.getNewLocation();
  2435. try {
  2436. URI locURI = new URI(newLocation);
  2437. if (!locURI.isAbsolute() || locURI.getScheme() == null
  2438. || locURI.getScheme().trim().equals("")) {
  2439. throw new HiveException(
  2440. newLocation
  2441. + " is not absolute or has no scheme information. "
  2442. + "Please specify a complete absolute uri with scheme information.");
  2443. }
  2444. if (part != null) {
  2445. part.setLocation(newLocation);
  2446. } else {
  2447. tbl.setDataLocation(locURI);
  2448. }
  2449. } catch (URISyntaxException e) {
  2450. throw new HiveException(e);
  2451. }
  2452. } else {
  2453. console.printError("Unsupported Alter commnad");
  2454. return 1;
  2455. }
  2456. if(part == null) {
  2457. if (!updateModifiedParameters(tbl.getTTable().getParameters(), conf)) {
  2458. return 1;
  2459. }
  2460. try {
  2461. tbl.checkValidity();
  2462. } catch (HiveException e) {
  2463. console.printError("Invalid table columns : " + e.getMessage(),
  2464. stringifyException(e));
  2465. return 1;
  2466. }
  2467. } else {
  2468. if (!updateModifiedParameters(part.getParameters(), conf)) {
  2469. return 1;
  2470. }
  2471. }
  2472. try {
  2473. if (part == null) {
  2474. db.alterTable(alterTbl.getOldName(), tbl);
  2475. } else {
  2476. db.alterPartition(tbl.getTableName(), part);
  2477. }
  2478. } catch (InvalidOperationException e) {
  2479. console.printError("Invalid alter operation: " + e.getMessage());
  2480. LOG.info("alter table: " + stringifyException(e));
  2481. return 1;
  2482. } catch (HiveException e) {
  2483. return 1;
  2484. }
2485. // This is kind of hacky - the read entity contains the old table, whereas
2486. // the write entity contains the new table. This is needed for rename, where
2487. // both the old and the new table names are passed.
  2490. if(part != null) {
  2491. work.getInputs().add(new ReadEntity(part));
  2492. work.getOutputs().add(new WriteEntity(part));
  2493. } else {
  2494. work.getInputs().add(new ReadEntity(oldTbl));
  2495. work.getOutputs().add(new WriteEntity(tbl));
  2496. }
  2497. return 0;
  2498. }
  2499. /**
  2500. * Drop a given table.
  2501. *
  2502. * @param db
  2503. * The database in question.
  2504. * @param dropTbl
  2505. * This is the table we're dropping.
  2506. * @return Returns 0 when execution succeeds and above 0 if it fails.
  2507. * @throws HiveException
  2508. * Throws this exception if an unexpected error occurs.
  2509. */
  2510. private int dropTable(Hive db, DropTableDesc dropTbl) throws HiveException {
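// Illustrative HiveQL (placeholder names). The partition-spec branch below also
// serves ALTER TABLE ... DROP PARTITION:
//   DROP TABLE page_view;
//   DROP VIEW page_view_v;
//   ALTER TABLE page_view DROP PARTITION (ds='2010-08-08');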
2511. // We need to fetch the table before it is dropped so that it can be passed
2512. // to the post-execution hook.
  2514. Table tbl = null;
  2515. try {
  2516. tbl = db.getTable(dropTbl.getTableName());
  2517. } catch (InvalidTableException e) {
  2518. // drop table is idempotent
  2519. }
  2520. if (tbl != null) {
  2521. if (tbl.isView()) {
  2522. if (!dropTbl.getExpectView()) {
  2523. throw new HiveException("Cannot drop a view with DROP TABLE");
  2524. }
  2525. } else {
  2526. if (dropTbl.getExpectView()) {
  2527. throw new HiveException("Cannot drop a base table with DROP VIEW");
  2528. }
  2529. }
  2530. }
  2531. if (dropTbl.getPartSpecs() == null) {
  2532. if (tbl != null && !tbl.canDrop()) {
  2533. throw new HiveException("Table " + tbl.getTableName() +
  2534. " is protected from being dropped");
  2535. }
  2536. // We should check that all the partitions of the table can be dropped
  2537. if (tbl != null && tbl.isPartitioned()) {
  2538. List<Partition> listPartitions = db.getPartitions(tbl);
  2539. for (Partition p: listPartitions) {
  2540. if (!p.canDrop()) {
  2541. throw new HiveException("Table " + tbl.getTableName() +
  2542. " Partition" + p.getName() +
  2543. " is protected from being dropped");
  2544. }
  2545. }
  2546. }
  2547. // drop the table
  2548. db.dropTable(dropTbl.getTableName());
  2549. if (tbl != null) {
  2550. work.getOutputs().add(new WriteEntity(tbl));
  2551. }
  2552. } else {
  2553. // get all partitions of the table
  2554. List<String> partitionNames =
  2555. db.getPartitionNames(dropTbl.getTableName(), (short) -1);
  2556. Set<Map<String, String>> partitions = new HashSet<Map<String, String>>();
  2557. for (String partitionName : partitionNames) {
  2558. try {
  2559. partitions.add(Warehouse.makeSpecFromName(partitionName));
  2560. } catch (MetaException e) {
  2561. LOG.warn("Unrecognized partition name from metastore: " + partitionName);
  2562. }
  2563. }
  2564. // drop partitions in the list
  2565. List<Partition> partsToDelete = new ArrayList<Partition>();
  2566. for (Map<String, String> partSpec : dropTbl.getPartSpecs()) {
  2567. Iterator<Map<String, String>> it = partitions.iterator();
  2568. while (it.hasNext()) {
  2569. Map<String, String> part = it.next();
  2570. // test if partSpec matches part
  2571. boolean match = true;
  2572. for (Map.Entry<String, String> item : partSpec.entrySet()) {
  2573. if (!item.getValue().equals(part.get(item.getKey()))) {
  2574. match = false;
  2575. break;
  2576. }
  2577. }
  2578. if (match) {
  2579. Partition p = db.getPartition(tbl, part, false);
  2580. if (!p.canDrop()) {
  2581. throw new HiveException("Table " + tbl.getTableName() +
  2582. " Partition " + p.getName() +
  2583. " is protected from being dropped");
  2584. }
  2585. partsToDelete.add(p);
  2586. it.remove();
  2587. }
  2588. }
  2589. }
  2590. // drop all existing partitions from the list
  2591. for (Partition partition : partsToDelete) {
  2592. console.printInfo("Dropping the partition " + partition.getName());
  2593. db.dropPartition(dropTbl.getTableName(), partition.getValues(), true);
  2594. work.getOutputs().add(new WriteEntity(partition));
  2595. }
  2596. }
  2597. return 0;
  2598. }
  2599. /**
  2600. * Update last_modified_by and last_modified_time parameters in parameter map.
  2601. *
  2602. * @param params
  2603. * Parameters.
2604. * @param conf
2605. * HiveConf used to determine the user performing the update.
  2606. */
  2607. private boolean updateModifiedParameters(Map<String, String> params, HiveConf conf) {
  2608. String user = null;
  2609. try {
  2610. user = conf.getUser();
  2611. } catch (IOException e) {
  2612. console.printError("Unable to get current user: " + e.getMessage(),
  2613. stringifyException(e));
  2614. return false;
  2615. }
  2616. params.put("last_modified_by", user);
  2617. params.put("last_modified_time", Long.toString(System.currentTimeMillis() / 1000));
  2618. return true;
  2619. }
  2620. /**
  2621. * Check if the given serde is valid.
  2622. */
  2623. private void validateSerDe(String serdeName) throws HiveException {
  2624. try {
  2625. Deserializer d = SerDeUtils.lookupDeserializer(serdeName);
  2626. if (d != null) {
  2627. LOG.debug("Found class for " + serdeName);
  2628. }
  2629. } catch (SerDeException e) {
  2630. throw new HiveException("Cannot validate serde: " + serdeName, e);
  2631. }
  2632. }
  2633. /**
  2634. * Create a Database
  2635. * @param db
  2636. * @param crtDb
  2637. * @return Always returns 0
  2638. * @throws HiveException
  2639. * @throws AlreadyExistsException
  2640. */
  2641. private int createDatabase(Hive db, CreateDatabaseDesc crtDb)
  2642. throws HiveException, AlreadyExistsException {
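// Illustrative HiveQL (placeholder name and path):
//   CREATE DATABASE IF NOT EXISTS mydb
//     COMMENT 'analytics db'
//     LOCATION '/user/hive/warehouse/mydb.db';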
  2643. Database database = new Database();
  2644. database.setName(crtDb.getName());
  2645. database.setDescription(crtDb.getComment());
  2646. database.setLocationUri(crtDb.getLocationUri());
  2647. database.setParameters(crtDb.getDatabaseProperties());
  2648. db.createDatabase(database, crtDb.getIfNotExists());
  2649. return 0;
  2650. }
  2651. /**
  2652. * Drop a Database
  2653. * @param db
  2654. * @param dropDb
  2655. * @return Always returns 0
  2656. * @throws HiveException
  2657. * @throws NoSuchObjectException
  2658. */
  2659. private int dropDatabase(Hive db, DropDatabaseDesc dropDb)
  2660. throws HiveException, NoSuchObjectException {
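// Illustrative HiveQL (placeholder name):
//   DROP DATABASE IF EXISTS mydb;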
  2661. db.dropDatabase(dropDb.getDatabaseName(), true, dropDb.getIfExists());
  2662. return 0;
  2663. }
  2664. /**
  2665. * Switch to a different Database
  2666. * @param db
  2667. * @param switchDb
  2668. * @return Always returns 0
  2669. * @throws HiveException
  2670. */
  2671. private int switchDatabase(Hive db, SwitchDatabaseDesc switchDb)
  2672. throws HiveException {
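// Illustrative HiveQL (placeholder name):
//   USE mydb;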
  2673. String dbName = switchDb.getDatabaseName();
  2674. if (!db.databaseExists(dbName)) {
  2675. throw new HiveException("ERROR: The database " + dbName + " does not exist.");
  2676. }
  2677. db.setCurrentDatabase(dbName);
  2678. // set database specific parameters
  2679. Database database = db.getDatabase(dbName);
  2680. assert(database != null);
  2681. Map<String, String> dbParams = database.getParameters();
  2682. if (dbParams != null) {
  2683. for (HiveConf.ConfVars var: HiveConf.dbVars) {
  2684. String newValue = dbParams.get(var.varname);
  2685. if (newValue != null) {
  2686. LOG.info("Changing " + var.varname +
  2687. " from " + conf.getVar(var) + " to " + newValue);
  2688. conf.setVar(var, newValue);
  2689. }
  2690. }
  2691. }
  2692. return 0;
  2693. }
  2694. /**
  2695. * Create a new table.
  2696. *
  2697. * @param db
  2698. * The database in question.
  2699. * @param crtTbl
  2700. * This is the table we're creating.
  2701. * @return Returns 0 when execution succeeds and above 0 if it fails.
  2702. * @throws HiveException
  2703. * Throws this exception if an unexpected error occurs.
  2704. */
  2705. private int createTable(Hive db, CreateTableDesc crtTbl) throws HiveException {
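// Illustrative HiveQL covering the options handled below (placeholder names):
//   CREATE EXTERNAL TABLE page_view (viewTime INT, ip STRING COMMENT 'IP address')
//   PARTITIONED BY (ds STRING)
//   CLUSTERED BY (ip) SORTED BY (viewTime) INTO 32 BUCKETS
//   ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t'
//   STORED AS TEXTFILE
//   LOCATION '/user/hive/warehouse/page_view';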
  2706. // create the table
  2707. Table tbl = db.newTable(crtTbl.getTableName());
  2708. if (crtTbl.getTblProps() != null) {
  2709. tbl.getTTable().getParameters().putAll(crtTbl.getTblProps());
  2710. }
  2711. if (crtTbl.getPartCols() != null) {
  2712. tbl.setPartCols(crtTbl.getPartCols());
  2713. }
  2714. if (crtTbl.getNumBuckets() != -1) {
  2715. tbl.setNumBuckets(crtTbl.getNumBuckets());
  2716. }
  2717. if (crtTbl.getStorageHandler() != null) {
  2718. tbl.setProperty(
  2719. org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_STORAGE,
  2720. crtTbl.getStorageHandler());
  2721. }
  2722. HiveStorageHandler storageHandler = tbl.getStorageHandler();
  2723. /*
  2724. * We use LazySimpleSerDe by default.
  2725. *
2726. * If the user didn't specify a SerDe, we use the SerDe supplied by the
2727. * storage handler when one is configured, and LazySimpleSerDe otherwise.
  2728. */
  2729. if (crtTbl.getSerName() == null) {
  2730. if (storageHandler == null) {
  2731. LOG.info("Default to LazySimpleSerDe for table " + crtTbl.getTableName());
  2732. tbl.setSerializationLib(org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe.class.getName());
  2733. } else {
  2734. String serDeClassName = storageHandler.getSerDeClass().getName();
  2735. LOG.info("Use StorageHandler-supplied " + serDeClassName
  2736. + " for table " + crtTbl.getTableName());
  2737. tbl.setSerializationLib(serDeClassName);
  2738. }
  2739. } else {
  2740. // let's validate that the serde exists
  2741. validateSerDe(crtTbl.getSerName());
  2742. tbl.setSerializationLib(crtTbl.getSerName());
  2743. }
  2744. if (crtTbl.getFieldDelim() != null) {
  2745. tbl.setSerdeParam(Constants.FIELD_DELIM, crtTbl.getFieldDelim());
  2746. tbl.setSerdeParam(Constants.SERIALIZATION_FORMAT, crtTbl.getFieldDelim());
  2747. }
  2748. if (crtTbl.getFieldEscape() != null) {
  2749. tbl.setSerdeParam(Constants.ESCAPE_CHAR, crtTbl.getFieldEscape());
  2750. }
  2751. if (crtTbl.getCollItemDelim() != null) {
  2752. tbl.setSerdeParam(Constants.COLLECTION_DELIM, crtTbl.getCollItemDelim());
  2753. }
  2754. if (crtTbl.getMapKeyDelim() != null) {
  2755. tbl.setSerdeParam(Constants.MAPKEY_DELIM, crtTbl.getMapKeyDelim());
  2756. }
  2757. if (crtTbl.getLineDelim() != null) {
  2758. tbl.setSerdeParam(Constants.LINE_DELIM, crtTbl.getLineDelim());
  2759. }
  2760. if (crtTbl.getSerdeProps() != null) {
  2761. Iterator<Entry<String, String>> iter = crtTbl.getSerdeProps().entrySet()
  2762. .iterator();
  2763. while (iter.hasNext()) {
  2764. Entry<String, String> m = iter.next();
  2765. tbl.setSerdeParam(m.getKey(), m.getValue());
  2766. }
  2767. }
  2768. if (crtTbl.getCols() != null) {
  2769. tbl.setFields(crtTbl.getCols());
  2770. }
  2771. if (crtTbl.getBucketCols() != null) {
  2772. tbl.setBucketCols(crtTbl.getBucketCols());
  2773. }
  2774. if (crtTbl.getSortCols() != null) {
  2775. tbl.setSortCols(crtTbl.getSortCols());
  2776. }
  2777. if (crtTbl.getComment() != null) {
  2778. tbl.setProperty("comment", crtTbl.getComment());
  2779. }
  2780. if (crtTbl.getLocation() != null) {
  2781. tbl.setDataLocation(new Path(crtTbl.getLocation()).toUri());
  2782. }
  2783. tbl.setInputFormatClass(crtTbl.getInputFormat());
  2784. tbl.setOutputFormatClass(crtTbl.getOutputFormat());
  2785. tbl.getTTable().getSd().setInputFormat(
  2786. tbl.getInputFormatClass().getName());
  2787. tbl.getTTable().getSd().setOutputFormat(
  2788. tbl.getOutputFormatClass().getName());
  2789. if (crtTbl.isExternal()) {
  2790. tbl.setProperty("EXTERNAL", "TRUE");
  2791. tbl.setTableType(TableType.EXTERNAL_TABLE);
  2792. }
2793. // If the sorted columns are a superset of the bucketed columns, store this fact;
2794. // it can later be used to optimize some group-by queries. The order of the sort
2795. // columns does not matter, as long as every bucketed column appears among the
2796. // first 'n' sort columns, where 'n' is the number of bucketed columns.
2797. // For example, a table CLUSTERED BY (a) SORTED BY (b, a) still qualifies.
  2798. if ((tbl.getBucketCols() != null) && (tbl.getSortCols() != null)) {
  2799. List<String> bucketCols = tbl.getBucketCols();
  2800. List<Order> sortCols = tbl.getSortCols();
  2801. if ((sortCols.size() > 0) && (sortCols.size() >= bucketCols.size())) {
  2802. boolean found = true;
  2803. Iterator<String> iterBucketCols = bucketCols.iterator();
  2804. while (iterBucketCols.hasNext()) {
  2805. String bucketCol = iterBucketCols.next();
  2806. boolean colFound = false;
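// look for this bucketed column among the first bucketCols.size() sort columns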
  2807. for (int i = 0; i < bucketCols.size(); i++) {
  2808. if (bucketCol.equals(sortCols.get(i).getCol())) {
  2809. colFound = true;
  2810. break;
  2811. }
  2812. }
2813. if (!colFound) {
  2814. found = false;
  2815. break;
  2816. }
  2817. }
  2818. if (found) {
  2819. tbl.setProperty("SORTBUCKETCOLSPREFIX", "TRUE");
  2820. }
  2821. }
  2822. }
  2823. int rc = setGenericTableAttributes(tbl);
  2824. if (rc != 0) {
  2825. return rc;
  2826. }
  2827. // create the table
  2828. db.createTable(tbl, crtTbl.getIfNotExists());
  2829. work.getOutputs().add(new WriteEntity(tbl));
  2830. return 0;
  2831. }
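/*
 * A minimal sketch of a CREATE TABLE statement exercising most of the branches
 * above; all table, column, and path names are hypothetical.
 *
 *   CREATE EXTERNAL TABLE IF NOT EXISTS page_views (uid BIGINT, url STRING)
 *   COMMENT 'illustrative table'
 *   PARTITIONED BY (dt STRING)
 *   CLUSTERED BY (uid) SORTED BY (uid) INTO 32 BUCKETS
 *   ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t'
 *   STORED AS TEXTFILE
 *   LOCATION '/warehouse/page_views';
 *
 * EXTERNAL sets the EXTERNAL property and TableType.EXTERNAL_TABLE; FIELDS
 * TERMINATED BY becomes the FIELD_DELIM (and SERIALIZATION_FORMAT) serde
 * parameter; CLUSTERED BY / SORTED BY populate the bucket and sort columns, and
 * because the bucketed column appears among the leading sort columns, the
 * SORTBUCKETCOLSPREFIX property is recorded as well.
 */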
  2832. /**
  2833. * Create a new table like an existing table.
  2834. *
2835. * @param db
2836. * The Hive client used to create the table.
2837. * @param crtTbl
2838. * The descriptor of the table to create, modeled on an existing table.
  2839. * @return Returns 0 when execution succeeds and above 0 if it fails.
  2840. * @throws HiveException
  2841. * Throws this exception if an unexpected error occurs.
  2842. */
  2843. private int createTableLike(Hive db, CreateTableLikeDesc crtTbl) throws HiveException {
  2844. // Get the existing table
  2845. Table tbl = db.getTable(crtTbl.getLikeTableName());
  2846. // find out database name and table name of target table
  2847. String targetTableName = crtTbl.getTableName();
  2848. Table newTable = db.newTable(targetTableName);
  2849. tbl.setDbName(newTable.getDbName());
  2850. tbl.setTableName(newTable.getTableName());
  2851. if (crtTbl.isExternal()) {
  2852. tbl.setProperty("EXTERNAL", "TRUE");
  2853. } else {
  2854. tbl.setProperty("EXTERNAL", "FALSE");
  2855. }
  2856. if (crtTbl.getLocation() != null) {
  2857. tbl.setDataLocation(new Path(crtTbl.getLocation()).toUri());
  2858. } else {
  2859. tbl.unsetDataLocation();
  2860. }
2861. // reset table-specific parameters copied from the source table (stats, lastDDLTime, etc.)
  2862. Map<String, String> params = tbl.getParameters();
  2863. params.clear();
  2864. // create the table
  2865. db.createTable(tbl, crtTbl.getIfNotExists());
  2866. work.getOutputs().add(new WriteEntity(tbl));
  2867. return 0;
  2868. }
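/*
 * A minimal sketch of the LIKE variant handled above; the table names and
 * location are hypothetical.
 *
 *   CREATE EXTERNAL TABLE IF NOT EXISTS page_views_copy
 *   LIKE page_views
 *   LOCATION '/warehouse/page_views_copy';
 *
 * The new table reuses the source table's column and storage definitions, the
 * EXTERNAL flag and LOCATION are taken from the statement, and the copied
 * table parameters are cleared before creation.
 */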
  2869. /**
  2870. * Create a new view.
  2871. *
2872. * @param db
2873. * The Hive client used to create the view.
2874. * @param crtView
2875. * The descriptor of the view we're creating.
  2876. * @return Returns 0 when execution succeeds and above 0 if it fails.
  2877. * @throws HiveException
  2878. * Throws this exception if an unexpected error occurs.
  2879. */
  2880. private int createView(Hive db, CreateViewDesc crtView) throws HiveException {
  2881. Table tbl = db.newTable(crtView.getViewName());
  2882. tbl.setTableType(TableType.VIRTUAL_VIEW);
  2883. tbl.setSerializationLib(null);
  2884. tbl.clearSerDeInfo();
  2885. tbl.setViewOriginalText(crtView.getViewOriginalText());
  2886. tbl.setViewExpandedText(crtView.getViewExpandedText());
  2887. tbl.setFields(crtView.getSchema());
  2888. if (crtView.getComment() != null) {
  2889. tbl.setProperty("comment", crtView.getComment());
  2890. }
  2891. if (crtView.getTblProps() != null) {
  2892. tbl.getTTable().getParameters().putAll(crtView.getTblProps());
  2893. }
  2894. int rc = setGenericTableAttributes(tbl);
  2895. if (rc != 0) {
  2896. return rc;
  2897. }
  2898. db.createTable(tbl, crtView.getIfNotExists());
  2899. work.getOutputs().add(new WriteEntity(tbl));
  2900. return 0;
  2901. }
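/*
 * A minimal sketch of a CREATE VIEW statement handled above; the view, table,
 * and column names are hypothetical.
 *
 *   CREATE VIEW IF NOT EXISTS recent_views
 *   COMMENT 'illustrative view'
 *   AS SELECT uid, url FROM page_views WHERE dt = '2011-01-01';
 *
 * The view is stored as a VIRTUAL_VIEW with the original and expanded query
 * text; it carries no SerDe or storage information of its own, which is why
 * the SerDe library and SerDe info are cleared above.
 */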
  2902. private int setGenericTableAttributes(Table tbl) {
  2903. try {
  2904. tbl.setOwner(conf.getUser());
  2905. } catch (IOException e) {
  2906. console.printError("Unable to get current user: " + e.getMessage(),
  2907. stringifyException(e));
  2908. return 1;
  2909. }
  2910. // set create time
  2911. tbl.setCreateTime((int) (System.currentTimeMillis() / 1000));
  2912. return 0;
  2913. }
  2914. @Override
  2915. public StageType getType() {
  2916. return StageType.DDL;
  2917. }
  2918. @Override
  2919. public String getName() {
  2920. return "DDL";
  2921. }
  2922. @Override
  2923. protected void localizeMRTmpFilesImpl(Context ctx) {
  2924. // no-op
  2925. }
  2926. }