
/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClientHMSImpl.java

http://github.com/apache/hive
Java | 1103 lines
Possible License(s): Apache-2.0
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.hive.hcatalog.api;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Map;

import javax.annotation.Nullable;

import org.apache.commons.lang3.tuple.Pair;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.common.classification.InterfaceAudience;
import org.apache.hadoop.hive.common.classification.InterfaceStability;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.metastore.api.NotificationEvent;
import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.PartitionEventType;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.metastore.api.UnknownDBException;
import org.apache.hadoop.hive.metastore.api.UnknownTableException;
import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
import org.apache.hadoop.hive.ql.exec.SerializationUtilities;
import org.apache.hadoop.hive.ql.parse.SemanticException;
import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters;
import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
import org.apache.hive.hcatalog.api.repl.HCatReplicationTaskIterator;
import org.apache.hive.hcatalog.api.repl.ReplicationTask;
import org.apache.hive.hcatalog.common.HCatConstants;
import org.apache.hive.hcatalog.common.HCatException;
import org.apache.hive.hcatalog.common.HCatUtil;
import org.apache.hive.hcatalog.data.schema.HCatFieldSchema;
import org.apache.hive.hcatalog.data.schema.HCatSchemaUtils;
import org.apache.thrift.TException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.google.common.base.Function;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;

/**
 * The HCatClientHMSImpl is the Hive Metastore client based implementation of
 * HCatClient.
 */
public class HCatClientHMSImpl extends HCatClient {
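  // A minimal usage sketch (editor's note, not part of the original source):
  // instances are normally obtained through the HCatClient.create() factory,
  // which is expected to return this HMS-backed implementation by default and
  // to invoke initialize(conf) internally.
  //
  //   Configuration conf = new Configuration();
  //   HCatClient client = HCatClient.create(conf);
  //   try {
  //     List<String> dbs = client.listDatabaseNamesByPattern("*");
  //   } finally {
  //     client.close();
  //   }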
  private static final Logger LOG = LoggerFactory.getLogger(HCatClientHMSImpl.class);
  private IMetaStoreClient hmsClient;
  private Configuration config;
  private HiveConf hiveConfig;

  @Override
  public List<String> listDatabaseNamesByPattern(String pattern)
    throws HCatException {
    List<String> dbNames = null;
    try {
      dbNames = hmsClient.getDatabases(pattern);
    } catch (MetaException exp) {
      throw new HCatException("MetaException while listing db names. " + exp.getMessage(), exp);
    } catch (TException e) {
      throw new HCatException("Transport Exception while listing db names. " + e.getMessage(), e);
    }
    return dbNames;
  }

  @Override
  public HCatDatabase getDatabase(String dbName) throws HCatException {
    HCatDatabase db = null;
    try {
      Database hiveDB = hmsClient.getDatabase(checkDB(dbName));
      if (hiveDB != null) {
        db = new HCatDatabase(hiveDB);
      }
    } catch (NoSuchObjectException exp) {
      throw new ObjectNotFoundException(
        "NoSuchObjectException while fetching database", exp);
    } catch (MetaException exp) {
      throw new HCatException("MetaException while fetching database", exp);
    } catch (TException exp) {
      throw new ConnectionFailureException(
        "TException while fetching database", exp);
    }
    return db;
  }

  @Override
  public void createDatabase(HCatCreateDBDesc dbInfo) throws HCatException {
    try {
      hmsClient.createDatabase(dbInfo.toHiveDb());
    } catch (AlreadyExistsException exp) {
      if (!dbInfo.getIfNotExists()) {
        throw new HCatException(
          "AlreadyExistsException while creating database", exp);
      }
    } catch (InvalidObjectException exp) {
      throw new HCatException(
        "InvalidObjectException while creating database", exp);
    } catch (MetaException exp) {
      throw new HCatException("MetaException while creating database", exp);
    } catch (TException exp) {
      throw new ConnectionFailureException(
        "TException while creating database", exp);
    }
  }

  @Override
  public void dropDatabase(String dbName, boolean ifExists, DropDBMode mode)
    throws HCatException {
    boolean isCascade = mode.toString().equalsIgnoreCase("cascade");
    try {
      hmsClient.dropDatabase(checkDB(dbName), true, ifExists, isCascade);
    } catch (NoSuchObjectException e) {
      if (!ifExists) {
        throw new ObjectNotFoundException(
          "NoSuchObjectException while dropping db.", e);
      }
    } catch (InvalidOperationException e) {
      throw new HCatException(
        "InvalidOperationException while dropping db.", e);
    } catch (MetaException e) {
      throw new HCatException("MetaException while dropping db.", e);
    } catch (TException e) {
      throw new ConnectionFailureException("TException while dropping db.", e);
    }
  }

  @Override
  public List<String> listTableNamesByPattern(String dbName,
      String tablePattern) throws HCatException {
    List<String> tableNames = null;
    try {
      tableNames = hmsClient.getTables(checkDB(dbName), tablePattern);
    } catch (MetaException e) {
      throw new HCatException("MetaException while fetching table names. " + e.getMessage(), e);
    } catch (UnknownDBException e) {
      throw new HCatException("UnknownDB " + dbName + " while fetching table names.", e);
    } catch (TException e) {
      throw new HCatException("Transport exception while fetching table names. "
        + e.getMessage(), e);
    }
    return tableNames;
  }

  @Override
  public HCatTable getTable(String dbName, String tableName)
    throws HCatException {
    HCatTable table = null;
    try {
      Table hiveTable = hmsClient.getTable(checkDB(dbName), tableName);
      if (hiveTable != null) {
        table = new HCatTable(hiveTable);
      }
    } catch (MetaException e) {
      throw new HCatException("MetaException while fetching table.", e);
    } catch (NoSuchObjectException e) {
      throw new ObjectNotFoundException(
        "NoSuchObjectException while fetching table.", e);
    } catch (TException e) {
      throw new ConnectionFailureException(
        "TException while fetching table.", e);
    }
    return table;
  }

  @Override
  public void createTable(HCatCreateTableDesc createTableDesc)
    throws HCatException {
    try {
      hmsClient.createTable(createTableDesc.getHCatTable().toHiveTable());
    } catch (AlreadyExistsException e) {
      if (!createTableDesc.getIfNotExists()) {
        throw new HCatException(
          "AlreadyExistsException while creating table.", e);
      }
    } catch (InvalidObjectException e) {
      throw new HCatException(
        "InvalidObjectException while creating table.", e);
    } catch (MetaException e) {
      throw new HCatException("MetaException while creating table.", e);
    } catch (NoSuchObjectException e) {
      throw new ObjectNotFoundException(
        "NoSuchObjectException while creating table.", e);
    } catch (TException e) {
      throw new ConnectionFailureException(
        "TException while creating table.", e);
    } catch (IOException e) {
      throw new HCatException("IOException while creating hive conf.", e);
    }
  }

  @Override
  public void updateTableSchema(String dbName, String tableName, List<HCatFieldSchema> columnSchema)
    throws HCatException {
    try {
      Table table = hmsClient.getTable(dbName, tableName);
      table.getSd().setCols(HCatSchemaUtils.getFieldSchemas(columnSchema));
      hmsClient.alter_table(dbName, tableName, table);
    } catch (InvalidOperationException e) {
      throw new HCatException("InvalidOperationException while updating table schema.", e);
    } catch (MetaException e) {
      throw new HCatException("MetaException while updating table schema.", e);
    } catch (NoSuchObjectException e) {
      throw new ObjectNotFoundException(
        "NoSuchObjectException while updating table schema.", e);
    } catch (TException e) {
      throw new ConnectionFailureException(
        "TException while updating table schema.", e);
    }
  }

  @Override
  public void updateTableSchema(String dbName, String tableName, HCatTable newTableDefinition) throws HCatException {
    try {
      hmsClient.alter_table(dbName, tableName, newTableDefinition.toHiveTable());
    } catch (InvalidOperationException e) {
      throw new HCatException("InvalidOperationException while updating table schema.", e);
    } catch (MetaException e) {
      throw new HCatException("MetaException while updating table schema.", e);
    } catch (NoSuchObjectException e) {
      throw new ObjectNotFoundException(
        "NoSuchObjectException while updating table schema.", e);
    } catch (TException e) {
      throw new ConnectionFailureException(
        "TException while updating table schema.", e);
    }
  }
  @Override
  public void createTableLike(String dbName, String existingTblName,
      String newTableName, boolean ifNotExists, boolean isExternal,
      String location) throws HCatException {
    Table hiveTable = getHiveTableLike(checkDB(dbName), existingTblName,
      newTableName, isExternal, location);
    if (hiveTable != null) {
      try {
        hmsClient.createTable(hiveTable);
      } catch (AlreadyExistsException e) {
        if (!ifNotExists) {
          throw new HCatException(
            "A table already exists with the name " + newTableName, e);
        }
      } catch (InvalidObjectException e) {
        throw new HCatException(
          "InvalidObjectException in create table like command.", e);
      } catch (MetaException e) {
        throw new HCatException(
          "MetaException in create table like command.", e);
      } catch (NoSuchObjectException e) {
        throw new ObjectNotFoundException(
          "NoSuchObjectException in create table like command.", e);
      } catch (TException e) {
        throw new ConnectionFailureException(
          "TException in create table like command.", e);
      }
    }
  }
  @Override
  public void dropTable(String dbName, String tableName, boolean ifExists)
    throws HCatException {
    try {
      hmsClient.dropTable(checkDB(dbName), tableName, true, ifExists);
    } catch (NoSuchObjectException e) {
      if (!ifExists) {
        throw new ObjectNotFoundException(
          "NoSuchObjectException while dropping table.", e);
      }
    } catch (MetaException e) {
      throw new HCatException("MetaException while dropping table.", e);
    } catch (TException e) {
      throw new ConnectionFailureException(
        "TException while dropping table.", e);
    }
  }

  @Override
  public void renameTable(String dbName, String oldName, String newName)
    throws HCatException {
    Table tbl;
    try {
      Table oldtbl = hmsClient.getTable(checkDB(dbName), oldName);
      if (oldtbl != null) {
        // TODO : Should be moved out.
        if (oldtbl.getParameters().get(
            org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE) != null) {
          throw new HCatException(
            "Cannot use rename command on a non-native table");
        }
        tbl = new Table(oldtbl);
        tbl.setTableName(newName);
        hmsClient.alter_table(checkDB(dbName), oldName, tbl);
      }
    } catch (MetaException e) {
      throw new HCatException("MetaException while renaming table", e);
    } catch (NoSuchObjectException e) {
      throw new ObjectNotFoundException(
        "NoSuchObjectException while renaming table", e);
    } catch (InvalidOperationException e) {
      throw new HCatException(
        "InvalidOperationException while renaming table", e);
    } catch (TException e) {
      throw new ConnectionFailureException(
        "TException while renaming table", e);
    }
  }

  @Override
  public List<HCatPartition> getPartitions(String dbName, String tblName)
    throws HCatException {
    List<HCatPartition> hcatPtns = new ArrayList<HCatPartition>();
    try {
      HCatTable hcatTable = getTable(dbName, tblName);
      List<Partition> hivePtns = hmsClient.listPartitions(
        checkDB(dbName), tblName, (short) -1);
      for (Partition ptn : hivePtns) {
        hcatPtns.add(new HCatPartition(hcatTable, ptn));
      }
    } catch (NoSuchObjectException e) {
      throw new ObjectNotFoundException(
        "NoSuchObjectException while retrieving partition.", e);
    } catch (MetaException e) {
      throw new HCatException(
        "MetaException while retrieving partition.", e);
    } catch (TException e) {
      throw new ConnectionFailureException(
        "TException while retrieving partition.", e);
    }
    return hcatPtns;
  }

  @Override
  public List<HCatPartition> getPartitions(String dbName, String tblName, Map<String, String> partitionSpec) throws HCatException {
    return listPartitionsByFilter(dbName, tblName, getFilterString(partitionSpec));
  }

  @Override
  @InterfaceAudience.LimitedPrivate({"Hive"})
  @InterfaceStability.Evolving
  public HCatPartitionSpec getPartitionSpecs(String dbName, String tableName, int maxPartitions) throws HCatException {
    try {
      return new HCatPartitionSpec(getTable(dbName, tableName),
        hmsClient.listPartitionSpecs(dbName, tableName, maxPartitions));
    } catch (NoSuchObjectException e) {
      throw new ObjectNotFoundException(
        "NoSuchObjectException while retrieving partition.", e);
    } catch (MetaException e) {
      throw new HCatException(
        "MetaException while retrieving partition.", e);
    } catch (TException e) {
      throw new ConnectionFailureException(
        "TException while retrieving partition.", e);
    }
  }

  @Override
  public HCatPartitionSpec getPartitionSpecs(String dbName, String tableName, Map<String, String> partitionSelector, int maxPartitions) throws HCatException {
    return listPartitionSpecsByFilter(dbName, tableName, getFilterString(partitionSelector), maxPartitions);
  }

  private static String getFilterString(Map<String, String> partitionSpec) {
    final String AND = " AND ";
    StringBuilder filter = new StringBuilder();
    for (Map.Entry<String, String> entry : partitionSpec.entrySet()) {
      filter.append(entry.getKey()).append("=").append("\"").append(entry.getValue()).append("\"").append(AND);
    }
    int length = filter.toString().length();
    if (length > 0) {
      filter.delete(length - AND.length(), length);
    }
    return filter.toString();
  }
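  // Illustrative example (editor's note, not part of the original source):
  // for a partition spec of {"dt" -> "20150310", "region" -> "US"},
  // getFilterString() above produces the metastore filter string:
  //
  //   dt="20150310" AND region="US"
  //
  // Iteration order follows the Map implementation supplied by the caller.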
  @Override
  public HCatPartition getPartition(String dbName, String tableName,
      Map<String, String> partitionSpec) throws HCatException {
    HCatPartition partition = null;
    try {
      HCatTable hcatTable = getTable(dbName, tableName);
      List<HCatFieldSchema> partitionColumns = hcatTable.getPartCols();
      if (partitionColumns.size() != partitionSpec.size()) {
        throw new HCatException("Partition-spec doesn't have the right number of partition keys.");
      }
      ArrayList<String> ptnValues = new ArrayList<String>();
      for (HCatFieldSchema partitionColumn : partitionColumns) {
        String partKey = partitionColumn.getName();
        if (partitionSpec.containsKey(partKey)) {
          ptnValues.add(partitionSpec.get(partKey)); // Partition-keys added in order.
        } else {
          throw new HCatException("Invalid partition-key specified: " + partKey);
        }
      }
      Partition hivePartition = hmsClient.getPartition(checkDB(dbName),
        tableName, ptnValues);
      if (hivePartition != null) {
        partition = new HCatPartition(hcatTable, hivePartition);
      }
    } catch (MetaException e) {
      throw new HCatException(
        "MetaException while retrieving partition.", e);
    } catch (NoSuchObjectException e) {
      throw new ObjectNotFoundException(
        "NoSuchObjectException while retrieving partition.", e);
    } catch (TException e) {
      throw new ConnectionFailureException(
        "TException while retrieving partition.", e);
    }
    return partition;
  }
  @Override
  public void addPartition(HCatAddPartitionDesc partInfo)
    throws HCatException {
    Table tbl = null;
    try {
      tbl = hmsClient.getTable(partInfo.getDatabaseName(),
        partInfo.getTableName());
      // TODO: Should be moved out.
      if (tbl.getPartitionKeysSize() == 0) {
        throw new HCatException("The table " + partInfo.getTableName()
          + " is not partitioned.");
      }
      HCatTable hcatTable = new HCatTable(tbl);
      HCatPartition hcatPartition = partInfo.getHCatPartition();
      // TODO: Remove in Hive 0.16.
      // This is only required to support the deprecated methods in HCatAddPartitionDesc.Builder.
      if (hcatPartition == null) {
        hcatPartition = partInfo.getHCatPartition(hcatTable);
      }
      hmsClient.add_partition(hcatPartition.toHivePartition());
    } catch (InvalidObjectException e) {
      throw new HCatException(
        "InvalidObjectException while adding partition.", e);
    } catch (AlreadyExistsException e) {
      throw new HCatException(
        "AlreadyExistsException while adding partition.", e);
    } catch (MetaException e) {
      throw new HCatException("MetaException while adding partition.", e);
    } catch (NoSuchObjectException e) {
      throw new ObjectNotFoundException("The table " + partInfo.getTableName()
        + " could not be found.", e);
    } catch (TException e) {
      throw new ConnectionFailureException(
        "TException while adding partition.", e);
    }
  }
  /**
   * Helper class to help build ExprDesc tree to represent the partitions to be dropped.
   * Note: At present, the ExpressionBuilder only constructs partition predicates where
   * partition-keys equal specific values, and logical-AND expressions. E.g.
   * ( dt = '20150310' AND region = 'US' )
   * This only supports the partition-specs specified by the Map argument of:
   * {@link org.apache.hive.hcatalog.api.HCatClient#dropPartitions(String, String, Map, boolean)}
   */
  private static class ExpressionBuilder {

    private Map<String, PrimitiveTypeInfo> partColumnTypesMap = Maps.newHashMap();
    private Map<String, String> partSpecs;

    public ExpressionBuilder(Table table, Map<String, String> partSpecs) {
      this.partSpecs = partSpecs;
      for (FieldSchema partField : table.getPartitionKeys()) {
        partColumnTypesMap.put(partField.getName().toLowerCase(),
          TypeInfoFactory.getPrimitiveTypeInfo(partField.getType()));
      }
    }

    private PrimitiveTypeInfo getTypeFor(String partColumn) {
      return partColumnTypesMap.get(partColumn.toLowerCase());
    }

    private Object getTypeAppropriateValueFor(PrimitiveTypeInfo type, String value) {
      ObjectInspectorConverters.Converter converter = ObjectInspectorConverters.getConverter(
        TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(TypeInfoFactory.stringTypeInfo),
        TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(type));
      return converter.convert(value);
    }

    public ExprNodeGenericFuncDesc equalityPredicate(String partColumn, String value) throws SemanticException {
      PrimitiveTypeInfo partColumnType = getTypeFor(partColumn);
      ExprNodeColumnDesc partColumnExpr = new ExprNodeColumnDesc(partColumnType, partColumn, null, true);
      ExprNodeConstantDesc valueExpr = new ExprNodeConstantDesc(partColumnType,
        getTypeAppropriateValueFor(partColumnType, value));
      return binaryPredicate("=", partColumnExpr, valueExpr);
    }

    public ExprNodeGenericFuncDesc binaryPredicate(String function, ExprNodeDesc lhs, ExprNodeDesc rhs) throws SemanticException {
      return new ExprNodeGenericFuncDesc(TypeInfoFactory.booleanTypeInfo,
        FunctionRegistry.getFunctionInfo(function).getGenericUDF(),
        Lists.newArrayList(lhs, rhs));
    }

    public ExprNodeGenericFuncDesc build() throws SemanticException {
      ExprNodeGenericFuncDesc resultExpr = null;
      for (Map.Entry<String, String> partSpec : partSpecs.entrySet()) {
        String column = partSpec.getKey();
        String value = partSpec.getValue();
        ExprNodeGenericFuncDesc partExpr = equalityPredicate(column, value);
        resultExpr = (resultExpr == null ? partExpr : binaryPredicate("and", resultExpr, partExpr));
      }
      return resultExpr;
    }
  } // class ExpressionBuilder
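  // Illustrative sketch (editor's note, not part of the original source):
  // for a partition spec of {"dt" -> "20150310", "region" -> "US"}, build()
  // above produces an ExprNodeGenericFuncDesc tree equivalent to:
  //
  //   and( =(dt, '20150310'), =(region, 'US') )
  //
  // Each string value is first converted to the partition column's primitive
  // type via getTypeAppropriateValueFor(), so non-string partition keys
  // compare correctly.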
  private static boolean isExternal(Table table) {
    return table.getParameters() != null
      && "TRUE".equalsIgnoreCase(table.getParameters().get("EXTERNAL"));
  }

  private void dropPartitionsUsingExpressions(Table table, Map<String, String> partitionSpec,
      boolean ifExists, boolean deleteData)
    throws SemanticException, TException {
    LOG.info("HCatClient: Dropping partitions using partition-predicate Expressions.");
    ExprNodeGenericFuncDesc partitionExpression = new ExpressionBuilder(table, partitionSpec).build();
    Pair<Integer, byte[]> serializedPartitionExpression = Pair.of(partitionSpec.size(),
      SerializationUtilities.serializeExpressionToKryo(partitionExpression));
    hmsClient.dropPartitions(table.getDbName(), table.getTableName(), Arrays.asList(serializedPartitionExpression),
      deleteData && !isExternal(table), // Delete data?
      ifExists, // Fail if table doesn't exist?
      false); // Need results back?
  }

  private void dropPartitionsIteratively(String dbName, String tableName,
      Map<String, String> partitionSpec, boolean ifExists, boolean deleteData)
    throws HCatException, TException {
    LOG.info("HCatClient: Dropping partitions iteratively.");
    List<Partition> partitions = hmsClient.listPartitionsByFilter(dbName, tableName,
      getFilterString(partitionSpec), (short) -1);
    for (Partition partition : partitions) {
      dropPartition(partition, ifExists, deleteData);
    }
  }

  @Override
  public void dropPartitions(String dbName, String tableName,
      Map<String, String> partitionSpec, boolean ifExists, boolean deleteData)
    throws HCatException {
    LOG.info("HCatClient dropPartitions(db=" + dbName + ",table=" + tableName + ", partitionSpec: [" + partitionSpec + "]).");
    try {
      dbName = checkDB(dbName);
      Table table = hmsClient.getTable(dbName, tableName);
      if (hiveConfig.getBoolVar(HiveConf.ConfVars.METASTORE_CLIENT_DROP_PARTITIONS_WITH_EXPRESSIONS)) {
        try {
          dropPartitionsUsingExpressions(table, partitionSpec, ifExists, deleteData);
        } catch (SemanticException parseFailure) {
          LOG.warn("Could not push down partition-specification to back-end, for dropPartitions(). Resorting to iteration.",
            parseFailure);
          dropPartitionsIteratively(dbName, tableName, partitionSpec, ifExists, deleteData);
        }
      } else {
        // Not using expressions.
        dropPartitionsIteratively(dbName, tableName, partitionSpec, ifExists, deleteData);
      }
    } catch (NoSuchObjectException e) {
      throw new ObjectNotFoundException(
        "NoSuchObjectException while dropping partition. " +
        "Either db(" + dbName + ") or table(" + tableName + ") missing.", e);
    } catch (MetaException e) {
      throw new HCatException("MetaException while dropping partition.", e);
    } catch (TException e) {
      throw new ConnectionFailureException(
        "TException while dropping partition.", e);
    }
  }
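  // Illustrative usage sketch (editor's note, not part of the original
  // source), assuming a table partitioned on "dt" and "region":
  //
  //   Map<String, String> spec = new HashMap<String, String>();
  //   spec.put("dt", "20150310");
  //   spec.put("region", "US");
  //   client.dropPartitions("mydb", "mytable", spec, true /* ifExists */, true /* deleteData */);
  //
  // Depending on METASTORE_CLIENT_DROP_PARTITIONS_WITH_EXPRESSIONS, the drop
  // is either pushed to the metastore as a serialized predicate expression,
  // or performed one partition at a time after listPartitionsByFilter().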
  @Override
  public void dropPartitions(String dbName, String tableName,
      Map<String, String> partitionSpec, boolean ifExists) throws HCatException {
    dropPartitions(dbName, tableName, partitionSpec, ifExists, true);
  }

  private void dropPartition(Partition partition, boolean ifExists, boolean deleteData)
    throws HCatException, MetaException, TException {
    try {
      hmsClient.dropPartition(partition.getDbName(), partition.getTableName(), partition.getValues(), deleteData);
    } catch (NoSuchObjectException e) {
      if (!ifExists) {
        throw new ObjectNotFoundException(
          "NoSuchObjectException while dropping partition: " + partition.getValues(), e);
      }
    }
  }

  @Override
  public List<HCatPartition> listPartitionsByFilter(String dbName,
      String tblName, String filter) throws HCatException {
    List<HCatPartition> hcatPtns = new ArrayList<HCatPartition>();
    try {
      HCatTable table = getTable(dbName, tblName);
      List<Partition> hivePtns = hmsClient.listPartitionsByFilter(
        table.getDbName(), table.getTableName(), filter, (short) -1);
      for (Partition ptn : hivePtns) {
        hcatPtns.add(new HCatPartition(table, ptn));
      }
    } catch (MetaException e) {
      throw new HCatException("MetaException while fetching partitions.", e);
    } catch (NoSuchObjectException e) {
      throw new ObjectNotFoundException(
        "NoSuchObjectException while fetching partitions.", e);
    } catch (TException e) {
      throw new ConnectionFailureException(
        "TException while fetching partitions.", e);
    }
    return hcatPtns;
  }

  @Override
  @InterfaceAudience.LimitedPrivate({"Hive"})
  @InterfaceStability.Evolving
  public HCatPartitionSpec listPartitionSpecsByFilter(String dbName, String tblName, String filter, int maxPartitions)
    throws HCatException {
    try {
      return new HCatPartitionSpec(getTable(dbName, tblName),
        hmsClient.listPartitionSpecsByFilter(dbName, tblName, filter, maxPartitions));
    } catch (MetaException e) {
      throw new HCatException("MetaException while fetching partitions.", e);
    } catch (NoSuchObjectException e) {
      throw new ObjectNotFoundException(
        "NoSuchObjectException while fetching partitions.", e);
    } catch (TException e) {
      throw new ConnectionFailureException(
        "TException while fetching partitions.", e);
    }
  }

  @Override
  public void markPartitionForEvent(String dbName, String tblName,
      Map<String, String> partKVs, PartitionEventType eventType)
    throws HCatException {
    try {
      hmsClient.markPartitionForEvent(checkDB(dbName), tblName, partKVs,
        eventType);
    } catch (MetaException e) {
      throw new HCatException(
        "MetaException while marking partition for event.", e);
    } catch (NoSuchObjectException e) {
      throw new ObjectNotFoundException(
        "NoSuchObjectException while marking partition for event.", e);
    } catch (UnknownTableException e) {
      throw new HCatException(
        "UnknownTableException while marking partition for event.", e);
    } catch (UnknownDBException e) {
      throw new HCatException(
        "UnknownDBException while marking partition for event.", e);
    } catch (TException e) {
      throw new ConnectionFailureException(
        "TException while marking partition for event.", e);
    }
  }

  @Override
  public boolean isPartitionMarkedForEvent(String dbName, String tblName,
      Map<String, String> partKVs, PartitionEventType eventType)
    throws HCatException {
    boolean isMarked = false;
    try {
      isMarked = hmsClient.isPartitionMarkedForEvent(checkDB(dbName),
        tblName, partKVs, eventType);
    } catch (MetaException e) {
      throw new HCatException(
        "MetaException while checking partition for event.", e);
    } catch (NoSuchObjectException e) {
      throw new ObjectNotFoundException(
        "NoSuchObjectException while checking partition for event.", e);
    } catch (UnknownTableException e) {
      throw new HCatException(
        "UnknownTableException while checking partition for event.", e);
    } catch (UnknownDBException e) {
      throw new HCatException(
        "UnknownDBException while checking partition for event.", e);
    } catch (TException e) {
      throw new ConnectionFailureException(
        "TException while checking partition for event.", e);
    }
    return isMarked;
  }

  @Override
  public String getDelegationToken(String owner,
      String renewerKerberosPrincipalName) throws HCatException {
    String token = null;
    try {
      token = hmsClient.getDelegationToken(owner,
        renewerKerberosPrincipalName);
    } catch (MetaException e) {
      throw new HCatException(
        "MetaException while getting delegation token.", e);
    } catch (TException e) {
      throw new ConnectionFailureException(
        "TException while getting delegation token.", e);
    }
    return token;
  }

  @Override
  public long renewDelegationToken(String tokenStrForm) throws HCatException {
    long time = 0;
    try {
      time = hmsClient.renewDelegationToken(tokenStrForm);
    } catch (MetaException e) {
      throw new HCatException(
        "MetaException while renewing delegation token.", e);
    } catch (TException e) {
      throw new ConnectionFailureException(
        "TException while renewing delegation token.", e);
    }
    return time;
  }

  @Override
  public void cancelDelegationToken(String tokenStrForm)
    throws HCatException {
    try {
      hmsClient.cancelDelegationToken(tokenStrForm);
    } catch (MetaException e) {
      throw new HCatException(
        "MetaException while canceling delegation token.", e);
    } catch (TException e) {
      throw new ConnectionFailureException(
        "TException while canceling delegation token.", e);
    }
  }
  /*
   * @param conf
   * @throws HCatException, ConnectionFailureException
   *
   * @see
   * org.apache.hive.hcatalog.api.HCatClient#initialize(org.apache.hadoop.conf.
   * Configuration)
   */
  @Override
  void initialize(Configuration conf) throws HCatException {
    this.config = conf;
    try {
      hiveConfig = HCatUtil.getHiveConf(config);
      hmsClient = HCatUtil.getHiveMetastoreClient(hiveConfig);
    } catch (MetaException exp) {
      throw new HCatException("MetaException while creating HMS client", exp);
    } catch (IOException exp) {
      throw new HCatException("IOException while creating HMS client", exp);
    }
  }
  @Override
  public String getConfVal(String key, String defaultVal) {
    return hiveConfig.get(key, defaultVal);
  }

  private Table getHiveTableLike(String dbName, String existingTblName,
      String newTableName, boolean isExternal, String location)
    throws HCatException {
    Table oldtbl = null;
    Table newTable = null;
    try {
      oldtbl = hmsClient.getTable(checkDB(dbName), existingTblName);
    } catch (MetaException e1) {
      throw new HCatException(
        "MetaException while retrieving existing table.", e1);
    } catch (NoSuchObjectException e1) {
      throw new ObjectNotFoundException(
        "NoSuchObjectException while retrieving existing table.", e1);
    } catch (TException e1) {
      throw new ConnectionFailureException(
        "TException while retrieving existing table.", e1);
    }
    if (oldtbl != null) {
      newTable = new Table();
      newTable.setTableName(newTableName);
      newTable.setDbName(dbName);
      StorageDescriptor sd = new StorageDescriptor(oldtbl.getSd());
      newTable.setSd(sd);
      newTable.setParameters(oldtbl.getParameters());
      if (location == null) {
        newTable.getSd().setLocation(oldtbl.getSd().getLocation());
      } else {
        newTable.getSd().setLocation(location);
      }
      if (isExternal) {
        newTable.putToParameters("EXTERNAL", "TRUE");
        newTable.setTableType(TableType.EXTERNAL_TABLE.toString());
      } else {
        newTable.getParameters().remove("EXTERNAL");
      }
      // set create time
      newTable.setCreateTime((int) (System.currentTimeMillis() / 1000));
      newTable.setLastAccessTimeIsSet(false);
    }
    return newTable;
  }

  /*
   * @throws HCatException
   *
   * @see org.apache.hive.hcatalog.api.HCatClient#closeClient()
   */
  @Override
  public void close() throws HCatException {
    hmsClient.close();
  }

  private String checkDB(String name) {
    if (StringUtils.isEmpty(name)) {
      return Warehouse.DEFAULT_DATABASE_NAME;
    } else {
      return name;
    }
  }
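  // Example behaviour (editor's note, not part of the original source):
  // checkDB(null) and checkDB("") both fall back to
  // Warehouse.DEFAULT_DATABASE_NAME ("default"), while checkDB("sales")
  // returns "sales" unchanged.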
  /*
   * @param partInfoList
   * @return The size of the list of partitions.
   * @throws HCatException, ConnectionFailureException
   * @see org.apache.hive.hcatalog.api.HCatClient#addPartitions(java.util.List)
   */
  @Override
  public int addPartitions(List<HCatAddPartitionDesc> partInfoList)
    throws HCatException {
    int numPartitions = -1;
    if ((partInfoList == null) || (partInfoList.size() == 0)) {
      throw new HCatException("The partition list is null or empty.");
    }
    Table tbl = null;
    try {
      tbl = hmsClient.getTable(partInfoList.get(0).getDatabaseName(),
        partInfoList.get(0).getTableName());
      HCatTable hcatTable = new HCatTable(tbl);
      ArrayList<Partition> ptnList = new ArrayList<Partition>();
      for (HCatAddPartitionDesc desc : partInfoList) {
        HCatPartition hCatPartition = desc.getHCatPartition();
        // TODO: Remove in Hive 0.16.
        // This is required only to support the deprecated HCatAddPartitionDesc.Builder interfaces.
        if (hCatPartition == null) {
          hCatPartition = desc.getHCatPartition(hcatTable);
        }
        ptnList.add(hCatPartition.toHivePartition());
      }
      numPartitions = hmsClient.add_partitions(ptnList);
    } catch (InvalidObjectException e) {
      throw new HCatException(
        "InvalidObjectException while adding partition.", e);
    } catch (AlreadyExistsException e) {
      throw new HCatException(
        "AlreadyExistsException while adding partition.", e);
    } catch (MetaException e) {
      throw new HCatException("MetaException while adding partition.", e);
    } catch (NoSuchObjectException e) {
      throw new ObjectNotFoundException("The table "
        + partInfoList.get(0).getTableName()
        + " could not be found.", e);
    } catch (TException e) {
      throw new ConnectionFailureException(
        "TException while adding partition.", e);
    }
    return numPartitions;
  }
  @Override
  @InterfaceAudience.LimitedPrivate({"Hive"})
  @InterfaceStability.Evolving
  public int addPartitionSpec(HCatPartitionSpec partitionSpec) throws HCatException {
    try {
      return hmsClient.add_partitions_pspec(partitionSpec.toPartitionSpecProxy());
    } catch (InvalidObjectException e) {
      throw new HCatException(
        "InvalidObjectException while adding partition.", e);
    } catch (AlreadyExistsException e) {
      throw new HCatException(
        "AlreadyExistsException while adding partition.", e);
    } catch (MetaException e) {
      throw new HCatException("MetaException while adding partition.", e);
    } catch (NoSuchObjectException e) {
      throw new ObjectNotFoundException("The table could not be found.", e);
    } catch (TException e) {
      throw new ConnectionFailureException(
        "TException while adding partition.", e);
    }
  }

  @Override
  public String getMessageBusTopicName(String dbName, String tableName) throws HCatException {
    try {
      return hmsClient.getTable(dbName, tableName).getParameters().get(
        HCatConstants.HCAT_MSGBUS_TOPIC_NAME);
    } catch (MetaException e) {
      throw new HCatException("MetaException while retrieving JMS Topic name.", e);
    } catch (NoSuchObjectException e) {
      throw new HCatException("Could not find DB:" + dbName + " or Table:" + tableName, e);
    } catch (TException e) {
      throw new ConnectionFailureException(
        "TException while retrieving JMS Topic name.", e);
    }
  }

  @Override
  public Iterator<ReplicationTask> getReplicationTasks(
      long lastEventId, int maxEvents, String dbName, String tableName) throws HCatException {
    return new HCatReplicationTaskIterator(this, lastEventId, maxEvents, dbName, tableName);
  }

  @Override
  public List<HCatNotificationEvent> getNextNotification(long lastEventId, int maxEvents,
      IMetaStoreClient.NotificationFilter filter)
    throws HCatException {
    try {
      NotificationEventResponse rsp = hmsClient.getNextNotification(lastEventId, maxEvents, filter);
      if (rsp != null && rsp.getEvents() != null) {
        return Lists.transform(rsp.getEvents(), new Function<NotificationEvent, HCatNotificationEvent>() {
          @Override
          public HCatNotificationEvent apply(@Nullable NotificationEvent notificationEvent) {
            return new HCatNotificationEvent(notificationEvent);
          }
        });
      } else {
        return Collections.emptyList();
      }
    } catch (TException e) {
      throw new ConnectionFailureException("TException while getting notifications", e);
    }
  }

  @Override
  public long getCurrentNotificationEventId() throws HCatException {
    try {
      CurrentNotificationEventId id = hmsClient.getCurrentNotificationEventId();
      return id.getEventId();
    } catch (TException e) {
      throw new ConnectionFailureException(
        "TException while getting current notification event id", e);
    }
  }

  @Override
  public String serializeTable(HCatTable hcatTable) throws HCatException {
    return MetadataSerializer.get().serializeTable(hcatTable);
  }

  @Override
  public HCatTable deserializeTable(String hcatTableStringRep) throws HCatException {
    return MetadataSerializer.get().deserializeTable(hcatTableStringRep);
  }

  @Override
  public String serializePartition(HCatPartition hcatPartition) throws HCatException {
    return MetadataSerializer.get().serializePartition(hcatPartition);
  }

  @Override
  public List<String> serializePartitions(List<HCatPartition> hcatPartitions) throws HCatException {
    List<String> partStrings = new ArrayList<String>(hcatPartitions.size());
    MetadataSerializer serializer = MetadataSerializer.get();
    for (HCatPartition partition : hcatPartitions) {
      partStrings.add(serializer.serializePartition(partition));
    }
    return partStrings;
  }
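  // Illustrative round-trip sketch (editor's note, not part of the original
  // source): the serialized forms are opaque strings produced by
  // MetadataSerializer, so metadata can be shipped between processes and
  // rehydrated later:
  //
  //   String rep = client.serializeTable(client.getTable("mydb", "mytable"));
  //   HCatTable copy = client.deserializeTable(rep);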
  @Override
  public HCatPartition deserializePartition(String hcatPartitionStringRep) throws HCatException {
    HCatPartition hcatPartition = MetadataSerializer.get().deserializePartition(hcatPartitionStringRep);
    hcatPartition.hcatTable(getTable(hcatPartition.getDatabaseName(), hcatPartition.getTableName()));
    return hcatPartition;
  }

  @Override
  public List<HCatPartition> deserializePartitions(List<String> hcatPartitionStringReps) throws HCatException {
    List<HCatPartition> partitions = new ArrayList<HCatPartition>(hcatPartitionStringReps.size());
    MetadataSerializer deserializer = MetadataSerializer.get();
    HCatTable table = null;
    for (String partString : hcatPartitionStringReps) {
      HCatPartition partition;
      if (table == null) {
        partition = deserializePartition(partString);
        table = partition.hcatTable();
      } else {
        partition = deserializer.deserializePartition(partString);
        if (partition.getDatabaseName().equals(table.getDbName())
            && partition.getTableName().equals(table.getTableName())) {
          partition.hcatTable(table);
        } else {
          throw new HCatException("All partitions must belong to the same table: "
            + table.getDbName() + "." + table.getTableName());
        }
      }
      partitions.add(partition);
    }
    return partitions;
  }

  @Override
  public List<String> serializePartitionSpec(HCatPartitionSpec partitionSpec) throws HCatException {
    return MetadataSerializer.get().serializePartitionSpec(partitionSpec);
  }

  @Override
  public HCatPartitionSpec deserializePartitionSpec(List<String> hcatPartitionSpecStrings) throws HCatException {
    HCatPartitionSpec hcatPartitionSpec = MetadataSerializer.get()
      .deserializePartitionSpec(hcatPartitionSpecStrings);
    hcatPartitionSpec
      .hcatTable(getTable(hcatPartitionSpec.getDbName(), hcatPartitionSpec.getTableName()));
    return hcatPartitionSpec;
  }
}