
/tags/release-0.0.0-rc0/hive/external/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java

/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hive.ql.metadata;

import java.io.IOException;
import java.io.Serializable;
import java.net.URI;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.common.JavaUtils;
import org.apache.hadoop.hive.metastore.MetaStoreUtils;
import org.apache.hadoop.hive.metastore.ProtectMode;
import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.Order;
import org.apache.hadoop.hive.metastore.api.SerDeInfo;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils;
import org.apache.hadoop.hive.ql.io.HiveOutputFormat;
import org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat;
import org.apache.hadoop.hive.serde.Constants;
import org.apache.hadoop.hive.serde2.Deserializer;
import org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe;
import org.apache.hadoop.hive.serde2.SerDeException;
import org.apache.hadoop.hive.serde2.SerDeUtils;
import org.apache.hadoop.hive.serde2.objectinspector.StructField;
import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapred.InputFormat;
import org.apache.hadoop.mapred.SequenceFileInputFormat;
/**
 * A Hive Table: a fundamental unit of data in Hive that shares a common schema/DDL.
 *
 * Please note that the ql code should always go through methods of this class to access the
 * metadata, instead of directly accessing org.apache.hadoop.hive.metastore.api.Table. This
 * helps to isolate the metastore code and the ql code.
 */
public class Table implements Serializable {

  private static final long serialVersionUID = 1L;

  static final private Log LOG = LogFactory.getLog("hive.ql.metadata.Table");

  private org.apache.hadoop.hive.metastore.api.Table tTable;

  /**
   * These fields are all cached fields. The information comes from tTable.
   */
  private Deserializer deserializer;
  private Class<? extends HiveOutputFormat> outputFormatClass;
  private Class<? extends InputFormat> inputFormatClass;
  private URI uri;
  private HiveStorageHandler storageHandler;

  /**
   * Used only for serialization.
   */
  public Table() {
  }

  public Table(org.apache.hadoop.hive.metastore.api.Table table) {
    tTable = table;
    if (!isView()) {
      // This will set up field: inputFormatClass
      getInputFormatClass();
      // This will set up field: outputFormatClass
      getOutputFormatClass();
    }
  }

  public Table(String databaseName, String tableName) {
    this(getEmptyTable(databaseName, tableName));
  }

  /**
   * This function should only be used in serialization.
   * We should never call this function to modify the fields, because
   * the cached fields will become outdated.
   */
  public org.apache.hadoop.hive.metastore.api.Table getTTable() {
    return tTable;
  }

  /**
   * This function should only be called by Java serialization.
   */
  public void setTTable(org.apache.hadoop.hive.metastore.api.Table tTable) {
    this.tTable = tTable;
  }
  /**
   * Initialize an empty table.
   */
  static org.apache.hadoop.hive.metastore.api.Table
      getEmptyTable(String databaseName, String tableName) {
    StorageDescriptor sd = new StorageDescriptor();
    {
      sd.setSerdeInfo(new SerDeInfo());
      sd.setNumBuckets(-1);
      sd.setBucketCols(new ArrayList<String>());
      sd.setCols(new ArrayList<FieldSchema>());
      sd.setParameters(new HashMap<String, String>());
      sd.setSortCols(new ArrayList<Order>());
      sd.getSerdeInfo().setParameters(new HashMap<String, String>());
      // We have to use MetadataTypedColumnsetSerDe because LazySimpleSerDe does
      // not support a table with no columns.
      sd.getSerdeInfo().setSerializationLib(MetadataTypedColumnsetSerDe.class.getName());
      sd.getSerdeInfo().getParameters().put(Constants.SERIALIZATION_FORMAT, "1");
      sd.setInputFormat(SequenceFileInputFormat.class.getName());
      sd.setOutputFormat(HiveSequenceFileOutputFormat.class.getName());
    }

    org.apache.hadoop.hive.metastore.api.Table t = new org.apache.hadoop.hive.metastore.api.Table();
    {
      t.setSd(sd);
      t.setPartitionKeys(new ArrayList<FieldSchema>());
      t.setParameters(new HashMap<String, String>());
      t.setTableType(TableType.MANAGED_TABLE.toString());
      t.setDbName(databaseName);
      t.setTableName(tableName);
    }
    return t;
  }
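  /**
   * Sanity-checks the table definition: a valid table name, at least one
   * column, a SerDe plus input/output format classes for non-view tables,
   * view text only for views, no duplicate column names, and no overlap
   * between data columns and partition columns.
   */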
  public void checkValidity() throws HiveException {
    // check for validity
    String name = tTable.getTableName();
    if (null == name || name.length() == 0
        || !MetaStoreUtils.validateName(name)) {
      throw new HiveException("[" + name + "]: is not a valid table name");
    }
    if (0 == getCols().size()) {
      throw new HiveException(
          "at least one column must be specified for the table");
    }
    if (!isView()) {
      if (null == getDeserializer()) {
        throw new HiveException("must specify a non-null serDe");
      }
      if (null == getInputFormatClass()) {
        throw new HiveException("must specify an InputFormat class");
      }
      if (null == getOutputFormatClass()) {
        throw new HiveException("must specify an OutputFormat class");
      }
    }

    if (isView()) {
      assert(getViewOriginalText() != null);
      assert(getViewExpandedText() != null);
    } else {
      assert(getViewOriginalText() == null);
      assert(getViewExpandedText() == null);
    }

    Iterator<FieldSchema> iterCols = getCols().iterator();
    List<String> colNames = new ArrayList<String>();
    while (iterCols.hasNext()) {
      String colName = iterCols.next().getName();
      Iterator<String> iter = colNames.iterator();
      while (iter.hasNext()) {
        String oldColName = iter.next();
        if (colName.equalsIgnoreCase(oldColName)) {
          throw new HiveException("Duplicate column name " + colName
              + " in the table definition.");
        }
      }
      colNames.add(colName.toLowerCase());
    }

    if (getPartCols() != null) {
      // there is no overlap between columns and partitioning columns
      Iterator<FieldSchema> partColsIter = getPartCols().iterator();
      while (partColsIter.hasNext()) {
        String partCol = partColsIter.next().getName();
        if (colNames.contains(partCol.toLowerCase())) {
          throw new HiveException("Partition column name " + partCol
              + " conflicts with table columns.");
        }
      }
    }
    return;
  }
  public void setInputFormatClass(Class<? extends InputFormat> inputFormatClass) {
    this.inputFormatClass = inputFormatClass;
    tTable.getSd().setInputFormat(inputFormatClass.getName());
  }

  public void setOutputFormatClass(Class<? extends HiveOutputFormat> outputFormatClass) {
    this.outputFormatClass = outputFormatClass;
    tTable.getSd().setOutputFormat(outputFormatClass.getName());
  }

  final public Properties getSchema() {
    return MetaStoreUtils.getSchema(tTable);
  }

  final public Path getPath() {
    String location = tTable.getSd().getLocation();
    if (location == null) {
      return null;
    }
    return new Path(location);
  }

  final public String getTableName() {
    return tTable.getTableName();
  }

  final public URI getDataLocation() {
    if (uri == null) {
      Path path = getPath();
      if (path != null) {
        uri = path.toUri();
      }
    }
    return uri;
  }
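  /**
   * Returns the deserializer for this table, creating it from the table
   * metadata on first use and caching it for subsequent calls.
   */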
  final public Deserializer getDeserializer() {
    if (deserializer == null) {
      try {
        deserializer = MetaStoreUtils.getDeserializer(Hive.get().getConf(), tTable);
      } catch (MetaException e) {
        throw new RuntimeException(e);
      } catch (HiveException e) {
        throw new RuntimeException(e);
      }
    }
    return deserializer;
  }
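  /**
   * Returns the storage handler for this table, created lazily from the
   * META_TABLE_STORAGE table property and cached; may be null when no
   * storage handler is configured.
   */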
  public HiveStorageHandler getStorageHandler() {
    if (storageHandler != null) {
      return storageHandler;
    }
    try {
      storageHandler = HiveUtils.getStorageHandler(
          Hive.get().getConf(),
          getProperty(
              org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_STORAGE));
    } catch (Exception e) {
      throw new RuntimeException(e);
    }
    return storageHandler;
  }
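  /**
   * Returns the table's InputFormat class, resolving it lazily from the
   * storage descriptor (or from the storage handler when no class name is
   * recorded) and caching the result.
   */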
  final public Class<? extends InputFormat> getInputFormatClass() {
    if (inputFormatClass == null) {
      try {
        String className = tTable.getSd().getInputFormat();
        if (className == null) {
          if (getStorageHandler() == null) {
            return null;
          }
          inputFormatClass = getStorageHandler().getInputFormatClass();
        } else {
          inputFormatClass = (Class<? extends InputFormat>)
              Class.forName(className, true, JavaUtils.getClassLoader());
        }
      } catch (ClassNotFoundException e) {
        throw new RuntimeException(e);
      }
    }
    return inputFormatClass;
  }

  final public Class<? extends HiveOutputFormat> getOutputFormatClass() {
    // Replace FileOutputFormat for backward compatibility
    if (outputFormatClass == null) {
      try {
        String className = tTable.getSd().getOutputFormat();
        Class<?> c;
        if (className == null) {
          if (getStorageHandler() == null) {
            return null;
          }
          c = getStorageHandler().getOutputFormatClass();
        } else {
          c = Class.forName(className, true,
              JavaUtils.getClassLoader());
        }
        if (!HiveOutputFormat.class.isAssignableFrom(c)) {
          outputFormatClass = HiveFileFormatUtils.getOutputFormatSubstitute(c);
        } else {
          outputFormatClass = (Class<? extends HiveOutputFormat>) c;
        }
      } catch (ClassNotFoundException e) {
        throw new RuntimeException(e);
      }
    }
    return outputFormatClass;
  }
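  /**
   * Checks that the given partition spec matches this table's partition
   * columns: an unpartitioned table must have a null spec, and a partitioned
   * table must have a value for every partition column. Throws HiveException
   * otherwise.
   */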
  final public boolean isValidSpec(Map<String, String> spec)
      throws HiveException {

    // TODO - types need to be checked.
    List<FieldSchema> partCols = tTable.getPartitionKeys();
    if (partCols == null || (partCols.size() == 0)) {
      if (spec != null) {
        throw new HiveException(
            "table is not partitioned but partition spec exists: " + spec);
      } else {
        return true;
      }
    }

    if ((spec == null) || (spec.size() != partCols.size())) {
      throw new HiveException(
          "table is partitioned but partition spec is not specified or is incomplete: "
          + spec);
    }

    for (FieldSchema field : partCols) {
      if (spec.get(field.getName()) == null) {
        throw new HiveException(field.getName()
            + " not found in table's partition spec: " + spec);
      }
    }

    return true;
  }
  public void setProperty(String name, String value) {
    tTable.getParameters().put(name, value);
  }

  public String getProperty(String name) {
    return tTable.getParameters().get(name);
  }

  public void setTableType(TableType tableType) {
    tTable.setTableType(tableType.toString());
  }

  public TableType getTableType() {
    return Enum.valueOf(TableType.class, tTable.getTableType());
  }
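  /**
   * Returns the fields of the table as reported by its deserializer's
   * ObjectInspector, i.e. the fully expanded column list.
   */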
  public ArrayList<StructField> getFields() {
    ArrayList<StructField> fields = new ArrayList<StructField>();
    try {
      Deserializer decoder = getDeserializer();

      // Expand out all the columns of the table
      StructObjectInspector structObjectInspector = (StructObjectInspector) decoder
          .getObjectInspector();
      List<? extends StructField> fld_lst = structObjectInspector
          .getAllStructFieldRefs();
      for (StructField field : fld_lst) {
        fields.add(field);
      }
    } catch (SerDeException e) {
      throw new RuntimeException(e);
    }
    return fields;
  }

  public StructField getField(String fld) {
    try {
      StructObjectInspector structObjectInspector = (StructObjectInspector) getDeserializer()
          .getObjectInspector();
      return structObjectInspector.getStructFieldRef(fld);
    } catch (Exception e) {
      throw new RuntimeException(e);
    }
  }
  @Override
  public String toString() {
    return tTable.getTableName();
  }

  public List<FieldSchema> getPartCols() {
    List<FieldSchema> partKeys = tTable.getPartitionKeys();
    if (partKeys == null) {
      partKeys = new ArrayList<FieldSchema>();
      tTable.setPartitionKeys(partKeys);
    }
    return partKeys;
  }

  public boolean isPartitionKey(String colName) {
    for (FieldSchema key : getPartCols()) {
      if (key.getName().toLowerCase().equals(colName)) {
        return true;
      }
    }
    return false;
  }
  // TODO merge this with getBucketCols function
  public String getBucketingDimensionId() {
    List<String> bcols = tTable.getSd().getBucketCols();
    if (bcols == null || bcols.size() == 0) {
      return null;
    }

    if (bcols.size() > 1) {
      LOG.warn(this
          + " table has more than one bucketing dimension, which isn't supported yet");
    }

    return bcols.get(0);
  }
  public void setDataLocation(URI uri) {
    this.uri = uri;
    tTable.getSd().setLocation(uri.toString());
  }

  public void unsetDataLocation() {
    this.uri = null;
    tTable.getSd().unsetLocation();
  }
  public void setBucketCols(List<String> bucketCols) throws HiveException {
    if (bucketCols == null) {
      return;
    }

    for (String col : bucketCols) {
      if (!isField(col)) {
        throw new HiveException("Bucket column " + col
            + " is not part of the table columns (" + getCols() + ")");
      }
    }
    tTable.getSd().setBucketCols(bucketCols);
  }
  public void setSortCols(List<Order> sortOrder) throws HiveException {
    tTable.getSd().setSortCols(sortOrder);
  }

  private boolean isField(String col) {
    for (FieldSchema field : getCols()) {
      if (field.getName().equals(col)) {
        return true;
      }
    }
    return false;
  }
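  /**
   * Returns the table's data columns, either straight from the storage
   * descriptor or, for SerDes that own their schema, by asking the
   * deserializer. Falls back to an empty list if the SerDe cannot be queried.
   */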
  public List<FieldSchema> getCols() {
    boolean getColsFromSerDe = SerDeUtils.shouldGetColsFromSerDe(
        getSerializationLib());
    if (!getColsFromSerDe) {
      return tTable.getSd().getCols();
    } else {
      try {
        return Hive.getFieldsFromDeserializer(getTableName(), getDeserializer());
      } catch (HiveException e) {
        LOG.error("Unable to get field from serde: " + getSerializationLib(), e);
      }
      return new ArrayList<FieldSchema>();
    }
  }
  /**
   * Returns a list of all the columns of the table (partition columns
   * followed by data columns, in that order).
   *
   * @return List<FieldSchema>
   */
  public List<FieldSchema> getAllCols() {
    ArrayList<FieldSchema> f_list = new ArrayList<FieldSchema>();
    f_list.addAll(getPartCols());
    f_list.addAll(getCols());
    return f_list;
  }
  public void setPartCols(List<FieldSchema> partCols) {
    tTable.setPartitionKeys(partCols);
  }

  public String getDbName() {
    return tTable.getDbName();
  }

  public int getNumBuckets() {
    return tTable.getSd().getNumBuckets();
  }
  /**
   * Replaces the directory corresponding to the table by srcf. Works by
   * deleting the table directory and renaming the source directory.
   *
   * @param srcf
   *          Source directory
   */
  protected void replaceFiles(Path srcf) throws HiveException {
    Path tableDest = new Path(getDataLocation().getPath());
    Hive.replaceFiles(srcf, tableDest, tableDest, Hive.get().getConf());
  }
  /**
   * Inserts the files specified into the table. Works by moving files.
   *
   * @param srcf
   *          Files to be moved. Leaf directories or globbed file paths
   */
  protected void copyFiles(Path srcf) throws HiveException {
    FileSystem fs;
    try {
      fs = FileSystem.get(getDataLocation(), Hive.get().getConf());
      Hive.copyFiles(srcf, new Path(getDataLocation().getPath()), fs);
    } catch (IOException e) {
      throw new HiveException("addFiles: filesystem error in check phase", e);
    }
  }
  public void setInputFormatClass(String name) throws HiveException {
    if (name == null) {
      inputFormatClass = null;
      tTable.getSd().setInputFormat(null);
      return;
    }
    try {
      setInputFormatClass((Class<? extends InputFormat<WritableComparable, Writable>>) Class
          .forName(name, true, JavaUtils.getClassLoader()));
    } catch (ClassNotFoundException e) {
      throw new HiveException("Class not found: " + name, e);
    }
  }

  public void setOutputFormatClass(String name) throws HiveException {
    if (name == null) {
      outputFormatClass = null;
      tTable.getSd().setOutputFormat(null);
      return;
    }
    try {
      Class<?> origin = Class.forName(name, true, JavaUtils.getClassLoader());
      setOutputFormatClass(HiveFileFormatUtils
          .getOutputFormatSubstitute(origin));
    } catch (ClassNotFoundException e) {
      throw new HiveException("Class not found: " + name, e);
    }
  }
  public boolean isPartitioned() {
    if (getPartCols() == null) {
      return false;
    }
    return (getPartCols().size() != 0);
  }

  public void setFields(List<FieldSchema> fields) {
    tTable.getSd().setCols(fields);
  }

  public void setNumBuckets(int nb) {
    tTable.getSd().setNumBuckets(nb);
  }
  /**
   * @return The owner of the table.
   * @see org.apache.hadoop.hive.metastore.api.Table#getOwner()
   */
  public String getOwner() {
    return tTable.getOwner();
  }

  /**
   * @return The table parameters.
   * @see org.apache.hadoop.hive.metastore.api.Table#getParameters()
   */
  public Map<String, String> getParameters() {
    return tTable.getParameters();
  }

  /**
   * @return The retention on the table.
   * @see org.apache.hadoop.hive.metastore.api.Table#getRetention()
   */
  public int getRetention() {
    return tTable.getRetention();
  }

  /**
   * @param owner
   * @see org.apache.hadoop.hive.metastore.api.Table#setOwner(java.lang.String)
   */
  public void setOwner(String owner) {
    tTable.setOwner(owner);
  }

  /**
   * @param retention
   * @see org.apache.hadoop.hive.metastore.api.Table#setRetention(int)
   */
  public void setRetention(int retention) {
    tTable.setRetention(retention);
  }

  private SerDeInfo getSerdeInfo() {
    return tTable.getSd().getSerdeInfo();
  }

  public void setSerializationLib(String lib) {
    getSerdeInfo().setSerializationLib(lib);
  }

  public String getSerializationLib() {
    return getSerdeInfo().getSerializationLib();
  }

  public String getSerdeParam(String param) {
    return getSerdeInfo().getParameters().get(param);
  }

  public String setSerdeParam(String param, String value) {
    return getSerdeInfo().getParameters().put(param, value);
  }

  public List<String> getBucketCols() {
    return tTable.getSd().getBucketCols();
  }

  public List<Order> getSortCols() {
    return tTable.getSd().getSortCols();
  }

  public void setTableName(String tableName) {
    tTable.setTableName(tableName);
  }

  public void setDbName(String databaseName) {
    tTable.setDbName(databaseName);
  }

  public List<FieldSchema> getPartitionKeys() {
    return tTable.getPartitionKeys();
  }
  /**
   * @return the original view text, or null if this table is not a view
   */
  public String getViewOriginalText() {
    return tTable.getViewOriginalText();
  }

  /**
   * @param viewOriginalText
   *          the original view text to set
   */
  public void setViewOriginalText(String viewOriginalText) {
    tTable.setViewOriginalText(viewOriginalText);
  }

  /**
   * @return the expanded view text, or null if this table is not a view
   */
  public String getViewExpandedText() {
    return tTable.getViewExpandedText();
  }

  public void clearSerDeInfo() {
    tTable.getSd().getSerdeInfo().getParameters().clear();
  }

  /**
   * @param viewExpandedText
   *          the expanded view text to set
   */
  public void setViewExpandedText(String viewExpandedText) {
    tTable.setViewExpandedText(viewExpandedText);
  }

  /**
   * @return whether this table is actually a view
   */
  public boolean isView() {
    return TableType.VIRTUAL_VIEW.equals(getTableType());
  }
  /**
   * Creates a partition name -> value spec map object
   *
   * @param tp
   *          Use the information from this partition.
   * @return Partition name to value mapping.
   */
  public LinkedHashMap<String, String> createSpec(
      org.apache.hadoop.hive.metastore.api.Partition tp) {

    List<FieldSchema> fsl = getPartCols();
    List<String> tpl = tp.getValues();
    LinkedHashMap<String, String> spec = new LinkedHashMap<String, String>();
    for (int i = 0; i < fsl.size(); i++) {
      FieldSchema fs = fsl.get(i);
      String value = tpl.get(i);
      spec.put(fs.getName(), value);
    }
    return spec;
  }

  public Table copy() throws HiveException {
    return new Table(tTable.deepCopy());
  }

  public void setCreateTime(int createTime) {
    tTable.setCreateTime(createTime);
  }

  public int getLastAccessTime() {
    return tTable.getLastAccessTime();
  }

  public void setLastAccessTime(int lastAccessTime) {
    tTable.setLastAccessTime(lastAccessTime);
  }
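  /**
   * @return true if this table is backed by a storage handler (i.e. the
   *         META_TABLE_STORAGE property is set), false for native tables.
   */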
  public boolean isNonNative() {
    return getProperty(
        org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_STORAGE)
        != null;
  }
  /**
   * @param protectMode
   */
  public void setProtectMode(ProtectMode protectMode) {
    Map<String, String> parameters = tTable.getParameters();
    parameters.put(ProtectMode.PARAMETER_NAME, protectMode.toString());
    tTable.setParameters(parameters);
  }

  /**
   * @return protect mode
   */
  public ProtectMode getProtectMode() {
    Map<String, String> parameters = tTable.getParameters();

    if (!parameters.containsKey(ProtectMode.PARAMETER_NAME)) {
      return new ProtectMode();
    } else {
      return ProtectMode.getProtectModeFromString(
          parameters.get(ProtectMode.PARAMETER_NAME));
    }
  }
  /**
   * @return true if the protect mode of the table indicates that it is offline
   */
  public boolean isOffline() {
    return getProtectMode().offline;
  }

  /**
   * @return true if the protect mode attributes of the table indicate
   *         that it is OK to drop the table
   */
  public boolean canDrop() {
    ProtectMode mode = getProtectMode();
    return (!mode.noDrop && !mode.offline && !mode.readOnly);
  }

  /**
   * @return true if the protect mode attributes of the table indicate
   *         that it is OK to write to the table
   */
  public boolean canWrite() {
    ProtectMode mode = getProtectMode();
    return (!mode.offline && !mode.readOnly);
  }

  /**
   * @return the complete name of the table, including the database name
   */
  public String getCompleteName() {
    return getDbName() + "@" + getTableName();
  }
}
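
// Illustrative sketch of how ql code might use this wrapper: construct a
// Table, describe its columns and file formats, then validate it. The
// database/table/column names below are hypothetical, and a Hive conf/session
// is assumed to be available for checkValidity() to resolve the SerDe.
//
//   Table t = new Table("default", "example_tbl");
//   List<FieldSchema> cols = new ArrayList<FieldSchema>();
//   cols.add(new FieldSchema("id", "int", "row id"));
//   t.setFields(cols);
//   t.setInputFormatClass(SequenceFileInputFormat.class.getName());
//   t.setOutputFormatClass(HiveSequenceFileOutputFormat.class.getName());
//   t.checkValidity(); // throws HiveException if the definition is malformed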