/tags/release-0.0.0-rc0/hive/external/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java

/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hive.ql.metadata;

import java.io.IOException;
import java.io.Serializable;
import java.net.URI;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.common.JavaUtils;
import org.apache.hadoop.hive.metastore.MetaStoreUtils;
import org.apache.hadoop.hive.metastore.ProtectMode;
import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.Order;
import org.apache.hadoop.hive.metastore.api.SerDeInfo;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils;
import org.apache.hadoop.hive.ql.io.HiveOutputFormat;
import org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat;
import org.apache.hadoop.hive.serde.Constants;
import org.apache.hadoop.hive.serde2.Deserializer;
import org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe;
import org.apache.hadoop.hive.serde2.SerDeException;
import org.apache.hadoop.hive.serde2.SerDeUtils;
import org.apache.hadoop.hive.serde2.objectinspector.StructField;
import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapred.InputFormat;
import org.apache.hadoop.mapred.SequenceFileInputFormat;

/**
 * A Hive Table is a fundamental unit of data in Hive that shares a common schema/DDL.
 *
 * Please note that the ql code should always go through methods of this class to access the
 * metadata, instead of directly accessing org.apache.hadoop.hive.metastore.api.Table.  This
 * helps to isolate the metastore code and the ql code.
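 *
 * A minimal usage sketch (illustrative only, not part of the original javadoc;
 * it assumes {@code apiTable} is an org.apache.hadoop.hive.metastore.api.Table
 * already fetched from the metastore):
 *
 * <pre>
 *   Table tbl = new Table(apiTable);
 *   tbl.checkValidity();
 *   for (FieldSchema col : tbl.getCols()) {
 *     System.out.println(col.getName() + " " + col.getType());
 *   }
 * </pre>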
 */
public class Table implements Serializable {

  private static final long serialVersionUID = 1L;

  static final private Log LOG = LogFactory.getLog("hive.ql.metadata.Table");

  private org.apache.hadoop.hive.metastore.api.Table tTable;

  /**
   * These fields are all cached fields.  The information comes from tTable.
   */
  private Deserializer deserializer;
  private Class<? extends HiveOutputFormat> outputFormatClass;
  private Class<? extends InputFormat> inputFormatClass;
  private URI uri;
  private HiveStorageHandler storageHandler;

  /**
   * Used only for serialization.
   */
  public Table() {
  }

  public Table(org.apache.hadoop.hive.metastore.api.Table table) {
    tTable = table;
    if (!isView()) {
      // This will set up field: inputFormatClass
      getInputFormatClass();
      // This will set up field: outputFormatClass
      getOutputFormatClass();
    }
  }

  public Table(String databaseName, String tableName) {
    this(getEmptyTable(databaseName, tableName));
  }

  /**
   * This function should only be used in serialization.
   * We should never call this function to modify the fields, because
   * the cached fields will become outdated.
   */
  public org.apache.hadoop.hive.metastore.api.Table getTTable() {
    return tTable;
  }

  /**
   * This function should only be called by Java serialization.
   */
  public void setTTable(org.apache.hadoop.hive.metastore.api.Table tTable) {
    this.tTable = tTable;
  }

  /**
   * Initialize an empty table.
   */
  static org.apache.hadoop.hive.metastore.api.Table
  getEmptyTable(String databaseName, String tableName) {
    StorageDescriptor sd = new StorageDescriptor();
    {
      sd.setSerdeInfo(new SerDeInfo());
      sd.setNumBuckets(-1);
      sd.setBucketCols(new ArrayList<String>());
      sd.setCols(new ArrayList<FieldSchema>());
      sd.setParameters(new HashMap<String, String>());
      sd.setSortCols(new ArrayList<Order>());
      sd.getSerdeInfo().setParameters(new HashMap<String, String>());
      // We have to use MetadataTypedColumnsetSerDe because LazySimpleSerDe does
      // not support a table with no columns.
      sd.getSerdeInfo().setSerializationLib(MetadataTypedColumnsetSerDe.class.getName());
      sd.getSerdeInfo().getParameters().put(Constants.SERIALIZATION_FORMAT, "1");
      sd.setInputFormat(SequenceFileInputFormat.class.getName());
      sd.setOutputFormat(HiveSequenceFileOutputFormat.class.getName());
    }

    org.apache.hadoop.hive.metastore.api.Table t = new org.apache.hadoop.hive.metastore.api.Table();
    {
      t.setSd(sd);
      t.setPartitionKeys(new ArrayList<FieldSchema>());
      t.setParameters(new HashMap<String, String>());
      t.setTableType(TableType.MANAGED_TABLE.toString());
      t.setDbName(databaseName);
      t.setTableName(tableName);
    }
    return t;
  }
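
  // Illustrative sketch (not in the original source): getEmptyTable() backs the
  // Table(databaseName, tableName) constructor, so a definition can be built up
  // in memory before it is created through the metastore. The column names and
  // types below are hypothetical.
  //
  //   Table tbl = new Table("default", "my_table");
  //   List<FieldSchema> cols = new ArrayList<FieldSchema>();
  //   cols.add(new FieldSchema("id", "int", null));
  //   cols.add(new FieldSchema("name", "string", null));
  //   tbl.setFields(cols);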

  public void checkValidity() throws HiveException {
    // check for validity
    String name = tTable.getTableName();
    if (null == name || name.length() == 0
        || !MetaStoreUtils.validateName(name)) {
      throw new HiveException("[" + name + "]: is not a valid table name");
    }
    if (0 == getCols().size()) {
      throw new HiveException(
          "at least one column must be specified for the table");
    }
    if (!isView()) {
      if (null == getDeserializer()) {
        throw new HiveException("must specify a non-null serDe");
      }
      if (null == getInputFormatClass()) {
        throw new HiveException("must specify an InputFormat class");
      }
      if (null == getOutputFormatClass()) {
        throw new HiveException("must specify an OutputFormat class");
      }
    }

    if (isView()) {
      assert(getViewOriginalText() != null);
      assert(getViewExpandedText() != null);
    } else {
      assert(getViewOriginalText() == null);
      assert(getViewExpandedText() == null);
    }

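    // Reject duplicate column names; the comparison below is case-insensitive.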
    Iterator<FieldSchema> iterCols = getCols().iterator();
    List<String> colNames = new ArrayList<String>();
    while (iterCols.hasNext()) {
      String colName = iterCols.next().getName();
      Iterator<String> iter = colNames.iterator();
      while (iter.hasNext()) {
        String oldColName = iter.next();
        if (colName.equalsIgnoreCase(oldColName)) {
          throw new HiveException("Duplicate column name " + colName
              + " in the table definition.");
        }
      }
      colNames.add(colName.toLowerCase());
    }

    if (getPartCols() != null) {
      // there is no overlap between columns and partitioning columns
      Iterator<FieldSchema> partColsIter = getPartCols().iterator();
      while (partColsIter.hasNext()) {
        String partCol = partColsIter.next().getName();
        if (colNames.contains(partCol.toLowerCase())) {
          throw new HiveException("Partition column name " + partCol
              + " conflicts with table columns.");
        }
      }
    }
    return;
  }

  public void setInputFormatClass(Class<? extends InputFormat> inputFormatClass) {
    this.inputFormatClass = inputFormatClass;
    tTable.getSd().setInputFormat(inputFormatClass.getName());
  }

  public void setOutputFormatClass(Class<? extends HiveOutputFormat> outputFormatClass) {
    this.outputFormatClass = outputFormatClass;
    tTable.getSd().setOutputFormat(outputFormatClass.getName());
  }

  final public Properties getSchema() {
    return MetaStoreUtils.getSchema(tTable);
  }

  final public Path getPath() {
    String location = tTable.getSd().getLocation();
    if (location == null) {
      return null;
    }
    return new Path(location);
  }

  final public String getTableName() {
    return tTable.getTableName();
  }

  final public URI getDataLocation() {
    if (uri == null) {
      Path path = getPath();
      if (path != null) {
        uri = path.toUri();
      }
    }
    return uri;
  }

  final public Deserializer getDeserializer() {
    if (deserializer == null) {
      try {
        deserializer = MetaStoreUtils.getDeserializer(Hive.get().getConf(), tTable);
      } catch (MetaException e) {
        throw new RuntimeException(e);
      } catch (HiveException e) {
        throw new RuntimeException(e);
      }
    }
    return deserializer;
  }

  public HiveStorageHandler getStorageHandler() {
    if (storageHandler != null) {
      return storageHandler;
    }
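    // A non-native table names its HiveStorageHandler implementation in the
    // META_TABLE_STORAGE table property; native tables have no such property.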
    try {
      storageHandler = HiveUtils.getStorageHandler(
        Hive.get().getConf(),
        getProperty(
          org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_STORAGE));
    } catch (Exception e) {
      throw new RuntimeException(e);
    }
    return storageHandler;
  }

  final public Class<? extends InputFormat> getInputFormatClass() {
    if (inputFormatClass == null) {
      try {
        String className = tTable.getSd().getInputFormat();
        if (className == null) {
          if (getStorageHandler() == null) {
            return null;
          }
          inputFormatClass = getStorageHandler().getInputFormatClass();
        } else {
          inputFormatClass = (Class<? extends InputFormat>)
            Class.forName(className, true, JavaUtils.getClassLoader());
        }
      } catch (ClassNotFoundException e) {
        throw new RuntimeException(e);
      }
    }
    return inputFormatClass;
  }

  final public Class<? extends HiveOutputFormat> getOutputFormatClass() {
    // Replace FileOutputFormat for backward compatibility

    if (outputFormatClass == null) {
      try {
        String className = tTable.getSd().getOutputFormat();
        Class<?> c;
        if (className == null) {
          if (getStorageHandler() == null) {
            return null;
          }
          c = getStorageHandler().getOutputFormatClass();
        } else {
          c = Class.forName(className, true,
            JavaUtils.getClassLoader());
        }
        if (!HiveOutputFormat.class.isAssignableFrom(c)) {
          outputFormatClass = HiveFileFormatUtils.getOutputFormatSubstitute(c);
        } else {
          outputFormatClass = (Class<? extends HiveOutputFormat>)c;
        }
      } catch (ClassNotFoundException e) {
        throw new RuntimeException(e);
      }
    }
    return outputFormatClass;
  }

  final public boolean isValidSpec(Map<String, String> spec)
      throws HiveException {

    // TODO - types need to be checked.
    List<FieldSchema> partCols = tTable.getPartitionKeys();
    if (partCols == null || (partCols.size() == 0)) {
      if (spec != null) {
        throw new HiveException(
            "table is not partitioned but partition spec exists: " + spec);
      } else {
        return true;
      }
    }

    if ((spec == null) || (spec.size() != partCols.size())) {
      throw new HiveException(
          "table is partitioned but partition spec is not specified or does not match the partition columns: "
              + spec);
    }

    for (FieldSchema field : partCols) {
      if (spec.get(field.getName()) == null) {
        throw new HiveException(field.getName()
            + " not found in table's partition spec: " + spec);
      }
    }

    return true;
  }
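
  // Illustrative sketch (not in the original source): for a table partitioned
  // by (ds, hr), a spec such as {ds=2010-01-01, hr=12} passes isValidSpec(),
  // while a null spec, a spec with only ds, or a spec naming an unknown column
  // makes it throw HiveException. The column names and values are hypothetical.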

  public void setProperty(String name, String value) {
    tTable.getParameters().put(name, value);
  }

  public String getProperty(String name) {
    return tTable.getParameters().get(name);
  }

  public void setTableType(TableType tableType) {
    tTable.setTableType(tableType.toString());
  }

  public TableType getTableType() {
    return Enum.valueOf(TableType.class, tTable.getTableType());
  }

  public ArrayList<StructField> getFields() {

    ArrayList<StructField> fields = new ArrayList<StructField>();
    try {
      Deserializer decoder = getDeserializer();

      // Expand out all the columns of the table
      StructObjectInspector structObjectInspector = (StructObjectInspector) decoder
          .getObjectInspector();
      List<? extends StructField> fld_lst = structObjectInspector
          .getAllStructFieldRefs();
      for (StructField field : fld_lst) {
        fields.add(field);
      }
    } catch (SerDeException e) {
      throw new RuntimeException(e);
    }
    return fields;
  }

  public StructField getField(String fld) {
    try {
      StructObjectInspector structObjectInspector = (StructObjectInspector) getDeserializer()
          .getObjectInspector();
      return structObjectInspector.getStructFieldRef(fld);
    } catch (Exception e) {
      throw new RuntimeException(e);
    }
  }

  @Override
  public String toString() {
    return tTable.getTableName();
  }

  public List<FieldSchema> getPartCols() {
    List<FieldSchema> partKeys = tTable.getPartitionKeys();
    if (partKeys == null) {
      partKeys = new ArrayList<FieldSchema>();
      tTable.setPartitionKeys(partKeys);
    }
    return partKeys;
  }

  public boolean isPartitionKey(String colName) {
    for (FieldSchema key : getPartCols()) {
      if (key.getName().equalsIgnoreCase(colName)) {
        return true;
      }
    }
    return false;
  }

  // TODO merge this with getBucketCols function
  public String getBucketingDimensionId() {
    List<String> bcols = tTable.getSd().getBucketCols();
    if (bcols == null || bcols.size() == 0) {
      return null;
    }

    if (bcols.size() > 1) {
      LOG.warn(this
          + " table has more than one bucketing dimension, which is not supported yet");
    }

    return bcols.get(0);
  }

  public void setDataLocation(URI uri) {
    this.uri = uri;
    tTable.getSd().setLocation(uri.toString());
  }

  public void unsetDataLocation() {
    this.uri = null;
    tTable.getSd().unsetLocation();
  }

  public void setBucketCols(List<String> bucketCols) throws HiveException {
    if (bucketCols == null) {
      return;
    }

    for (String col : bucketCols) {
      if (!isField(col)) {
        throw new HiveException("Bucket column " + col
            + " is not part of the table columns (" + getCols() + ")");
      }
    }
    tTable.getSd().setBucketCols(bucketCols);
  }

  public void setSortCols(List<Order> sortOrder) throws HiveException {
    tTable.getSd().setSortCols(sortOrder);
  }

  private boolean isField(String col) {
    for (FieldSchema field : getCols()) {
      if (field.getName().equals(col)) {
        return true;
      }
    }
    return false;
  }

  public List<FieldSchema> getCols() {
    boolean getColsFromSerDe = SerDeUtils.shouldGetColsFromSerDe(
      getSerializationLib());
    if (!getColsFromSerDe) {
      return tTable.getSd().getCols();
    } else {
      try {
        return Hive.getFieldsFromDeserializer(getTableName(), getDeserializer());
      } catch (HiveException e) {
        LOG.error("Unable to get field from serde: " + getSerializationLib(), e);
      }
      return new ArrayList<FieldSchema>();
    }
  }

  /**
   * Returns a list of all the columns of the table (partition columns first,
   * followed by the data columns).
   *
   * @return List<FieldSchema>
   */
  public List<FieldSchema> getAllCols() {
    ArrayList<FieldSchema> f_list = new ArrayList<FieldSchema>();
    f_list.addAll(getPartCols());
    f_list.addAll(getCols());
    return f_list;
  }

  public void setPartCols(List<FieldSchema> partCols) {
    tTable.setPartitionKeys(partCols);
  }

  public String getDbName() {
    return tTable.getDbName();
  }

  public int getNumBuckets() {
    return tTable.getSd().getNumBuckets();
  }

  /**
   * Replaces the directory corresponding to the table by srcf. Works by
   * deleting the table directory and renaming the source directory.
   *
   * @param srcf
   *          Source directory
   */
  protected void replaceFiles(Path srcf) throws HiveException {
    Path tableDest = new Path(getDataLocation().getPath());
    Hive.replaceFiles(srcf, tableDest, tableDest, Hive.get().getConf());
  }

  /**
   * Inserts the specified files into the table. Works by moving the files.
   *
   * @param srcf
   *          Files to be moved. Leaf directories or globbed file paths
   */
  protected void copyFiles(Path srcf) throws HiveException {
    FileSystem fs;
    try {
      fs = FileSystem.get(getDataLocation(), Hive.get().getConf());
      Hive.copyFiles(srcf, new Path(getDataLocation().getPath()), fs);
    } catch (IOException e) {
      throw new HiveException("copyFiles: filesystem error in check phase", e);
    }
  }

  public void setInputFormatClass(String name) throws HiveException {
    if (name == null) {
      inputFormatClass = null;
      tTable.getSd().setInputFormat(null);
      return;
    }
    try {
      setInputFormatClass((Class<? extends InputFormat<WritableComparable, Writable>>) Class
          .forName(name, true, JavaUtils.getClassLoader()));
    } catch (ClassNotFoundException e) {
      throw new HiveException("Class not found: " + name, e);
    }
  }

  public void setOutputFormatClass(String name) throws HiveException {
    if (name == null) {
      outputFormatClass = null;
      tTable.getSd().setOutputFormat(null);
      return;
    }
    try {
      Class<?> origin = Class.forName(name, true, JavaUtils.getClassLoader());
      setOutputFormatClass(HiveFileFormatUtils
          .getOutputFormatSubstitute(origin));
    } catch (ClassNotFoundException e) {
      throw new HiveException("Class not found: " + name, e);
    }
  }
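
  // Illustrative sketch (not in the original source): switching a table's file
  // format by class name; both class names below ship with Hadoop/Hive.
  //
  //   tbl.setInputFormatClass("org.apache.hadoop.mapred.TextInputFormat");
  //   tbl.setOutputFormatClass(
  //       "org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat");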

  public boolean isPartitioned() {
    if (getPartCols() == null) {
      return false;
    }
    return (getPartCols().size() != 0);
  }

  public void setFields(List<FieldSchema> fields) {
    tTable.getSd().setCols(fields);
  }

  public void setNumBuckets(int nb) {
    tTable.getSd().setNumBuckets(nb);
  }

  /**
   * @return The owner of the table.
   * @see org.apache.hadoop.hive.metastore.api.Table#getOwner()
   */
  public String getOwner() {
    return tTable.getOwner();
  }

  /**
   * @return The table parameters.
   * @see org.apache.hadoop.hive.metastore.api.Table#getParameters()
   */
  public Map<String, String> getParameters() {
    return tTable.getParameters();
  }

  /**
   * @return The retention on the table.
   * @see org.apache.hadoop.hive.metastore.api.Table#getRetention()
   */
  public int getRetention() {
    return tTable.getRetention();
  }

  /**
   * @param owner
   * @see org.apache.hadoop.hive.metastore.api.Table#setOwner(java.lang.String)
   */
  public void setOwner(String owner) {
    tTable.setOwner(owner);
  }

  /**
   * @param retention
   * @see org.apache.hadoop.hive.metastore.api.Table#setRetention(int)
   */
  public void setRetention(int retention) {
    tTable.setRetention(retention);
  }

  private SerDeInfo getSerdeInfo() {
    return tTable.getSd().getSerdeInfo();
  }

  public void setSerializationLib(String lib) {
    getSerdeInfo().setSerializationLib(lib);
  }

  public String getSerializationLib() {
    return getSerdeInfo().getSerializationLib();
  }

  public String getSerdeParam(String param) {
    return getSerdeInfo().getParameters().get(param);
  }

  public String setSerdeParam(String param, String value) {
    return getSerdeInfo().getParameters().put(param, value);
  }

  public List<String> getBucketCols() {
    return tTable.getSd().getBucketCols();
  }

  public List<Order> getSortCols() {
    return tTable.getSd().getSortCols();
  }

  public void setTableName(String tableName) {
    tTable.setTableName(tableName);
  }

  public void setDbName(String databaseName) {
    tTable.setDbName(databaseName);
  }

  public List<FieldSchema> getPartitionKeys() {
    return tTable.getPartitionKeys();
  }

  /**
   * @return the original view text, or null if this table is not a view
   */
  public String getViewOriginalText() {
    return tTable.getViewOriginalText();
  }

  /**
   * @param viewOriginalText
   *          the original view text to set
   */
  public void setViewOriginalText(String viewOriginalText) {
    tTable.setViewOriginalText(viewOriginalText);
  }

  /**
   * @return the expanded view text, or null if this table is not a view
   */
  public String getViewExpandedText() {
    return tTable.getViewExpandedText();
  }

  public void clearSerDeInfo() {
    tTable.getSd().getSerdeInfo().getParameters().clear();
  }

  /**
   * @param viewExpandedText
   *          the expanded view text to set
   */
  public void setViewExpandedText(String viewExpandedText) {
    tTable.setViewExpandedText(viewExpandedText);
  }

  /**
   * @return whether this table is actually a view
   */
  public boolean isView() {
    return TableType.VIRTUAL_VIEW.equals(getTableType());
  }

  /**
   * Creates a partition name -> value spec map object
   *
   * @param tp
   *          Use the information from this partition.
   * @return Partition name to value mapping.
   */
  public LinkedHashMap<String, String> createSpec(
      org.apache.hadoop.hive.metastore.api.Partition tp) {

    List<FieldSchema> fsl = getPartCols();
    List<String> tpl = tp.getValues();
    LinkedHashMap<String, String> spec = new LinkedHashMap<String, String>();
    for (int i = 0; i < fsl.size(); i++) {
      FieldSchema fs = fsl.get(i);
      String value = tpl.get(i);
      spec.put(fs.getName(), value);
    }
    return spec;
  }
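
  // Illustrative sketch (not in the original source): for a table partitioned
  // by (ds, hr) and a metastore Partition whose values are ["2010-01-01", "12"],
  // createSpec() returns the ordered map {ds=2010-01-01, hr=12}; values are
  // matched to partition columns by position.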

  public Table copy() throws HiveException {
    return new Table(tTable.deepCopy());
  }

  public void setCreateTime(int createTime) {
    tTable.setCreateTime(createTime);
  }

  public int getLastAccessTime() {
    return tTable.getLastAccessTime();
  }

  public void setLastAccessTime(int lastAccessTime) {
    tTable.setLastAccessTime(lastAccessTime);
  }

  public boolean isNonNative() {
    return getProperty(
      org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_STORAGE)
      != null;
  }

  /**
   * @param protectMode
   */
  public void setProtectMode(ProtectMode protectMode) {
    Map<String, String> parameters = tTable.getParameters();
    parameters.put(ProtectMode.PARAMETER_NAME, protectMode.toString());
    tTable.setParameters(parameters);
  }

  /**
   * @return protect mode
   */
  public ProtectMode getProtectMode() {
    Map<String, String> parameters = tTable.getParameters();

    if (!parameters.containsKey(ProtectMode.PARAMETER_NAME)) {
      return new ProtectMode();
    } else {
      return ProtectMode.getProtectModeFromString(
          parameters.get(ProtectMode.PARAMETER_NAME));
    }
  }

  /**
   * @return True if the protect mode indicates that the table is offline.
   */
  public boolean isOffline() {
    return getProtectMode().offline;
  }

  /**
   * @return True if the protect mode attribute of the table indicates
   * that it is OK to drop the table
   */
  public boolean canDrop() {
    ProtectMode mode = getProtectMode();
    return (!mode.noDrop && !mode.offline && !mode.readOnly);
  }

  /**
   * @return True if the protect mode attribute of the table indicates
   * that it is OK to write to the table
   */
  public boolean canWrite() {
    ProtectMode mode = getProtectMode();
    return (!mode.offline && !mode.readOnly);
  }

  /**
   * @return the complete name of the table, including the database name
   */
  public String getCompleteName() {
    return getDbName() + "@" + getTableName();
  }
}