/tags/release-0.1-rc2/hive/external/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java

   1/**
   2 * Licensed to the Apache Software Foundation (ASF) under one
   3 * or more contributor license agreements.  See the NOTICE file
   4 * distributed with this work for additional information
   5 * regarding copyright ownership.  The ASF licenses this file
   6 * to you under the Apache License, Version 2.0 (the
   7 * "License"); you may not use this file except in compliance
   8 * with the License.  You may obtain a copy of the License at
   9 *
  10 *     http://www.apache.org/licenses/LICENSE-2.0
  11 *
  12 * Unless required by applicable law or agreed to in writing, software
  13 * distributed under the License is distributed on an "AS IS" BASIS,
  14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  15 * See the License for the specific language governing permissions and
  16 * limitations under the License.
  17 */
  18
  19package org.apache.hadoop.hive.ql.exec;
  20
  21import static org.apache.commons.lang.StringUtils.join;
  22import static org.apache.hadoop.util.StringUtils.stringifyException;
  23
  24import java.io.BufferedWriter;
  25import java.io.DataOutput;
  26import java.io.FileNotFoundException;
  27import java.io.IOException;
  28import java.io.OutputStreamWriter;
  29import java.io.Serializable;
  30import java.io.Writer;
  31import java.net.URI;
  32import java.net.URISyntaxException;
  33import java.util.ArrayList;
  34import java.util.Collections;
  35import java.util.Comparator;
  36import java.util.HashSet;
  37import java.util.Iterator;
  38import java.util.List;
  39import java.util.Map;
  40import java.util.Set;
  41import java.util.SortedSet;
  42import java.util.TreeSet;
  43import java.util.Map.Entry;
  44
  45import org.apache.commons.logging.Log;
  46import org.apache.commons.logging.LogFactory;
  47import org.apache.hadoop.fs.FSDataOutputStream;
  48import org.apache.hadoop.fs.FileStatus;
  49import org.apache.hadoop.fs.FileSystem;
  50import org.apache.hadoop.fs.FsShell;
  51import org.apache.hadoop.fs.Path;
  52import org.apache.hadoop.hive.conf.HiveConf;
  53import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
  54import org.apache.hadoop.hive.metastore.MetaStoreUtils;
  55import org.apache.hadoop.hive.metastore.ProtectMode;
  56import org.apache.hadoop.hive.metastore.TableType;
  57import org.apache.hadoop.hive.metastore.Warehouse;
  58import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
  59import org.apache.hadoop.hive.metastore.api.Database;
  60import org.apache.hadoop.hive.metastore.api.FieldSchema;
  61import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
  62import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
  63import org.apache.hadoop.hive.metastore.api.HiveObjectType;
  64import org.apache.hadoop.hive.metastore.api.Index;
  65import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
  66import org.apache.hadoop.hive.metastore.api.MetaException;
  67import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
  68import org.apache.hadoop.hive.metastore.api.Order;
  69import org.apache.hadoop.hive.metastore.api.PrincipalType;
  70import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
  71import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo;
  72import org.apache.hadoop.hive.metastore.api.Role;
  73import org.apache.hadoop.hive.ql.Context;
  74import org.apache.hadoop.hive.ql.DriverContext;
  75import org.apache.hadoop.hive.ql.QueryPlan;
  76import org.apache.hadoop.hive.ql.hooks.ReadEntity;
  77import org.apache.hadoop.hive.ql.hooks.WriteEntity;
  78import org.apache.hadoop.hive.ql.lockmgr.HiveLock;
  79import org.apache.hadoop.hive.ql.lockmgr.HiveLockManager;
  80import org.apache.hadoop.hive.ql.lockmgr.HiveLockMode;
  81import org.apache.hadoop.hive.ql.lockmgr.HiveLockObject;
  82import org.apache.hadoop.hive.ql.lockmgr.HiveLockObject.HiveLockObjectData;
  83import org.apache.hadoop.hive.ql.metadata.CheckResult;
  84import org.apache.hadoop.hive.ql.metadata.Hive;
  85import org.apache.hadoop.hive.ql.metadata.HiveException;
  86import org.apache.hadoop.hive.ql.metadata.HiveMetaStoreChecker;
  87import org.apache.hadoop.hive.ql.metadata.HiveStorageHandler;
  88import org.apache.hadoop.hive.ql.metadata.InvalidTableException;
  89import org.apache.hadoop.hive.ql.metadata.MetaDataFormatUtils;
  90import org.apache.hadoop.hive.ql.metadata.Partition;
  91import org.apache.hadoop.hive.ql.metadata.Table;
  92import org.apache.hadoop.hive.ql.plan.AddPartitionDesc;
  93import org.apache.hadoop.hive.ql.plan.AlterDatabaseDesc;
  94import org.apache.hadoop.hive.ql.plan.AlterIndexDesc;
  95import org.apache.hadoop.hive.ql.plan.AlterTableDesc;
  96import org.apache.hadoop.hive.ql.plan.AlterTableSimpleDesc;
  97import org.apache.hadoop.hive.ql.plan.CreateDatabaseDesc;
  98import org.apache.hadoop.hive.ql.plan.CreateIndexDesc;
  99import org.apache.hadoop.hive.ql.plan.CreateTableDesc;
 100import org.apache.hadoop.hive.ql.plan.CreateTableLikeDesc;
 101import org.apache.hadoop.hive.ql.plan.CreateViewDesc;
 102import org.apache.hadoop.hive.ql.plan.DDLWork;
 103import org.apache.hadoop.hive.ql.plan.DescDatabaseDesc;
 104import org.apache.hadoop.hive.ql.plan.DescFunctionDesc;
 105import org.apache.hadoop.hive.ql.plan.DescTableDesc;
 106import org.apache.hadoop.hive.ql.plan.DropDatabaseDesc;
 107import org.apache.hadoop.hive.ql.plan.DropIndexDesc;
 108import org.apache.hadoop.hive.ql.plan.DropTableDesc;
 109import org.apache.hadoop.hive.ql.plan.GrantDesc;
 110import org.apache.hadoop.hive.ql.plan.GrantRevokeRoleDDL;
 111import org.apache.hadoop.hive.ql.plan.LockTableDesc;
 112import org.apache.hadoop.hive.ql.plan.MsckDesc;
 113import org.apache.hadoop.hive.ql.plan.PrincipalDesc;
 114import org.apache.hadoop.hive.ql.plan.PrivilegeDesc;
 115import org.apache.hadoop.hive.ql.plan.PrivilegeObjectDesc;
 116import org.apache.hadoop.hive.ql.plan.RevokeDesc;
 117import org.apache.hadoop.hive.ql.plan.RoleDDLDesc;
 118import org.apache.hadoop.hive.ql.plan.ShowDatabasesDesc;
 119import org.apache.hadoop.hive.ql.plan.ShowFunctionsDesc;
 120import org.apache.hadoop.hive.ql.plan.ShowGrantDesc;
 121import org.apache.hadoop.hive.ql.plan.ShowIndexesDesc;
 122import org.apache.hadoop.hive.ql.plan.ShowLocksDesc;
 123import org.apache.hadoop.hive.ql.plan.ShowPartitionsDesc;
 124import org.apache.hadoop.hive.ql.plan.ShowTableStatusDesc;
 125import org.apache.hadoop.hive.ql.plan.ShowTablesDesc;
 126import org.apache.hadoop.hive.ql.plan.SwitchDatabaseDesc;
 127import org.apache.hadoop.hive.ql.plan.UnlockTableDesc;
 128import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes;
 129import org.apache.hadoop.hive.ql.plan.api.StageType;
 130import org.apache.hadoop.hive.ql.security.authorization.Privilege;
 131import org.apache.hadoop.hive.serde.Constants;
 132import org.apache.hadoop.hive.serde2.Deserializer;
 133import org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe;
 134import org.apache.hadoop.hive.serde2.SerDeException;
 135import org.apache.hadoop.hive.serde2.SerDeUtils;
 136import org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe;
 137import org.apache.hadoop.hive.serde2.dynamic_type.DynamicSerDe;
 138import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
 139import org.apache.hadoop.hive.shims.HadoopShims;
 140import org.apache.hadoop.hive.shims.ShimLoader;
 141import org.apache.hadoop.util.ToolRunner;
 142
 143/**
 144 * DDLTask implementation.
 145 *
 146 **/
 147public class DDLTask extends Task<DDLWork> implements Serializable {
 148  private static final long serialVersionUID = 1L;
 149  private static final Log LOG = LogFactory.getLog("hive.ql.exec.DDLTask");
 150
 151  transient HiveConf conf;
 152  private static final int separator = Utilities.tabCode;
 153  private static final int terminator = Utilities.newLineCode;
 154
 155  // These are suffixes attached to intermediate directory names used in the
 156  // archiving / un-archiving process.
 157  private static String INTERMEDIATE_ARCHIVED_DIR_SUFFIX;
 158  private static String INTERMEDIATE_ORIGINAL_DIR_SUFFIX;
 159  private static String INTERMEDIATE_EXTRACTED_DIR_SUFFIX;
 160
 161  public DDLTask() {
 162    super();
 163  }
 164
 165  @Override
 166  public void initialize(HiveConf conf, QueryPlan queryPlan, DriverContext ctx) {
 167    super.initialize(conf, queryPlan, ctx);
 168    this.conf = conf;
 169
 170    INTERMEDIATE_ARCHIVED_DIR_SUFFIX =
 171      HiveConf.getVar(conf, ConfVars.METASTORE_INT_ARCHIVED);
 172    INTERMEDIATE_ORIGINAL_DIR_SUFFIX =
 173      HiveConf.getVar(conf, ConfVars.METASTORE_INT_ORIGINAL);
 174    INTERMEDIATE_EXTRACTED_DIR_SUFFIX =
 175      HiveConf.getVar(conf, ConfVars.METASTORE_INT_EXTRACTED);
 176  }
 177
 178  @Override
 179  public int execute(DriverContext driverContext) {
 180
 181    // Create the db
 182    Hive db;
 183    try {
 184      db = Hive.get(conf);
 185
 186      CreateDatabaseDesc createDatabaseDesc = work.getCreateDatabaseDesc();
 187      if (null != createDatabaseDesc) {
 188        return createDatabase(db, createDatabaseDesc);
 189      }
 190
 191      DropDatabaseDesc dropDatabaseDesc = work.getDropDatabaseDesc();
 192      if (dropDatabaseDesc != null) {
 193        return dropDatabase(db, dropDatabaseDesc);
 194      }
 195
 196      SwitchDatabaseDesc switchDatabaseDesc = work.getSwitchDatabaseDesc();
 197      if (switchDatabaseDesc != null) {
 198        return switchDatabase(db, switchDatabaseDesc);
 199      }
 200
 201      DescDatabaseDesc descDatabaseDesc = work.getDescDatabaseDesc();
 202      if (descDatabaseDesc != null) {
 203        return descDatabase(descDatabaseDesc);
 204      }
 205
 206      AlterDatabaseDesc alterDatabaseDesc = work.getAlterDatabaseDesc();
 207      if (alterDatabaseDesc != null) {
 208        return alterDatabase(alterDatabaseDesc);
 209      }
 210
 211      CreateTableDesc crtTbl = work.getCreateTblDesc();
 212      if (crtTbl != null) {
 213        return createTable(db, crtTbl);
 214      }
 215
 216      CreateIndexDesc crtIndex = work.getCreateIndexDesc();
 217      if (crtIndex != null) {
 218        return createIndex(db, crtIndex);
 219      }
 220
 221      AlterIndexDesc alterIndex = work.getAlterIndexDesc();
 222      if (alterIndex != null) {
 223        return alterIndex(db, alterIndex);
 224      }
 225
 226      DropIndexDesc dropIdx = work.getDropIdxDesc();
 227      if (dropIdx != null) {
 228        return dropIndex(db, dropIdx);
 229      }
 230
 231      CreateTableLikeDesc crtTblLike = work.getCreateTblLikeDesc();
 232      if (crtTblLike != null) {
 233        return createTableLike(db, crtTblLike);
 234      }
 235
 236      DropTableDesc dropTbl = work.getDropTblDesc();
 237      if (dropTbl != null) {
 238        return dropTable(db, dropTbl);
 239      }
 240
 241      AlterTableDesc alterTbl = work.getAlterTblDesc();
 242      if (alterTbl != null) {
 243        return alterTable(db, alterTbl);
 244      }
 245
 246      CreateViewDesc crtView = work.getCreateViewDesc();
 247      if (crtView != null) {
 248        return createView(db, crtView);
 249      }
 250
 251      AddPartitionDesc addPartitionDesc = work.getAddPartitionDesc();
 252      if (addPartitionDesc != null) {
 253        return addPartition(db, addPartitionDesc);
 254      }
 255
 256      AlterTableSimpleDesc simpleDesc = work.getAlterTblSimpleDesc();
 257      if (simpleDesc != null) {
 258        if (simpleDesc.getType() == AlterTableTypes.TOUCH) {
 259          return touch(db, simpleDesc);
 260        } else if (simpleDesc.getType() == AlterTableTypes.ARCHIVE) {
 261          return archive(db, simpleDesc, driverContext);
 262        } else if (simpleDesc.getType() == AlterTableTypes.UNARCHIVE) {
 263          return unarchive(db, simpleDesc);
 264        }
 265      }
 266
 267      MsckDesc msckDesc = work.getMsckDesc();
 268      if (msckDesc != null) {
 269        return msck(db, msckDesc);
 270      }
 271
 272      DescTableDesc descTbl = work.getDescTblDesc();
 273      if (descTbl != null) {
 274        return describeTable(db, descTbl);
 275      }
 276
 277      DescFunctionDesc descFunc = work.getDescFunctionDesc();
 278      if (descFunc != null) {
 279        return describeFunction(descFunc);
 280      }
 281
 282      ShowDatabasesDesc showDatabases = work.getShowDatabasesDesc();
 283      if (showDatabases != null) {
 284        return showDatabases(db, showDatabases);
 285      }
 286
 287      ShowTablesDesc showTbls = work.getShowTblsDesc();
 288      if (showTbls != null) {
 289        return showTables(db, showTbls);
 290      }
 291
 292      ShowTableStatusDesc showTblStatus = work.getShowTblStatusDesc();
 293      if (showTblStatus != null) {
 294        return showTableStatus(db, showTblStatus);
 295      }
 296
 297      ShowFunctionsDesc showFuncs = work.getShowFuncsDesc();
 298      if (showFuncs != null) {
 299        return showFunctions(showFuncs);
 300      }
 301
 302      ShowLocksDesc showLocks = work.getShowLocksDesc();
 303      if (showLocks != null) {
 304        return showLocks(showLocks);
 305      }
 306
 307      LockTableDesc lockTbl = work.getLockTblDesc();
 308      if (lockTbl != null) {
 309        return lockTable(lockTbl);
 310      }
 311
 312      UnlockTableDesc unlockTbl = work.getUnlockTblDesc();
 313      if (unlockTbl != null) {
 314        return unlockTable(unlockTbl);
 315      }
 316
 317      ShowPartitionsDesc showParts = work.getShowPartsDesc();
 318      if (showParts != null) {
 319        return showPartitions(db, showParts);
 320      }
 321
 322      RoleDDLDesc roleDDLDesc = work.getRoleDDLDesc();
 323      if (roleDDLDesc != null) {
 324        return roleDDL(roleDDLDesc);
 325      }
 326
 327      GrantDesc grantDesc = work.getGrantDesc();
 328      if (grantDesc != null) {
 329        return grantOrRevokePrivileges(grantDesc.getPrincipals(), grantDesc
 330            .getPrivileges(), grantDesc.getPrivilegeSubjectDesc(), grantDesc.getGrantor(), grantDesc.getGrantorType(), grantDesc.isGrantOption(), true);
 331      }
 332
 333      RevokeDesc revokeDesc = work.getRevokeDesc();
 334      if (revokeDesc != null) {
 335        return grantOrRevokePrivileges(revokeDesc.getPrincipals(), revokeDesc
 336            .getPrivileges(), revokeDesc.getPrivilegeSubjectDesc(), null, null, false, false);
 337      }
 338
 339      ShowGrantDesc showGrantDesc = work.getShowGrantDesc();
 340      if (showGrantDesc != null) {
 341        return showGrants(showGrantDesc);
 342      }
 343
 344      GrantRevokeRoleDDL grantOrRevokeRoleDDL = work.getGrantRevokeRoleDDL();
 345      if (grantOrRevokeRoleDDL != null) {
 346        return grantOrRevokeRole(grantOrRevokeRoleDDL);
 347      }
 348
 349      ShowIndexesDesc showIndexes = work.getShowIndexesDesc();
 350      if (showIndexes != null) {
 351        return showIndexes(db, showIndexes);
 352      }
 353
 354    } catch (InvalidTableException e) {
 355      console.printError("Table " + e.getTableName() + " does not exist");
 356      LOG.debug(stringifyException(e));
 357      return 1;
 358    } catch (HiveException e) {
 359      console.printError("FAILED: Error in metadata: " + e.getMessage(), "\n"
 360          + stringifyException(e));
 361      LOG.debug(stringifyException(e));
 362      return 1;
 363    } catch (Exception e) {
 364      console.printError("Failed with exception " + e.getMessage(), "\n"
 365          + stringifyException(e));
 366      return (1);
 367    }
 368    assert false;
 369    return 0;
 370  }
 371
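      /**
       * Grant or revoke a list of roles for a list of principals, depending on
       * whether the descriptor represents a GRANT or a REVOKE.
       *
       * @param grantOrRevokeRoleDDL
       *          Descriptor with the principals, roles, grantor and grant option.
       * @return Returns 0 when execution succeeds.
       * @throws HiveException
       */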
 372  private int grantOrRevokeRole(GrantRevokeRoleDDL grantOrRevokeRoleDDL)
 373      throws HiveException {
 374    try {
 375      boolean grantRole = grantOrRevokeRoleDDL.getGrant();
 376      List<PrincipalDesc> principals = grantOrRevokeRoleDDL.getPrincipalDesc();
 377      List<String> roles = grantOrRevokeRoleDDL.getRoles();
 378      for (PrincipalDesc principal : principals) {
 379        String userName = principal.getName();
 380        for (String roleName : roles) {
 381          if (grantRole) {
 382            db.grantRole(roleName, userName, principal.getType(),
 383                grantOrRevokeRoleDDL.getGrantor(), grantOrRevokeRoleDDL
 384                    .getGrantorType(), grantOrRevokeRoleDDL.isGrantOption());
 385          } else {
 386            db.revokeRole(roleName, userName, principal.getType());
 387          }
 388        }
 389      }
 390    } catch (Exception e) {
 391      throw new HiveException(e);
 392    }
 393    return 0;
 394  }
 395
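      /**
       * Write the privileges granted to the given principal to the result file.
       * Depending on the descriptor this covers global, database, table,
       * partition or column level grants.
       *
       * @param showGrantDesc
       *          Descriptor naming the principal, the object and the result file.
       * @return Returns 0 when execution succeeds and above 0 if it fails.
       * @throws HiveException
       */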
 396  private int showGrants(ShowGrantDesc showGrantDesc) throws HiveException {
 397    try {
 398      Path resFile = new Path(showGrantDesc.getResFile());
 399      FileSystem fs = resFile.getFileSystem(conf);
 400      DataOutput outStream = fs.create(resFile);
 401      PrincipalDesc principalDesc = showGrantDesc.getPrincipalDesc();
 402      PrivilegeObjectDesc hiveObjectDesc = showGrantDesc.getHiveObj();
 403      String principalName = principalDesc.getName();
 404      if (hiveObjectDesc == null) {
 405        List<HiveObjectPrivilege> users = db.showPrivilegeGrant(
 406            HiveObjectType.GLOBAL, principalName, principalDesc.getType(),
 407            null, null, null, null);
 408        if (users != null && users.size() > 0) {
 409          boolean first = true;
 410          for (HiveObjectPrivilege usr : users) {
 411            if (!first) {
 412              outStream.write(terminator);
 413            } else {
 414              first = false;
 415            }
 416
 417            writeGrantInfo(outStream, principalDesc.getType(), principalName,
 418                null, null, null, null, usr.getGrantInfo());
 419
 420          }
 421        }
 422      } else {
 423        String obj = hiveObjectDesc.getObject();
 424        boolean notFound = true;
 425        String dbName = null;
 426        String tableName = null;
 427        Table tableObj = null;
 428        Database dbObj = null;
 429
 430        if (hiveObjectDesc.getTable()) {
 431          String[] dbTab = obj.split("\\.");
 432          if (dbTab.length == 2) {
 433            dbName = dbTab[0];
 434            tableName = dbTab[1];
 435          } else {
 436            dbName = db.getCurrentDatabase();
 437            tableName = obj;
 438          }
 439          dbObj = db.getDatabase(dbName);
 440          tableObj = db.getTable(dbName, tableName);
 441          notFound = (dbObj == null || tableObj == null);
 442        } else {
 443          dbName = hiveObjectDesc.getObject();
 444          dbObj = db.getDatabase(dbName);
 445          notFound = (dbObj == null);
 446        }
 447        if (notFound) {
 448          throw new HiveException(obj + " can not be found");
 449        }
 450
 451        String partName = null;
 452        List<String> partValues = null;
 453        if (hiveObjectDesc.getPartSpec() != null) {
 454          partName = Warehouse
 455              .makePartName(hiveObjectDesc.getPartSpec(), false);
 456          partValues = Warehouse.getPartValuesFromPartName(partName);
 457        }
 458
 459        if (!hiveObjectDesc.getTable()) {
 460          // show database level privileges
 461          List<HiveObjectPrivilege> dbs = db.showPrivilegeGrant(HiveObjectType.DATABASE, principalName,
 462              principalDesc.getType(), dbName, null, null, null);
 463          if (dbs != null && dbs.size() > 0) {
 464            boolean first = true;
 465            for (HiveObjectPrivilege db : dbs) {
 466              if (!first) {
 467                outStream.write(terminator);
 468              } else {
 469                first = false;
 470              }
 471
 472              writeGrantInfo(outStream, principalDesc.getType(), principalName,
 473                  dbName, null, null, null, db.getGrantInfo());
 474
 475            }
 476          }
 477
 478        } else {
 479          if (showGrantDesc.getColumns() != null) {
 480            // show column level privileges
 481            for (String columnName : showGrantDesc.getColumns()) {
 482              List<HiveObjectPrivilege> columnss = db.showPrivilegeGrant(
 483                  HiveObjectType.COLUMN, principalName,
 484                  principalDesc.getType(), dbName, tableName, partValues,
 485                  columnName);
 486              if (columnss != null && columnss.size() > 0) {
 487                boolean first = true;
 488                for (HiveObjectPrivilege col : columnss) {
 489                  if (!first) {
 490                    outStream.write(terminator);
 491                  } else {
 492                    first = false;
 493                  }
 494
 495                  writeGrantInfo(outStream, principalDesc.getType(),
 496                      principalName, dbName, tableName, partName, columnName,
 497                      col.getGrantInfo());
 498                }
 499              }
 500            }
 501          } else if (hiveObjectDesc.getPartSpec() != null) {
 502            // show partition level privileges
 503            List<HiveObjectPrivilege> parts = db.showPrivilegeGrant(
 504                HiveObjectType.PARTITION, principalName, principalDesc
 505                    .getType(), dbName, tableName, partValues, null);
 506            if (parts != null && parts.size() > 0) {
 507              boolean first = true;
 508              for (HiveObjectPrivilege part : parts) {
 509                if (!first) {
 510                  outStream.write(terminator);
 511                } else {
 512                  first = false;
 513                }
 514
 515                writeGrantInfo(outStream, principalDesc.getType(),
 516                    principalName, dbName, tableName, partName, null, part.getGrantInfo());
 517
 518              }
 519            }
 520          } else {
 521            // show table level privileges
 522            List<HiveObjectPrivilege> tbls = db.showPrivilegeGrant(
 523                HiveObjectType.TABLE, principalName, principalDesc.getType(),
 524                dbName, tableName, null, null);
 525            if (tbls != null && tbls.size() > 0) {
 526              boolean first = true;
 527              for (HiveObjectPrivilege tbl : tbls) {
 528                if (!first) {
 529                  outStream.write(terminator);
 530                } else {
 531                  first = false;
 532                }
 533
 534                writeGrantInfo(outStream, principalDesc.getType(),
 535                    principalName, dbName, tableName, null, null, tbl.getGrantInfo());
 536
 537              }
 538            }
 539          }
 540        }
 541      }
 542      ((FSDataOutputStream) outStream).close();
 543    } catch (FileNotFoundException e) {
 544      LOG.info("show table status: " + stringifyException(e));
 545      return 1;
 546    } catch (IOException e) {
 547      LOG.info("show table status: " + stringifyException(e));
 548      return 1;
 549    } catch (Exception e) {
 550      e.printStackTrace();
 551      throw new HiveException(e);
 552    }
 553    return 0;
 554  }
 555
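      /**
       * Grant or revoke a set of privileges for the given principals on a
       * global, database, table, partition or column object. The privileges are
       * collected into a PrivilegeBag and applied once per principal.
       *
       * @return Returns 0 when execution succeeds and above 0 if it fails.
       */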
 556  private int grantOrRevokePrivileges(List<PrincipalDesc> principals,
 557      List<PrivilegeDesc> privileges, PrivilegeObjectDesc privSubjectDesc,
 558      String grantor, PrincipalType grantorType, boolean grantOption, boolean isGrant) {
 559    if (privileges == null || privileges.size() == 0) {
 560      console.printError("No privilege found.");
 561      return 1;
 562    }
 563
 564    String dbName = null;
 565    String tableName = null;
 566    Table tableObj = null;
 567    Database dbObj = null;
 568
 569    try {
 570
 571      if (privSubjectDesc != null) {
 572        if (privSubjectDesc.getPartSpec() != null && isGrant) {
 573          throw new HiveException("Grant does not support partition level.");
 574        }
 575        String obj = privSubjectDesc.getObject();
 576        boolean notFound = true;
 577        if (privSubjectDesc.getTable()) {
 578          String[] dbTab = obj.split("\\.");
 579          if (dbTab.length == 2) {
 580            dbName = dbTab[0];
 581            tableName = dbTab[1];
 582          } else {
 583            dbName = db.getCurrentDatabase();
 584            tableName = obj;
 585          }
 586          dbObj = db.getDatabase(dbName);
 587          tableObj = db.getTable(dbName, tableName);
 588          notFound = (dbObj == null || tableObj == null);
 589        } else {
 590          dbName = privSubjectDesc.getObject();
 591          dbObj = db.getDatabase(dbName);
 592          notFound = (dbObj == null);
 593        }
 594        if (notFound) {
 595          throw new HiveException(obj + " can not be found");
 596        }
 597      }
 598
 599      PrivilegeBag privBag = new PrivilegeBag();
 600      if (privSubjectDesc == null) {
 601        for (int idx = 0; idx < privileges.size(); idx++) {
 602          Privilege priv = privileges.get(idx).getPrivilege();
 603          if (privileges.get(idx).getColumns() != null
 604              && privileges.get(idx).getColumns().size() > 0) {
 605            throw new HiveException(
 606                "For user-level privileges, column sets should be null. columns="
 607                    + privileges.get(idx).getColumns().toString());
 608          }
 609
 610          privBag.addToPrivileges(new HiveObjectPrivilege(new HiveObjectRef(
 611              HiveObjectType.GLOBAL, null, null, null, null), null, null,
 612              new PrivilegeGrantInfo(priv.toString(), 0, grantor, grantorType,
 613                  grantOption)));
 614        }
 615      } else {
 616        org.apache.hadoop.hive.metastore.api.Partition partObj = null;
 617        List<String> partValues = null;
 618        if (tableObj != null) {
 619          if ((!tableObj.isPartitioned())
 620              && privSubjectDesc.getPartSpec() != null) {
 621            throw new HiveException(
 622                "Table is not partitioned, but partition name is present: partSpec="
 623                    + privSubjectDesc.getPartSpec().toString());
 624          }
 625
 626          if (privSubjectDesc.getPartSpec() != null) {
 627            partObj = db.getPartition(tableObj, privSubjectDesc.getPartSpec(),
 628                false).getTPartition();
 629            partValues = partObj.getValues();
 630          }
 631        }
 632
 633        for (PrivilegeDesc privDesc : privileges) {
 634          List<String> columns = privDesc.getColumns();
 635          Privilege priv = privDesc.getPrivilege();
 636          if (columns != null && columns.size() > 0) {
 637            if (!priv.supportColumnLevel()) {
 638              throw new HiveException(priv.toString()
 639                  + " does not support column level.");
 640            }
 641            if (privSubjectDesc == null || tableName == null) {
 642              throw new HiveException(
 643                  "For user-level/database-level privileges, column sets should be null. columns="
 644                      + columns);
 645            }
 646            for (int i = 0; i < columns.size(); i++) {
 647              privBag.addToPrivileges(new HiveObjectPrivilege(
 648                  new HiveObjectRef(HiveObjectType.COLUMN, dbName, tableName,
 649                      partValues, columns.get(i)), null, null,  new PrivilegeGrantInfo(priv.toString(), 0, grantor, grantorType, grantOption)));
 650            }
 651          } else {
 652            if (privSubjectDesc.getTable()) {
 653              if (privSubjectDesc.getPartSpec() != null) {
 654                privBag.addToPrivileges(new HiveObjectPrivilege(
 655                    new HiveObjectRef(HiveObjectType.PARTITION, dbName,
 656                        tableName, partValues, null), null, null,  new PrivilegeGrantInfo(priv.toString(), 0, grantor, grantorType, grantOption)));
 657              } else {
 658                privBag
 659                    .addToPrivileges(new HiveObjectPrivilege(
 660                        new HiveObjectRef(HiveObjectType.TABLE, dbName,
 661                            tableName, null, null), null, null, new PrivilegeGrantInfo(priv.toString(), 0, grantor, grantorType, grantOption)));
 662              }
 663            } else {
 664              privBag.addToPrivileges(new HiveObjectPrivilege(
 665                  new HiveObjectRef(HiveObjectType.DATABASE, dbName, null,
 666                      null, null), null, null, new PrivilegeGrantInfo(priv.toString(), 0, grantor, grantorType, grantOption)));
 667            }
 668          }
 669        }
 670      }
 671
 672      for (PrincipalDesc principal : principals) {
 673        for (int i = 0; i < privBag.getPrivileges().size(); i++) {
 674          HiveObjectPrivilege objPrivs = privBag.getPrivileges().get(i);
 675          objPrivs.setPrincipalName(principal.getName());
 676          objPrivs.setPrincipalType(principal.getType());
 677        }
 678        if (isGrant) {
 679          db.grantPrivileges(privBag);
 680        } else {
 681          db.revokePrivileges(privBag);
 682        }
 683      }
 684    } catch (Exception e) {
 685      console.printError("Error: " + e.getMessage());
 686      return 1;
 687    }
 688
 689    return 0;
 690  }
 691
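      /**
       * Handle role DDL: CREATE ROLE, DROP ROLE and SHOW ROLE GRANT. For
       * SHOW ROLE GRANT the role names are written to the descriptor's result file.
       *
       * @param roleDDLDesc
       *          Descriptor with the role operation, role name and result file.
       * @return Returns 0 when execution succeeds and above 0 if it fails.
       */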
 692  private int roleDDL(RoleDDLDesc roleDDLDesc) {
 693    RoleDDLDesc.RoleOperation operation = roleDDLDesc.getOperation();
 694    try {
 695      if (operation.equals(RoleDDLDesc.RoleOperation.CREATE_ROLE)) {
 696        db.createRole(roleDDLDesc.getName(), roleDDLDesc.getRoleOwnerName());
 697      } else if (operation.equals(RoleDDLDesc.RoleOperation.DROP_ROLE)) {
 698        db.dropRole(roleDDLDesc.getName());
 699      } else if (operation.equals(RoleDDLDesc.RoleOperation.SHOW_ROLE_GRANT)) {
 700        List<Role> roles = db.showRoleGrant(roleDDLDesc.getName(), roleDDLDesc
 701            .getPrincipalType());
 702        if (roles != null && roles.size() > 0) {
 703          Path resFile = new Path(roleDDLDesc.getResFile());
 704          FileSystem fs = resFile.getFileSystem(conf);
 705          DataOutput outStream = fs.create(resFile);
 706          for (Role role : roles) {
 707            outStream.writeBytes("role name:" + role.getRoleName());
 708            outStream.write(terminator);
 709          }
 710          ((FSDataOutputStream) outStream).close();
 711        }
 712      } else {
 713        throw new HiveException("Unkown role operation "
 714            + operation.getOperationName());
 715      }
 716    } catch (HiveException e) {
 717      console.printError("Error in role operation "
 718          + operation.getOperationName() + " on role name "
 719          + roleDDLDesc.getName() + ", error message " + e.getMessage());
 720      return 1;
 721    } catch (IOException e) {
 722      LOG.info("role ddl exception: " + stringifyException(e));
 723      return 1;
 724    }
 725
 726    return 0;
 727  }
 728
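      /**
       * Alter a database. Merges the new properties from the descriptor into the
       * existing database parameters and writes the result back to the metastore.
       *
       * @param alterDbDesc
       *          Descriptor with the database name and the new properties.
       * @return Returns 0 when execution succeeds and above 0 if it fails.
       * @throws HiveException
       */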
 729  private int alterDatabase(AlterDatabaseDesc alterDbDesc) throws HiveException {
 730
 731    String dbName = alterDbDesc.getDatabaseName();
 732    Database database = db.getDatabase(dbName);
 733    Map<String, String> newParams = alterDbDesc.getDatabaseProperties();
 734
 735    if (database != null) {
 736      Map<String, String> params = database.getParameters();
 737      // if both old and new params are not null, merge them
 738      if (params != null && newParams != null) {
 739        params.putAll(newParams);
 740        database.setParameters(params);
 741      } else { // if one of them is null, replace the old params with the new one
 742        database.setParameters(newParams);
 743      }
 744      db.alterDatabase(database.getName(), database);
 745    } else {
 746      throw new HiveException("ERROR: The database " + dbName + " does not exist.");
 747    }
 748    return 0;
 749  }
 750
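      /**
       * Drop an index from a table in the current database.
       *
       * @return Returns 0 when execution succeeds.
       * @throws HiveException
       */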
 751  private int dropIndex(Hive db, DropIndexDesc dropIdx) throws HiveException {
 752    db.dropIndex(db.getCurrentDatabase(), dropIdx.getTableName(),
 753        dropIdx.getIndexName(), true);
 754    return 0;
 755  }
 756
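      /**
       * Create an index. Validates the SerDe, if one was specified, and then
       * delegates to Hive.createIndex.
       *
       * @return Returns 0 when execution succeeds.
       * @throws HiveException
       */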
 757  private int createIndex(Hive db, CreateIndexDesc crtIndex) throws HiveException {
 758
 759    if( crtIndex.getSerde() != null) {
 760      validateSerDe(crtIndex.getSerde());
 761    }
 762
 763    db
 764        .createIndex(
 765        crtIndex.getTableName(), crtIndex.getIndexName(), crtIndex.getIndexTypeHandlerClass(),
 766        crtIndex.getIndexedCols(), crtIndex.getIndexTableName(), crtIndex.getDeferredRebuild(),
 767        crtIndex.getInputFormat(), crtIndex.getOutputFormat(), crtIndex.getSerde(),
 768        crtIndex.getStorageHandler(), crtIndex.getLocation(), crtIndex.getIdxProps(), crtIndex.getTblProps(),
 769        crtIndex.getSerdeProps(), crtIndex.getCollItemDelim(), crtIndex.getFieldDelim(), crtIndex.getFieldEscape(),
 770        crtIndex.getLineDelim(), crtIndex.getMapKeyDelim(), crtIndex.getIndexComment()
 771        );
 772    return 0;
 773  }
 774
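      /**
       * Alter an index. Only ADDPROPS is supported: the new properties are added
       * to the index, the "last modified" parameters are updated and the index
       * is altered in the metastore.
       *
       * @return Returns 0 when execution succeeds and above 0 if it fails.
       * @throws HiveException
       */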
 775  private int alterIndex(Hive db, AlterIndexDesc alterIndex) throws HiveException {
 776    String dbName = alterIndex.getDbName();
 777    String baseTableName = alterIndex.getBaseTableName();
 778    String indexName = alterIndex.getIndexName();
 779    Index idx = db.getIndex(dbName, baseTableName, indexName);
 780
 781    if (alterIndex.getOp() == AlterIndexDesc.AlterIndexTypes.ADDPROPS) {
 782      idx.getParameters().putAll(alterIndex.getProps());
 783    } else {
 784      console.printError("Unsupported Alter command");
 785      return 1;
 786    }
 787
 788    // set last modified by properties
 789    if (!updateModifiedParameters(idx.getParameters(), conf)) {
 790      return 1;
 791    }
 792
 793    try {
 794      db.alterIndex(dbName, baseTableName, indexName, idx);
 795    } catch (InvalidOperationException e) {
 796      console.printError("Invalid alter operation: " + e.getMessage());
 797      LOG.info("alter index: " + stringifyException(e));
 798      return 1;
 799    } catch (HiveException e) {
 800      console.printError("Invalid alter operation: " + e.getMessage());
 801      return 1;
 802    }
 803    return 0;
 804  }
 805
 816  /**
 817   * Add a partition to a table.
 818   *
 819   * @param db
 820   *          Database to add the partition to.
 821   * @param addPartitionDesc
 822   *          Add this partition.
 823   * @return Returns 0 when execution succeeds and above 0 if it fails.
 824   * @throws HiveException
 825   */
 826  private int addPartition(Hive db, AddPartitionDesc addPartitionDesc) throws HiveException {
 827
 828    Table tbl = db.getTable(addPartitionDesc.getDbName(), addPartitionDesc.getTableName());
 829
 830    validateAlterTableType(tbl, AlterTableDesc.AlterTableTypes.ADDPARTITION);
 831
 832    // If ADD PARTITION was run with IF NOT EXISTS, we should not throw an
 833    // error if the specified partition already exists.
 834    Partition checkPart = db.getPartition(tbl, addPartitionDesc.getPartSpec(), false);
 835    if (checkPart != null && addPartitionDesc.getIfNotExists()) {
 836      return 0;
 837    }
 838
 839    if (addPartitionDesc.getLocation() == null) {
 840      db.createPartition(tbl, addPartitionDesc.getPartSpec());
 841    } else {
 842      // set partition path relative to table
 843      db.createPartition(tbl, addPartitionDesc.getPartSpec(), new Path(tbl
 844          .getPath(), addPartitionDesc.getLocation()));
 845    }
 846
 847    Partition part = db
 848        .getPartition(tbl, addPartitionDesc.getPartSpec(), false);
 849    work.getOutputs().add(new WriteEntity(part));
 850
 851    return 0;
 852  }
 853
 854  /**
 855   * Rewrite the partition's metadata and force the pre/post execute hooks to
 856   * be fired.
 857   *
 858   * @param db
 859   * @param touchDesc
 860   * @return
 861   * @throws HiveException
 862   */
 863  private int touch(Hive db, AlterTableSimpleDesc touchDesc)
 864      throws HiveException {
 865
 866    String dbName = touchDesc.getDbName();
 867    String tblName = touchDesc.getTableName();
 868
 869    Table tbl = db.getTable(dbName, tblName);
 870
 871    validateAlterTableType(tbl, AlterTableDesc.AlterTableTypes.TOUCH);
 872
 873    if (touchDesc.getPartSpec() == null) {
 874      try {
 875        db.alterTable(tblName, tbl);
 876      } catch (InvalidOperationException e) {
 877        throw new HiveException("Uable to update table");
 878      }
 879      work.getInputs().add(new ReadEntity(tbl));
 880      work.getOutputs().add(new WriteEntity(tbl));
 881    } else {
 882      Partition part = db.getPartition(tbl, touchDesc.getPartSpec(), false);
 883      if (part == null) {
 884        throw new HiveException("Specified partition does not exist");
 885      }
 886      try {
 887        db.alterPartition(tblName, part);
 888      } catch (InvalidOperationException e) {
 889        throw new HiveException(e);
 890      }
 891      work.getInputs().add(new ReadEntity(part));
 892      work.getOutputs().add(new WriteEntity(part));
 893    }
 894    return 0;
 895  }
 896
 897  /**
 898   * Determines whether a partition has been archived
 899   *
 900   * @param p
 901   * @return
 902   */
 904  private boolean isArchived(Partition p) {
 905    Map<String, String> params = p.getParameters();
 906    if ("true".equalsIgnoreCase(params.get(
 907        org.apache.hadoop.hive.metastore.api.Constants.IS_ARCHIVED))) {
 908      return true;
 909    } else {
 910      return false;
 911    }
 912  }
 913
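      /**
       * Sets or clears the IS_ARCHIVED flag in the partition's parameters.
       */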
 914  private void setIsArchived(Partition p, boolean state) {
 915    Map<String, String> params = p.getParameters();
 916    if (state) {
 917      params.put(org.apache.hadoop.hive.metastore.api.Constants.IS_ARCHIVED,
 918          "true");
 919    } else {
 920      params.remove(org.apache.hadoop.hive.metastore.api.Constants.IS_ARCHIVED);
 921    }
 922  }
 923
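      /**
       * Returns the original (pre-archive) location recorded in the partition's
       * parameters, or null if none is recorded.
       */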
 924  private String getOriginalLocation(Partition p) {
 925    Map<String, String> params = p.getParameters();
 926    return params.get(
 927        org.apache.hadoop.hive.metastore.api.Constants.ORIGINAL_LOCATION);
 928  }
 929
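      /**
       * Records the original (pre-archive) location in the partition's
       * parameters, or removes it when loc is null.
       */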
 930  private void setOriginalLocation(Partition p, String loc) {
 931    Map<String, String> params = p.getParameters();
 932    if (loc == null) {
 933      params.remove(org.apache.hadoop.hive.metastore.api.Constants.ORIGINAL_LOCATION);
 934    } else {
 935      params.put(org.apache.hadoop.hive.metastore.api.Constants.ORIGINAL_LOCATION, loc);
 936    }
 937  }
 938
 939  // Returns only the path component of the URI
 940  private String getArchiveDirOnly(Path parentDir, String archiveName) {
 941    URI parentUri = parentDir.toUri();
 942    Path harDir = new Path(parentUri.getPath(), archiveName);
 943    return harDir.toString();
 944  }
 945
 946  /**
 947   * Sets the appropriate attributes in the supplied Partition object to mark
 948   * it as archived. Note that the metastore is not touched - a separate
 949   * call to alter_partition is needed.
 950   *
 951   * @param p - the partition object to modify
 952   * @param parentDir - the parent directory of the archive, which is the
 953   * original directory that the partition's files resided in
 954   * @param dirInArchive - the directory within the archive file that contains
 955   * the partitions files
 956   * @param archiveName - the name of the archive
 957   * @throws URISyntaxException
 958   */
 959  private void setArchived(Partition p, Path parentDir, String dirInArchive, String archiveName)
 960      throws URISyntaxException {
 961    assert(isArchived(p) == false);
 962    Map<String, String> params = p.getParameters();
 963
 964    URI parentUri = parentDir.toUri();
 965    String parentHost = parentUri.getHost();
 966    String harHost = null;
 967    if (parentHost == null) {
 968      harHost = "";
 969    } else {
 970      harHost = parentUri.getScheme() + "-" + parentHost;
 971    }
 972
 973    // harUri is used to access the partition's files, which are in the archive
 974    // The format of the URI is something like:
 975    // har://underlyingfsscheme-host:port/archivepath
 976    URI harUri = null;
 977    if (dirInArchive.length() == 0) {
 978      harUri = new URI("har", parentUri.getUserInfo(), harHost, parentUri.getPort(),
 979          getArchiveDirOnly(parentDir, archiveName),
 980          parentUri.getQuery(), parentUri.getFragment());
 981    } else {
 982      harUri = new URI("har", parentUri.getUserInfo(), harHost, parentUri.getPort(),
 983          new Path(getArchiveDirOnly(parentDir, archiveName), dirInArchive).toUri().getPath(),
 984          parentUri.getQuery(), parentUri.getFragment());
 985    }
 986    setIsArchived(p, true);
 987    setOriginalLocation(p, parentDir.toString());
 988    p.setLocation(harUri.toString());
 989  }
 990
 991  /**
 992   * Sets the appropriate attributes in the supplied Partition object to mark
 993   * it as not archived. Note that the metastore is not touched - a separate
 994   * call to alter_partition is needed.
 995   *
 996   * @param p - the partition to modify
 997   */
 998  private void setUnArchived(Partition p) {
 999    assert(isArchived(p) == true);
1000    String parentDir = getOriginalLocation(p);
1001    setIsArchived(p, false);
1002    setOriginalLocation(p, null);
1003    assert(parentDir != null);
1004    p.setLocation(parentDir);
1005  }
1006
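      /**
       * Checks whether the given path exists, converting any IOException into a
       * HiveException.
       */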
1007  private boolean pathExists(Path p) throws HiveException {
1008    try {
1009      FileSystem fs = p.getFileSystem(conf);
1010      return fs.exists(p);
1011    } catch (IOException e) {
1012      throw new HiveException(e);
1013    }
1014  }
1015
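      /**
       * Renames a directory, failing with a HiveException if the rename is
       * rejected or an IOException occurs.
       */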
1016  private void moveDir(FileSystem fs, Path from, Path to) throws HiveException {
1017    try {
1018      if (!fs.rename(from, to)) {
1019        throw new HiveException("Moving " + from + " to " + to + " failed!");
1020      }
1021    } catch (IOException e) {
1022      throw new HiveException(e);
1023    }
1024  }
1025
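      /**
       * Deletes a directory (recursively) through the Warehouse helper.
       */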
1026  private void deleteDir(Path dir) throws HiveException {
1027    try {
1028      Warehouse wh = new Warehouse(conf);
1029      wh.deleteDir(dir, true);
1030    } catch (MetaException e) {
1031      throw new HiveException(e);
1032    }
1033  }
1034
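      /**
       * Archive a partition of a managed table into a Hadoop archive (data.har).
       * The archive is built in a temporary directory and swapped into the
       * partition location via intermediate directories so that a failure at any
       * step can be recovered from by re-running the command; the partition
       * metadata is updated last and the original files are deleted afterwards.
       *
       * @return Returns 0 when execution succeeds.
       * @throws HiveException
       */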
1035  private int archive(Hive db, AlterTableSimpleDesc simpleDesc, DriverContext driverContext)
1036      throws HiveException {
1037    String dbName = simpleDesc.getDbName();
1038    String tblName = simpleDesc.getTableName();
1039
1040    Table tbl = db.getTable(dbName, tblName);
1041    validateAlterTableType(tbl, AlterTableDesc.AlterTableTypes.ARCHIVE);
1042
1043    Map<String, String> partSpec = simpleDesc.getPartSpec();
1044    Partition p = db.getPartition(tbl, partSpec, false);
1045
1046    if (tbl.getTableType() != TableType.MANAGED_TABLE) {
1047      throw new HiveException("ARCHIVE can only be performed on managed tables");
1048    }
1049
1050    if (p == null) {
1051      throw new HiveException("Specified partition does not exist");
1052    }
1053
1054    if (isArchived(p)) {
1055      // If there were a failure right after the metadata was updated in an
1056      // archiving operation, it's possible that the original, unarchived files
1057      // weren't deleted.
1058      Path originalDir = new Path(getOriginalLocation(p));
1059      Path leftOverIntermediateOriginal = new Path(originalDir.getParent(),
1060          originalDir.getName() + INTERMEDIATE_ORIGINAL_DIR_SUFFIX);
1061
1062      if (pathExists(leftOverIntermediateOriginal)) {
1063        console.printInfo("Deleting " + leftOverIntermediateOriginal +
1064        " left over from a previous archiving operation");
1065        deleteDir(leftOverIntermediateOriginal);
1066      }
1067
1068      throw new HiveException("Specified partition is already archived");
1069    }
1070
1071    Path originalDir = p.getPartitionPath();
1072    Path intermediateArchivedDir = new Path(originalDir.getParent(),
1073        originalDir.getName() + INTERMEDIATE_ARCHIVED_DIR_SUFFIX);
1074    Path intermediateOriginalDir = new Path(originalDir.getParent(),
1075        originalDir.getName() + INTERMEDIATE_ORIGINAL_DIR_SUFFIX);
1076    String archiveName = "data.har";
1077    FileSystem fs = null;
1078    try {
1079      fs = originalDir.getFileSystem(conf);
1080    } catch (IOException e) {
1081      throw new HiveException(e);
1082    }
1083
1084    // The following steps seem roundabout, but they are meant to aid in
1085    // recovery if a failure occurs and to keep a consistent state in the FS
1086
1087    // Steps:
1088    // 1. Create the archive in a temporary folder
1089    // 2. Move the archive dir to an intermediate dir that is at the same
1090    //    level as the original partition dir. Call the new dir
1091    //    intermediate-archive.
1092    // 3. Rename the original partition dir to an intermediate dir. Call the
1093    //    renamed dir intermediate-original
1094    // 4. Rename intermediate-archive to the original partition dir
1095    // 5. Change the metadata
1096    // 6. Delete the original partition files in intermediate-original
1097
1098    // The original partition files are deleted after the metadata change
1099    // because the presence of those files is used to indicate whether
1100    // the original partition directory contains archived or unarchived files.
1101
1102    // Create an archived version of the partition in a directory ending in
1103    // ARCHIVE_INTERMEDIATE_DIR_SUFFIX that's the same level as the partition,
1104    // if it does not already exist. If it does exist, we assume the dir is good
1105    // to use, since the move operation that created it is atomic.
1106    if (!pathExists(intermediateArchivedDir) &&
1107        !pathExists(intermediateOriginalDir)) {
1108
1109      // First create the archive in a tmp dir so that if the job fails, the
1110      // bad files don't pollute the filesystem
1111      Path tmpDir = new Path(driverContext.getCtx().getExternalTmpFileURI(originalDir.toUri()), "partlevel");
1112
1113      console.printInfo("Creating " + archiveName + " for " + originalDir.toString());
1114      console.printInfo("in " + tmpDir);
1115      console.printInfo("Please wait... (this may take a while)");
1116
1117      // Create the Hadoop archive
1118      HadoopShims shim = ShimLoader.getHadoopShims();
1119      int ret = 0;
1120      try {
1121        ret = shim.createHadoopArchive(conf, originalDir, tmpDir, archiveName);
1122      } catch (Exception e) {
1123        throw new HiveException(e);
1124      }
1125      if (ret != 0) {
1126        throw new HiveException("Error while creating HAR");
1127      }
1128      // Move from the tmp dir to an intermediate directory, in the same level as
1129      // the partition directory. e.g. .../hr=12-intermediate-archived
1130      try {
1131        console.printInfo("Moving " + tmpDir + " to " + intermediateArchivedDir);
1132        if (pathExists(intermediateArchivedDir)) {
1133          throw new HiveException("The intermediate archive directory already exists.");
1134        }
1135        fs.rename(tmpDir, intermediateArchivedDir);
1136      } catch (IOException e) {
1137        throw new HiveException("Error while moving tmp directory");
1138      }
1139    } else {
1140      if (pathExists(intermediateArchivedDir)) {
1141        console.printInfo("Intermediate archive directory " + intermediateArchivedDir +
1142        " already exists. Assuming it contains an archived version of the partition");
1143      }
1144    }
1145
1146    // If we get to here, we know that we've archived the partition files, but
1147    // they may be in the original partition location, or in the intermediate
1148    // original dir.
1149
1150    // Move the original parent directory to the intermediate original directory
1151    // if the move hasn't been made already
1152    if (!pathExists(intermediateOriginalDir)) {
1153      console.printInfo("Moving " + originalDir + " to " +
1154          intermediateOriginalDir);
1155      moveDir(fs, originalDir, intermediateOriginalDir);
1156    } else {
1157      console.printInfo(intermediateOriginalDir + " already exists. " +
1158          "Assuming it contains the original files in the partition");
1159    }
1160
1161    // If there's a failure from here to when the metadata is updated,
1162    // there will be no data in the partition, or an error while trying to read
1163    // the partition (if the archive files have been moved to the original
1164    // partition directory). But re-running the archive command will allow
1165    // recovery
1166
1167    // Move the intermediate archived directory to the original parent directory
1168    if (!pathExists(originalDir)) {
1169      console.printInfo("Moving " + intermediateArchivedDir + " to " +
1170          originalDir);
1171      moveDir(fs, intermediateArchivedDir, originalDir);
1172    } else {
1173      console.printInfo(originalDir + " already exists. " +
1174          "Assuming it contains the archived version of the partition");
1175    }
1176
1177    // Record this change in the metastore
1178    try {
1179      boolean parentSettable =
1180        conf.getBoolVar(HiveConf.ConfVars.HIVEHARPARENTDIRSETTABLE);
1181
1182      // dirInArchive is the directory within the archive that has all the files
1183      // for this partition. With older versions of Hadoop, archiving a
1184    // directory would produce the same directory structure
1185      // in the archive. So if you created myArchive.har of /tmp/myDir, the
1186      // files in /tmp/myDir would be located under myArchive.har/tmp/myDir/*
1187      // In this case, dirInArchive should be tmp/myDir
1188
1189      // With newer versions of Hadoop, the parent directory could be specified.
1190      // Assuming the parent directory was set to /tmp/myDir when creating the
1191      // archive, the files can be found under myArchive.har/*
1192      // In this case, dirInArchive should be empty
1193
1194      String dirInArchive = "";
1195      if (!parentSettable) {
1196        dirInArchive = originalDir.toUri().getPath();
1197        if (dirInArchive.length() > 1 && dirInArchive.charAt(0) == '/') {
1198          dirInArchive = dirInArchive.substring(1);
1199        }
1200      }
1201      setArchived(p, originalDir, dirInArchive, archiveName);
1202      db.alterPartition(tblName, p);
1203    } catch (Exception e) {
1204      throw new HiveException("Unable to change the partition info for HAR", e);
1205    }
1206
1207    // If a failure occurs here, the directory containing the original files
1208    // will not be deleted. The user will run ARCHIVE again to clear this up
1209    deleteDir(intermediateOriginalDir);
1210
1211
1212    return 0;
1213  }
1214
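      /**
       * Reverse of archive(): extract the files from the partition's Hadoop
       * archive, swap them back into the original partition location via
       * intermediate directories, update the partition metadata and finally
       * delete the archived copy.
       *
       * @return Returns 0 when execution succeeds.
       * @throws HiveException
       */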
1215  private int unarchive(Hive db, AlterTableSimpleDesc simpleDesc)
1216      throws HiveException {
1217    String dbName = simpleDesc.getDbName();
1218    String tblName = simpleDesc.getTableName();
1219
1220    Table tbl = db.getTable(dbName, tblName);
1221    validateAlterTableType(tbl, AlterTableDesc.AlterTableTypes.UNARCHIVE);
1222
1223    // Means user specified a table, not a partition
1224    if (simpleDesc.getPartSpec() == null) {
1225      throw new HiveException("ARCHIVE is for partitions only");
1226    }
1227
1228    Map<String, String> partSpec = simpleDesc.getPartSpec();
1229    Partition p = db.getPartition(tbl, partSpec, false);
1230
1231    if (tbl.getTableType() != TableType.MANAGED_TABLE) {
1232      throw new HiveException("UNARCHIVE can only be performed on managed tables");
1233    }
1234
1235    if (p == null) {
1236      throw new HiveException("Specified partition does not exist");
1237    }
1238
1239    if (!isArchived(p)) {
1240      Path location = new Path(p.getLocation());
1241      Path leftOverArchiveDir = new Path(location.getParent(),
1242          location.getName() + INTERMEDIATE_ARCHIVED_DIR_SUFFIX);
1243
1244      if (pathExists(leftOverArchiveDir)) {
1245        console.printInfo("Deleting " + leftOverArchiveDir + " left over " +
1246        "from a previous unarchiving operation");
1247        deleteDir(leftOverArchiveDir);
1248      }
1249
1250      throw new HiveException("Specified partition is not archived");
1251    }
1252
1253    Path originalLocation = new Path(getOriginalLocation(p));
1254    Path sourceDir = new Path(p.getLocation());
1255    Path intermediateArchiveDir = new Path(originalLocation.getParent(),
1256        originalLocation.getName() + INTERMEDIATE_ARCHIVED_DIR_SUFFIX);
1257    Path intermediateExtractedDir = new Path(originalLocation.getParent(),
1258        originalLocation.getName() + INTERMEDIATE_EXTRACTED_DIR_SUFFIX);
1259
1260    Path tmpDir = new Path(driverContext
1261          .getCtx()
1262          .getExternalTmpFileURI(originalLocation.toUri()));
1263
1264    FileSystem fs = null;
1265    try {
1266      fs = tmpDir.getFileSystem(conf);
1267      // Verify that there are no files in the tmp dir, because if there are, they
1268      // would be copied to the partition
1269      FileStatus [] filesInTmpDir = fs.listStatus(tmpDir);
1270      if (filesInTmpDir != null && filesInTmpDir.length != 0) {
1271        for (FileStatus file : filesInTmpDir) {
1272          console.printInfo(file.getPath().toString());
1273        }
1274        throw new HiveException("Temporary directory " + tmpDir + " is not empty");
1275      }
1276
1277    } catch (IOException e) {
1278      throw new HiveException(e);
1279    }
1280
1281    // Some sanity checks
1282    if (originalLocation == null) {
1283      throw new HiveException("Missing archive data in the partition");
1284    }
1285    if (!"har".equals(sourceDir.toUri().getScheme())) {
1286      throw new HiveException("Location should refer to a HAR");
1287    }
1288
1289    // Clarification of terms:
1290    // - The originalLocation directory represents the original directory of the
1291    //   partition's files. They now contain an archived version of those files
1292    //   eg. hdfs:/warehouse/myTable/ds=1/
1293    // - The source directory is the directory containing all the files that
1294    //   should be in the partition. e.g. har:/warehouse/myTable/ds=1/myTable.har/
1295    //   Note the har:/ scheme
1296
1297    // Steps:
1298    // 1. Extract the archive in a temporary folder
1299    // 2. Move the extracted dir to an intermediate dir that is at the same
1300    //    level as originalLocation. Call the new dir intermediate-extracted.
1301    // 3. Rename the original partition dir to an intermediate dir. Call the
1302    //    renamed dir intermediate-archive
1303    // 4. Rename intermediate-extracted to the original partition dir
1304    // 5. Change the metadata
1305    // 6. Delete the archived partition files in intermediate-archive
1306
1307    if (!pathExists(intermediateExtractedDir) &&
1308        !pathExists(intermediateArchiveDir)) {
1309      try {
1310
1311        // Copy the files out of the archive into the temporary directory
1312        String copySource = (new Path(sourceDir, "*")).toString();
1313        String copyDest = tmpDir.toString();
1314        List<String> args = new ArrayList<String>();
1315        args.add("-cp");
1316        args.add(copySource);
1317        args.add(copyDest);
1318
1319        console.printInfo("Copying " + copySource + " to " + copyDest);
1320        FsShell fss = new FsShell(conf);
1321        int ret = 0;
1322        try {
1323          ret = ToolRunner.run(fss, args.toArray(new String[0]));
1324        } catch (Exception e) {
1325          throw new HiveException(e);
1326        }
1327        if (ret != 0) {
1328          throw new HiveException("Error while copying files from archive");
1329        }
1330
1331        console.printInfo("Moving " + tmpDir + " to " + intermediateExtractedDir);
1332        if (fs.exists(intermediateExtractedDir)) {
1333          throw new HiveException("Invalid state: the intermediate extracted " +
1334              "directory already exists.");
1335        }
1336        fs.rename(tmpDir, intermediateExtractedDir);
1337      } catch (Exception e) {
1338        throw new HiveException(e);
1339      }
1340    }
1341
1342    // At this point, we know that the extracted files are in the intermediate
1343    // extracted dir, or in the original directory.
1344
1345    if (!pathExists(intermediateArchiveDir)) {
1346      try {
1347        console.printInfo("Moving " + originalLocation + " to " + intermediateArchiveDir);
1348        fs.rename(originalLocation, intermediateArchiveDir);
1349      } catch (IOException e) {
1350        throw new HiveException(e);
1351      }
1352    } else {
1353      console.printInfo(intermediateArchiveDir + " already exists. " +
1354      "Assuming it contains the archived version of the partition");
1355    }
1356
1357    // If there is a failure from here until the metadata is changed,
1358    // the partition will be empty or throw errors on read.
1359
1360    // If the original location exists here, then it must be the extracted files
1361    // because in the previous step, we moved the previous original location
1362    // (containing the archived version of the files) to intermediateArchiveDir
1363    if (!pathExists(originalLocation)) {
1364      try {
1365        console.printInfo("Moving " + intermediateExtractedDir + " to " + originalLocation);
1366        fs.rename(intermediateExtractedDir, originalLocation);
1367      } catch (IOException e) {
1368        throw new HiveException(e);
1369      }
1370    } else {
1371      console.printInfo(originalLocation + " already exists. " +
1372      "Assuming it contains the extracted files in the partition");
1373    }
1374
1375    setUnArchived(p);
1376    try {
1377      db.alterPartition(tblName, p);
1378    } catch (InvalidOperationException e) {
1379      throw new HiveException(e);
1380    }
1381    // If a failure happens here, the intermediate archive files won't be
1382    // deleted. The user will need to call unarchive again to clear those up.
1383    deleteDir(intermediateArchiveDir);
1384
1385    return 0;
1386  }
1387
1388  private void validateAlterTableType(
1389    Table tbl, AlterTableDesc.AlterTableTypes alterType)  throws HiveException {
1390
1391    if (tbl.isView()) {
1392      switch (alterType) {
1393      case ADDPROPS:
1394        // allow this form
1395        break;
1396      default:
1397        throw new HiveException(
1398          "Cannot use this form of ALTER TABLE on a view");
1399      }
1400    }
1401
1402    if (tbl.isNonNative()) {
1403      throw new HiveException("Cannot use ALTER TABLE on a non-native table");
1404    }
1405  }
1406
1407  /**
1408   * MetastoreCheck: see if the data in the metastore matches what is on the
1409   * dfs. The current version checks for tables and partitions that are
1410   * missing either on disk or in the metastore.
1411   *
1412   * @param db
1413   *          The database in question.
1414   * @param msckDesc
1415   *          Information about the tables and partitions we want to check for.
1416   * @return Returns 0 when execution succeeds and above 0 if it fails.
1417   */
1418  private int msck(Hive db, MsckDesc msckDesc) {
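        // For illustration only: this path is typically reached by statements such
        // as "MSCK TABLE myTable" or "MSCK REPAIR TABLE myTable" (HiveQL syntax
        // assumed from common usage; "myTable" is a placeholder name).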
1419    CheckResult result = new CheckResult();
1420    List<String> repairOutput = new ArrayList<String>();
1421    try {
1422      HiveMetaStoreChecker checker = new HiveMetaStoreChecker(db);
1423      Table t = db.newTable(msckDesc.getTableName());
1424      checker.checkMetastore(t.getDbName(), t.getTableName(), msckDesc.getPartSpecs(), result);
1425      if (msckDesc.isRepairPartitions()) {
1426        Table table = db.getTable(msckDesc.getTableName());
1427        for (CheckResult.PartitionResult part : result.getPartitionsNotInMs()) {
1428          try {
1429            db.createPartition(table, Warehouse.makeSpecFromName(part
1430                .getPartitionName()));
1431            repairOutput.add("Repair: Added partition to metastore "
1432                + msckDesc.getTableName() + ':' + part.getPartitionName());
1433          } catch (Exception e) {
1434            LOG.warn("Repair error, could not add partition to metastore: ", e);
1435          }
1436        }
1437      }
1438    } catch (HiveException e) {
1439      LOG.warn("Failed to run metacheck: ", e);
1440      return 1;
1441    } catch (IOException e) {
1442      LOG.warn("Failed to run metacheck: ", e);
1443      return 1;
1444    } finally {
1445      BufferedWriter resultOut = null;
1446      try {
1447        Path resFile = new Path(msckDesc.getResFile());
1448        FileSystem fs = resFile.getFileSystem(conf);
1449        resultOut = new BufferedWriter(new OutputStreamWriter(fs
1450            .create(resFile)));
1451
1452        boolean firstWritten = false;
1453        firstWritten |= writeMsckResult(result.getTablesNotInMs(),
1454            "Tables not in metastore:", resultOut, firstWritten);
1455        firstWritten |= writeMsckResult(result.getTablesNotOnFs(),
1456            "Tables missing on filesystem:", resultOut, firstWritten);
1457        firstWritten |= writeMsckResult(result.getPartitionsNotInMs(),
1458            "Partitions not in metastore:", resultOut, firstWritten);
1459        firstWritten |= writeMsckResult(result.getPartitionsNotOnFs(),
1460            "Partitions missing from filesystem:", resultOut, firstWritten);
1461        for (String rout : repairOutput) {
1462          if (firstWritten) {
1463            resultOut.write(terminator);
1464          } else {
1465            firstWritten = true;
1466          }
1467          resultOut.write(rout);
1468        }
1469      } catch (IOException e) {
1470        LOG.warn("Failed to save metacheck output: ", e);
1471        return 1;
1472      } finally {
1473        if (resultOut != null) {
1474          try {
1475            resultOut.close();
1476          } catch (IOException e) {
1477            LOG.warn("Failed to close output file: ", e);
1478            return 1;
1479          }
1480        }
1481      }
1482    }
1483
1484    return 0;
1485  }
1486
1487  /**
1488   * Write the result of msck to a writer.
1489   *
1490   * @param result
1491   *          The result we're going to write
1492   * @param msg
1493   *          Message to write.
1494   * @param out
1495   *          Writer to write to
1496   * @param wrote
1497   *          if any previous call wrote data
1498   * @return true if something was written
1499   * @throws IOException
1500   *           In case the writing fails
1501   */
1502  private boolean writeMsckResult(List<? extends Object> result, String msg,
1503      Writer out, boolean wrote) throws IOException {
1504
1505    if (!result.isEmpty()) {
1506      if (wrote) {
1507        out.write(terminator);
1508      }
1509
1510      out.write(msg);
1511      for (Object entry : result) {
1512        out.write(separator);
1513        out.write(entry.toString());
1514      }
1515      return true;
1516    }
1517
1518    return false;
1519  }
1520
1521  /**
1522   * Write a list of partitions to a file.
1523   *
1524   * @param db
1525   *          The database in question.
1526   * @param showParts
1527   *          These are the partitions we're interested in.
1528   * @return Returns 0 when execution succeeds and above 0 if it fails.
1529   * @throws HiveException
1530   *           Throws this exception if an unexpected error occurs.
1531   */
1532  private int showPartitions(Hive db, ShowPartitionsDesc showParts) throws HiveException {
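        // For illustration only: typically reached by "SHOW PARTITIONS myTable",
        // optionally narrowed by a partial partition spec that arrives here via
        // showParts.getPartSpec() (syntax assumed; "myTable" is a placeholder).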
1533    // get the partitions for the table and populate the output
1534    String tabName = showParts.getTabName();
1535    Table tbl = null;
1536    List<String> parts = null;
1537
1538    tbl = db.getTable(tabName);
1539
1540    if (!tbl.isPartitioned()) {
1541      console.printError("Table " + tabName + " is not a partitioned table");
1542      return 1;
1543    }
1544    if (showParts.getPartSpec() != null) {
1545      parts = db.getPartitionNames(tbl.getDbName(),
1546          tbl.getTableName(), showParts.getPartSpec(), (short) -1);
1547    } else {
1548      parts = db.getPartitionNames(tbl.getDbName(), tbl.getTableName(), (short) -1);
1549    }
1550
1551    // write the results in the file
1552    try {
1553      Path resFile = new Path(showParts.getResFile());
1554      FileSystem fs = resFile.getFileSystem(conf);
1555      DataOutput outStream = fs.create(resFile);
1556      Iterator<String> iterParts = parts.iterator();
1557
1558      while (iterParts.hasNext()) {
1559        // create a row per partition name
1560        outStream.writeBytes(iterParts.next());
1561        outStream.write(terminator);
1562      }
1563      ((FSDataOutputStream) outStream).close();
1564    } catch (FileNotFoundException e) {
1565      LOG.info("show partitions: " + stringifyException(e));
1566      throw new HiveException(e.toString());
1567    } catch (IOException e) {
1568      LOG.info("show partitions: " + stringifyException(e));
1569      throw new HiveException(e.toString());
1570    } catch (Exception e) {
1571      throw new HiveException(e.toString());
1572    }
1573
1574    return 0;
1575  }
1576
1577  /**
1578   * Write a list of indexes to a file.
1579   *
1580   * @param db
1581   *          The database in question.
1582   * @param showIndexes
1583   *          These are the indexes we're interested in.
1584   * @return Returns 0 when execution succeeds and above 0 if it fails.
1585   * @throws HiveException
1586   *           Throws this exception if an unexpected error occurs.
1587   */
1588  private int showIndexes(Hive db, ShowIndexesDesc showIndexes) throws HiveException {
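        // For illustration only: typically reached by a statement such as
        // "SHOW FORMATTED INDEXES ON myTable" (syntax assumed from common usage;
        // the FORMATTED variant maps to showIndexes.isFormatted() below).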
1589    // get the indexes for the table and populate the output
1590    String tableName = showIndexes.getTableName();
1591    Table tbl = null;
1592    List<Index> indexes = null;
1593
1594    tbl = db.getTable(tableName);
1595
1596    indexes = db.getIndexes(tbl.getDbName(), tbl.getTableName(), (short) -1);
1597
1598    // write the results in the file
1599    try {
1600      Path resFile = new Path(showIndexes.getResFile());
1601      FileSystem fs = resFile.getFileSystem(conf);
1602      DataOutput outStream = fs.create(resFile);
1603
1604      if (showIndexes.isFormatted()) {
1605        // column headers
1606        outStream.writeBytes(MetaDataFormatUtils.getIndexColumnsHeader());
1607        outStream.write(terminator);
1608        outStream.write(terminator);
1609      }
1610
1611      for (Index index : indexes)
1612      {
1613        outStream.writeBytes(MetaDataFormatUtils.getAllColumnsInformation(index));
1614      }
1615
1616      ((FSDataOutputStream) outStream).close();
1617
1618    } catch (FileNotFoundException e) {
1619      LOG.info("show indexes: " + stringifyException(e));
1620      throw new HiveException(e.toString());
1621    } catch (IOException e) {
1622      LOG.info("show indexes: " + stringifyException(e));
1623      throw new HiveException(e.toString());
1624    } catch (Exception e) {
1625      throw new HiveException(e.toString());
1626    }
1627
1628    return 0;
1629  }
1630
1631  /**
1632   * Write a list of the available databases to a file.
1633   *
1634   * @param showDatabases
1635   *          These are the databases we're interested in.
1636   * @return Returns 0 when execution succeeds and above 0 if it fails.
1637   * @throws HiveException
1638   *           Throws this exception if an unexpected error occurs.
1639   */
1640  private int showDatabases(Hive db, ShowDatabasesDesc showDatabasesDesc) throws HiveException {
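        // For illustration only: "SHOW DATABASES" or "SHOW DATABASES LIKE 'prod*'"
        // (pattern syntax assumed; the pattern arrives via getPattern() below).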
1641    // get the databases for the desired pattern - populate the output stream
1642    List<String> databases = null;
1643    if (showDatabasesDesc.getPattern() != null) {
1644      LOG.info("pattern: " + showDatabasesDesc.getPattern());
1645      databases = db.getDatabasesByPattern(showDatabasesDesc.getPattern());
1646    } else {
1647      databases = db.getAllDatabases();
1648    }
1649    LOG.info("results : " + databases.size());
1650
1651    // write the results in the file
1652    try {
1653      Path resFile = new Path(showDatabasesDesc.getResFile());
1654      FileSystem fs = resFile.getFileSystem(conf);
1655      DataOutput outStream = fs.create(resFile);
1656
1657      for (String database : databases) {
1658        // create a row per database name
1659        outStream.writeBytes(database);
1660        outStream.write(terminator);
1661      }
1662      ((FSDataOutputStream) outStream).close();
1663    } catch (FileNotFoundException e) {
1664      LOG.warn("show databases: " + stringifyException(e));
1665      return 1;
1666    } catch (IOException e) {
1667      LOG.warn("show databases: " + stringifyException(e));
1668      return 1;
1669    } catch (Exception e) {
1670      throw new HiveException(e.toString());
1671    }
1672    return 0;
1673  }
1674
1675  /**
1676   * Write a list of the tables in the database to a file.
1677   *
1678   * @param db
1679   *          The database in question.
1680   * @param showTbls
1681   *          These are the tables we're interested in.
1682   * @return Returns 0 when execution succeeds and above 0 if it fails.
1683   * @throws HiveException
1684   *           Throws this exception if an unexpected error occurs.
1685   */
1686  private int showTables(Hive db, ShowTablesDesc showTbls) throws HiveException {
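        // For illustration only: "SHOW TABLES" or "SHOW TABLES 'page.*'"
        // (pattern syntax assumed; the pattern arrives via showTbls.getPattern()).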
1687    // get the tables for the desired pattern - populate the output stream
1688    List<String> tbls = null;
1689    String dbName = showTbls.getDbName();
1690
1691    if (!db.databaseExists(dbName)) {
1692      throw new HiveException("ERROR: The database " + dbName + " does not exist.");
1693
1694    }
1695    if (showTbls.getPattern() != null) {
1696      LOG.info("pattern: " + showTbls.getPattern());
1697      tbls = db.getTablesByPattern(dbName, showTbls.getPattern());
1698      LOG.info("results : " + tbls.size());
1699    } else {
1700      tbls = db.getAllTables(dbName);
1701    }
1702
1703    // write the results in the file
1704    try {
1705      Path resFile = new Path(showTbls.getResFile());
1706      FileSystem fs = resFile.getFileSystem(conf);
1707      DataOutput outStream = fs.create(resFile);
1708      SortedSet<String> sortedTbls = new TreeSet<String>(tbls);
1709      Iterator<String> iterTbls = sortedTbls.iterator();
1710
1711      while (iterTbls.hasNext()) {
1712        // create a row per table name
1713        outStream.writeBytes(iterTbls.next());
1714        outStream.write(terminator);
1715      }
1716      ((FSDataOutputStream) outStream).close();
1717    } catch (FileNotFoundException e) {
1718      LOG.warn("show table: " + stringifyException(e));
1719      return 1;
1720    } catch (IOException e) {
1721      LOG.warn("show table: " + stringifyException(e));
1722      return 1;
1723    } catch (Exception e) {
1724      throw new HiveException(e.toString());
1725    }
1726    return 0;
1727  }
1728
1729  /**
1730   * Write a list of the user defined functions to a file.
1731   *
1732   * @param showFuncs
1733   *          are the functions we're interested in.
1734   * @return Returns 0 when execution succeeds and above 0 if it fails.
1735   * @throws HiveException
1736   *           Throws this exception if an unexpected error occurs.
1737   */
1738  private int showFunctions(ShowFunctionsDesc showFuncs) throws HiveException {
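        // For illustration only: "SHOW FUNCTIONS" or "SHOW FUNCTIONS 'xpath.*'"
        // (pattern syntax assumed; the pattern arrives via showFuncs.getPattern()).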
1739    // get the functions for the desired pattern - populate the output stream
1740    Set<String> funcs = null;
1741    if (showFuncs.getPattern() != null) {
1742      LOG.info("pattern: " + showFuncs.getPattern());
1743      funcs = FunctionRegistry.getFunctionNames(showFuncs.getPattern());
1744      LOG.info("results : " + funcs.size());
1745    } else {
1746      funcs = FunctionRegistry.getFunctionNames();
1747    }
1748
1749    // write the results in the file
1750    try {
1751      Path resFile = new Path(showFuncs.getResFile());
1752      FileSystem fs = resFile.getFileSystem(conf);
1753      DataOutput outStream = fs.create(resFile);
1754      SortedSet<String> sortedFuncs = new TreeSet<String>(funcs);
1755      Iterator<String> iterFuncs = sortedFuncs.iterator();
1756
1757      while (iterFuncs.hasNext()) {
1758        // create a row per function name
1759        outStream.writeBytes(iterFuncs.next());
1760        outStream.write(terminator);
1761      }
1762      ((FSDataOutputStream) outStream).close();
1763    } catch (FileNotFoundException e) {
1764      LOG.warn("show function: " + stringifyException(e));
1765      return 1;
1766    } catch (IOException e) {
1767      LOG.warn("show function: " + stringifyException(e));
1768      return 1;
1769    } catch (Exception e) {
1770      throw new HiveException(e.toString());
1771    }
1772    return 0;
1773  }
1774
1775  /**
1776   * Write a list of the current locks to a file.
1777   *
1778   * @param showLocks
1779   *          the locks we're interested in.
1780   * @return Returns 0 when execution succeeds and above 0 if it fails.
1781   * @throws HiveException
1782   *           Throws this exception if an unexpected error occurs.
1783   */
1784  private int showLocks(ShowLocksDesc showLocks) throws HiveException {
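        // For illustration only: "SHOW LOCKS" lists everything, while something
        // like "SHOW LOCKS myTable PARTITION (ds='1') EXTENDED" narrows the scope
        // (syntax assumed from common usage; EXTENDED maps to isExt() below).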
1785    Context ctx = driverContext.getCtx();
1786    HiveLockManager lockMgr = ctx.getHiveLockMgr();
1787    boolean isExt = showLocks.isExt();
1788    if (lockMgr == null) {
1789      throw new HiveException("show Locks LockManager not specified");
1790    }
1791
1792    // write the results in the file
1793    try {
1794      Path resFile = new Path(showLocks.getResFile());
1795      FileSystem fs = resFile.getFileSystem(conf);
1796      DataOutput outStream = fs.create(resFile);
1797      List<HiveLock> locks = null;
1798
1799      if (showLocks.getTableName() == null) {
1800        locks = lockMgr.getLocks(false, isExt);
1801      }
1802      else {
1803        locks = lockMgr.getLocks(getHiveObject(showLocks.getTableName(),
1804                                               showLocks.getPartSpec()),
1805                                 true, isExt);
1806      }
1807
1808      Collections.sort(locks, new Comparator<HiveLock>() {
1809
1810          @Override
1811            public int compare(HiveLock o1, HiveLock o2) {
1812            int cmp = o1.getHiveLockObject().getName().compareTo(o2.getHiveLockObject().getName());
1813            if (cmp == 0) {
1814              if (o1.getHiveLockMode() == o2.getHiveLockMode()) {
1815                return cmp;
1816              }
1817              // EXCLUSIVE locks occur before SHARED locks
1818              if (o1.getHiveLockMode() == HiveLockMode.EXCLUSIVE) {
1819                return -1;
1820              }
1821              return +1;
1822            }
1823            return cmp;
1824          }
1825
1826        });
1827
1828      Iterator<HiveLock> locksIter = locks.iterator();
1829
1830      while (locksIter.hasNext()) {
1831        HiveLock lock = locksIter.next();
1832        outStream.writeBytes(lock.getHiveLockObject().getDisplayName());
1833        outStream.write(separator);
1834        outStream.writeBytes(lock.getHiveLockMode().toString());
1835        if (isExt) {
1836          outStream.write(terminator);
1837          HiveLockObjectData lockData = lock.getHiveLockObject().getData();
1838          if (lockData != null) {
1839            outStream.writeBytes("LOCK_QUERYID:" + lockData.getQueryId() + " ");
1840            outStream.writeBytes("LOCK_TIME:" + lockData.getLockTime() + " ");
1841            outStream.writeBytes("LOCK_MODE:" + lockData.getLockMode() + " ");
1842          }
1843        }
1844        outStream.write(terminator);
1845      }
1846      ((FSDataOutputStream) outStream).close();
1847    } catch (FileNotFoundException e) {
1848      LOG.warn("show locks: " + stringifyException(e));
1849      return 1;
1850    } catch (IOException e) {
1851      LOG.warn("show locks: " + stringifyException(e));
1852      return 1;
1853    } catch (Exception e) {
1854      throw new HiveException(e.toString());
1855    }
1856    return 0;
1857  }
1858
1859  /**
1860   * Lock the table/partition specified
1861   *
1862   * @param lockTbl
1863   *          the table/partition to be locked along with the mode
1864   * @return Returns 0 when execution succeeds and above 0 if it fails.
1865   * @throws HiveException
1866   *           Throws this exception if an unexpected error occurs.
1867   */
1868  private int lockTable(LockTableDesc lockTbl) throws HiveException {
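        // For illustration only: statements such as "LOCK TABLE myTable SHARED" or
        // "LOCK TABLE myTable PARTITION (ds='1') EXCLUSIVE" land here (syntax
        // assumed from common usage; the mode string arrives via getMode()).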
1869    Context ctx = driverContext.getCtx();
1870    HiveLockManager lockMgr = ctx.getHiveLockMgr();
1871    if (lockMgr == null) {
1872      throw new HiveException("lock Table LockManager not specified");
1873    }
1874
1875    HiveLockMode mode = HiveLockMode.valueOf(lockTbl.getMode());
1876    String tabName = lockTbl.getTableName();
1877    Table  tbl = db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tabName);
1878    if (tbl == null) {
1879      throw new HiveException("Table " + tabName + " does not exist ");
1880    }
1881
1882    Map<String, String> partSpec = lockTbl.getPartSpec();
1883    HiveLockObjectData lockData =
1884      new HiveLockObjectData(lockTbl.getQueryId(),
1885                             String.valueOf(System.currentTimeMillis()),
1886                             "EXPLICIT");
1887
1888    if (partSpec == null) {
1889      HiveLock lck = lockMgr.lock(new HiveLockObject(tbl, lockData), mode, true, 0, 0);
1890      if (lck == null) {
1891        return 1;
1892      }
1893      return 0;
1894    }
1895
1896    Partition par = db.getPartition(tbl, partSpec, false);
1897    if (par == null) {
1898      throw new HiveException("Partition " + partSpec + " for table " + tabName + " does not exist");
1899    }
1900    HiveLock lck = lockMgr.lock(new HiveLockObject(par, lockData), mode, true, 0, 0);
1901    if (lck == null) {
1902      return 1;
1903    }
1904    return 0;
1905  }
1906
1907  private HiveLockObject getHiveObject(String tabName,
1908                                       Map<String, String> partSpec) throws HiveException {
1909    Table  tbl = db.getTable(tabName);
1910    if (tbl == null) {
1911      throw new HiveException("Table " + tabName + " does not exist ");
1912    }
1913
1914    HiveLockObject obj = null;
1915
1916    if  (partSpec == null) {
1917      obj = new HiveLockObject(tbl, null);
1918    }
1919    else {
1920      Partition par = db.getPartition(tbl, partSpec, false);
1921      if (par == null) {
1922        throw new HiveException("Partition " + partSpec + " for table " + tabName + " does not exist");
1923      }
1924      obj = new HiveLockObject(par, null);
1925    }
1926    return obj;
1927  }
1928
1929  /**
1930   * Unlock the table/partition specified
1931   *
1932   * @param unlockTbl
1933   *          the table/partition to be unlocked
1934   * @return Returns 0 when execution succeeds and above 0 if it fails.
1935   * @throws HiveException
1936   *           Throws this exception if an unexpected error occurs.
1937   */
1938  private int unlockTable(UnlockTableDesc unlockTbl) throws HiveException {
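        // For illustration only: "UNLOCK TABLE myTable" lands here (syntax assumed
        // from common usage; a partition spec, if any, arrives via getPartSpec()).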
1939    Context ctx = driverContext.getCtx();
1940    HiveLockManager lockMgr = ctx.getHiveLockMgr();
1941    if (lockMgr == null) {
1942      throw new HiveException("unlock Table LockManager not specified");
1943    }
1944
1945    String tabName = unlockTbl.getTableName();
1946    HiveLockObject obj = getHiveObject(tabName, unlockTbl.getPartSpec());
1947
1948    List<HiveLock> locks = lockMgr.getLocks(obj, false, false);
1949    if ((locks == null) || (locks.isEmpty())) {
1950      throw new HiveException("Table " + tabName + " is not locked ");
1951    }
1952    Iterator<HiveLock> locksIter = locks.iterator();
1953    while (locksIter.hasNext()) {
1954      HiveLock lock = locksIter.next();
1955      lockMgr.unlock(lock);
1956    }
1957
1958    return 0;
1959  }
1960
1961  /**
1962   * Shows a description of a function.
1963   *
1964   * @param descFunc
1965   *          is the function we are describing
1966   * @throws HiveException
1967   */
1968  private int describeFunction(DescFunctionDesc descFunc) throws HiveException {
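        // For illustration only: "DESCRIBE FUNCTION concat" or
        // "DESCRIBE FUNCTION EXTENDED concat" land here (syntax assumed from
        // common usage; EXTENDED maps to descFunc.isExtended() below).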
1969    String funcName = descFunc.getName();
1970
1971    // write the results in the file
1972    try {
1973      Path resFile = new Path(descFunc.getResFile());
1974      FileSystem fs = resFile.getFileSystem(conf);
1975      DataOutput outStream = fs.create(resFile);
1976
1977      // get the function documentation
1978      Description desc = null;
1979      Class<?> funcClass = null;
1980      FunctionInfo functionInfo = FunctionRegistry.getFunctionInfo(funcName);
1981      if (functionInfo != null) {
1982        funcClass = functionInfo.getFunctionClass();
1983      }
1984      if (funcClass != null) {
1985        desc = funcClass.getAnnotation(Description.class);
1986      }
1987      if (desc != null) {
1988        outStream.writeBytes(desc.value().replace("_FUNC_", funcName));
1989        if (descFunc.isExtended()) {
1990          Set<String> synonyms = FunctionRegistry.getFunctionSynonyms(funcName);
1991          if (synonyms.size() > 0) {
1992            outStream.writeBytes("\nSynonyms: " + join(synonyms, ", "));
1993          }
1994          if (desc.extended().length() > 0) {
1995            outStream.writeBytes("\n"
1996                + desc.extended().replace("_FUNC_", funcName));
1997          }
1998        }
1999      } else {
2000        if (funcClass != null) {
2001          outStream.writeBytes("There is no documentation for function '"
2002              + funcName + "'");
2003        } else {
2004          outStream.writeBytes("Function '" + funcName + "' does not exist.");
2005        }
2006      }
2007
2008      outStream.write(terminator);
2009
2010      ((FSDataOutputStream) outStream).close();
2011    } catch (FileNotFoundException e) {
2012      LOG.warn("describe function: " + stringifyException(e));
2013      return 1;
2014    } catch (IOException e) {
2015      LOG.warn("describe function: " + stringifyException(e));
2016      return 1;
2017    } catch (Exception e) {
2018      throw new HiveException(e.toString());
2019    }
2020    return 0;
2021  }
2022
2023  private int descDatabase(DescDatabaseDesc descDatabase) throws HiveException {
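        // For illustration only: "DESCRIBE DATABASE mydb" lands here; an EXTENDED
        // variant, which maps to descDatabase.isExt() below, additionally prints
        // the database parameters (syntax assumed; "mydb" is a placeholder).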
2024    try {
2025      Path resFile = new Path(descDatabase.getResFile());
2026      FileSystem fs = resFile.getFileSystem(conf);
2027      DataOutput outStream = fs.create(resFile);
2028
2029      Database database = db.getDatabase(descDatabase.getDatabaseName());
2030
2031      if (database != null) {
2032        outStream.writeBytes(database.getName());
2033        outStream.write(separator);
2034        if (database.getDescription() != null) {
2035          outStream.writeBytes(database.getDescription());
2036        }
2037        outStream.write(separator);
2038        if (database.getLocationUri() != null) {
2039          outStream.writeBytes(database.getLocationUri());
2040        }
2041
2042        outStream.write(separator);
2043        if (descDatabase.isExt() && database.getParametersSize() > 0) {
2044          Map<String, String> params = database.getParameters();
2045          outStream.writeBytes(params.toString());
2046        }
2047
2048      } else {
2049        outStream.writeBytes("No such database: " + descDatabase.getDatabaseName());
2050      }
2051
2052      outStream.write(terminator);
2053
2054      ((FSDataOutputStream) outStream).close();
2055
2056    } catch (FileNotFoundException e) {
2057      LOG.warn("describe database: " + stringifyException(e));
2058      return 1;
2059    } catch (IOException e) {
2060      LOG.warn("describe database: " + stringifyException(e));
2061      return 1;
2062    } catch (Exception e) {
2063      throw new HiveException(e.toString());
2064    }
2065    return 0;
2066  }
2067
2068  /**
2069   * Write the status of tables to a file.
2070   *
2071   * @param db
2072   *          The database in question.
2073   * @param showTblStatus
2074   *          tables we are interested in
2075   * @return Return 0 when execution succeeds and above 0 if it fails.
2076   */
2077  private int showTableStatus(Hive db, ShowTableStatusDesc showTblStatus) throws HiveException {
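        // For illustration only: typically reached by a statement such as
        // "SHOW TABLE EXTENDED LIKE 'myTable' PARTITION (ds='1')" (syntax assumed
        // from common usage; the optional partition spec maps to getPartSpec()).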
2078    // get the tables for the desired pattern - populate the output stream
2079    List<Table> tbls = new ArrayList<Table>();
2080    Map<String, String> part = showTblStatus.getPartSpec();
2081    Partition par = null;
2082    if (part != null) {
2083      Table tbl = db.getTable(showTblStatus.getDbName(), showTblStatus.getPattern());
2084      par = db.getPartition(tbl, part, false);
2085      if (par == null) {
2086        throw new HiveException("Partition " + part + " for table "
2087            + showTblStatus.getPattern() + " does not exist.");
2088      }
2089      tbls.add(tbl);
2090    } else {
2091      LOG.info("pattern: " + showTblStatus.getPattern());
2092      List<String> tblStr = db.getTablesForDb(showTblStatus.getDbName(),
2093          showTblStatus.getPattern());
2094      SortedSet<String> sortedTbls = new TreeSet<String>(tblStr);
2095      Iterator<String> iterTbls = sortedTbls.iterator();
2096      while (iterTbls.hasNext()) {
2097        // fetch the table object for each matching table name
2098        String tblName = iterTbls.next();
2099        Table tbl = db.getTable(showTblStatus.getDbName(), tblName);
2100        tbls.add(tbl);
2101      }
2102      LOG.info("results : " + tblStr.size());
2103    }
2104
2105    // write the results in the file
2106    try {
2107      Path resFile = new Path(showTblStatus.getResFile());
2108      FileSystem fs = resFile.getFileSystem(conf);
2109      DataOutput outStream = fs.create(resFile);
2110
2111      Iterator<Table> iterTables = tbls.iterator();
2112      while (iterTables.hasNext()) {
2113        // create a row per table name
2114        Table tbl = iterTables.next();
2115        String tableName = tbl.getTableName();
2116        String tblLoc = null;
2117        String inputFormattCls = null;
2118        String outputFormattCls = null;
2119        if (part != null) {
2120          if (par != null) {
2121            tblLoc = par.getDataLocation().toString();
2122            inputFormattCls = par.getInputFormatClass().getName();
2123            outputFormattCls = par.getOutputFormatClass().getName();
2124          }
2125        } else {
2126          tblLoc = tbl.getDataLocation().toString();
2127          inputFormattCls = tbl.getInputFormatClass().getName();
2128          outputFormattCls = tbl.getOutputFormatClass().getName();
2129        }
2130
2131        String owner = tbl.getOwner();
2132        List<FieldSchema> cols = tbl.getCols();
2133        String ddlCols = MetaStoreUtils.getDDLFromFieldSchema("columns", cols);
2134        boolean isPartitioned = tbl.isPartitioned();
2135        String partitionCols = "";
2136        if (isPartitioned) {
2137          partitionCols = MetaStoreUtils.getDDLFromFieldSchema(
2138              "partition_columns", tbl.getPartCols());
2139        }
2140
2141        outStream.writeBytes("tableName:" + tableName);
2142        outStream.write(terminator);
2143        outStream.writeBytes("owner:" + owner);
2144        outStream.write(terminator);
2145        outStream.writeBytes("location:" + tblLoc);
2146        outStream.write(terminator);
2147        outStream.writeBytes("inputformat:" + inputFormattCls);
2148        outStream.write(terminator);
2149        outStream.writeBytes("outputformat:" + outputFormattCls);
2150        outStream.write(terminator);
2151        outStream.writeBytes("columns:" + ddlCols);
2152        outStream.write(terminator);
2153        outStream.writeBytes("partitioned:" + isPartitioned);
2154        outStream.write(terminator);
2155        outStream.writeBytes("partitionColumns:" + partitionCols);
2156        outStream.write(terminator);
2157        // output file system information
2158        Path tablLoc = tbl.getPath();
2159        List<Path> locations = new ArrayList<Path>();
2160        if (isPartitioned) {
2161          if (par == null) {
2162            for (Partition curPart : db.getPartitions(tbl)) {
2163              locations.add(new Path(curPart.getTPartition().getSd()
2164                  .getLocation()));
2165            }
2166          } else {
2167            locations.add(new Path(par.getTPartition().getSd().getLocation()));
2168          }
2169        } else {
2170          locations.add(tablLoc);
2171        }
2172        writeFileSystemStats(outStream, locations, tablLoc, false, 0);
2173
2174        outStream.write(terminator);
2175      }
2176      ((FSDataOutputStream) outStream).close();
2177    } catch (FileNotFoundException e) {
2178      LOG.info("show table status: " + stringifyException(e));
2179      return 1;
2180    } catch (IOException e) {
2181      LOG.info("show table status: " + stringifyException(e));
2182      return 1;
2183    } catch (Exception e) {
2184      throw new HiveException(e);
2185    }
2186    return 0;
2187  }
2188
2189  /**
2190   * Write the description of a table to a file.
2191   *
2192   * @param db
2193   *          The database in question.
2194   * @param descTbl
2195   *          This is the table we're interested in.
2196   * @return Returns 0 when execution succeeds and above 0 if it fails.
2197   * @throws HiveException
2198   *           Throws this exception if an unexpected error occurs.
2199   */
2200  private int describeTable(Hive db, DescTableDesc descTbl) throws HiveException {
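        // For illustration only: "DESCRIBE myTable", "DESCRIBE myTable.someCol" or
        // "DESCRIBE EXTENDED myTable PARTITION (ds='1')" land here (syntax assumed
        // from common usage; a "table.column" name arrives as colPath and is split
        // on the first '.').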
2201    String colPath = descTbl.getTableName();
2202    String tableName = colPath.substring(0,
2203        colPath.indexOf('.') == -1 ? colPath.length() : colPath.indexOf('.'));
2204
2205    // describe the table - populate the output stream
2206    Table tbl = db.getTable(tableName, false);
2207    Partition part = null;
2208    try {
2209      Path resFile = new Path(descTbl.getResFile());
2210      if (tbl == null) {
2211        FileSystem fs = resFile.getFileSystem(conf);
2212        DataOutput outStream = fs.create(resFile);
2213        String errMsg = "Table " + tableName + " does not exist";
2214        outStream.write(errMsg.getBytes("UTF-8"));
2215        ((FSDataOutputStream) outStream).close();
2216        return 0;
2217      }
2218      if (descTbl.getPartSpec() != null) {
2219        part = db.getPartition(tbl, descTbl.getPartSpec(), false);
2220        if (part == null) {
2221          FileSystem fs = resFile.getFileSystem(conf);
2222          DataOutput outStream = fs.create(resFile);
2223          String errMsg = "Partition " + descTbl.getPartSpec() + " for table "
2224              + tableName + " does not exist";
2225          outStream.write(errMsg.getBytes("UTF-8"));
2226          ((FSDataOutputStream) outStream).close();
2227          return 0;
2228        }
2229        tbl = part.getTable();
2230      }
2231    } catch (FileNotFoundException e) {
2232      LOG.info("describe table: " + stringifyException(e));
2233      return 1;
2234    } catch (IOException e) {
2235      LOG.info("describe table: " + stringifyException(e));
2236      return 1;
2237    }
2238
2239    try {
2240
2241      LOG.info("DDLTask: got data for " + tbl.getTableName());
2242
2243      Path resFile = new Path(descTbl.getResFile());
2244      FileSystem fs = resFile.getFileSystem(conf);
2245      DataOutput outStream = fs.create(resFile);
2246
2247      if (colPath.equals(tableName)) {
2248        if (!descTbl.isFormatted()) {
2249          List<FieldSchema> cols = tbl.getCols();
2250          if (tableName.equals(colPath)) {
2251            cols.addAll(tbl.getPartCols());
2252          }
2253          outStream.writeBytes(MetaDataFormatUtils.displayColsUnformatted(cols));
2254        } else {
2255          outStream.writeBytes(MetaDataFormatUtils.getAllColumnsInformation(tbl));
2256        }
2257      } else {
2258        List<FieldSchema> cols = Hive.getFieldsFromDeserializer(colPath, tbl.getDeserializer());
2259        if (descTbl.isFormatted()) {
2260          outStream.writeBytes(MetaDataFormatUtils.getAllColumnsInformation(cols));
2261        } else {
2262          outStream.writeBytes(MetaDataFormatUtils.displayColsUnformatted(cols));
2263        }
2264      }
2265
2266      if (tableName.equals(colPath)) {
2267
2268        if (descTbl.isFormatted()) {
2269          if (part != null) {
2270            outStream.writeBytes(MetaDataFormatUtils.getPartitionInformation(part));
2271          } else {
2272            outStream.writeBytes(MetaDataFormatUtils.getTableInformation(tbl));
2273          }
2274        }
2275
2276        // if extended desc table then show the complete details of the table
2277        if (descTbl.isExt()) {
2278          // add empty line
2279          outStream.write(terminator);
2280          if (part != null) {
2281            // show partition information
2282            outStream.writeBytes("Detailed Partition Information");
2283            outStream.write(separator);
2284            outStream.writeBytes(part.getTPartition().toString());
2285            outStream.write(separator);
2286            // comment column is empty
2287            outStream.write(terminator);
2288          } else {
2289            // show table information
2290            outStream.writeBytes("Detailed Table Information");
2291            outStream.write(separator);
2292            outStream.writeBytes(tbl.getTTable().toString());
2293            outStream.write(separator);
2294            outStream.write(terminator);
2295          }
2296        }
2297      }
2298
2299      LOG.info("DDLTask: written data for " + tbl.getTableName());
2300      ((FSDataOutputStream) outStream).close();
2301
2302    } catch (FileNotFoundException e) {
2303      LOG.info("describe table: " + stringifyException(e));
2304      return 1;
2305    } catch (IOException e) {
2306      LOG.info("describe table: " + stringifyException(e));
2307      return 1;
2308    } catch (Exception e) {
2309      throw new HiveException(e);
2310    }
2311
2312    return 0;
2313  }
2314
2315  public static void writeGrantInfo(DataOutput outStream,
2316      PrincipalType principalType, String principalName, String dbName,
2317      String tableName, String partName, String columnName,
2318      PrivilegeGrantInfo grantInfo) throws IOException {
2319
2320    String privilege = grantInfo.getPrivilege();
2321    int createTime = grantInfo.getCreateTime();
2322    String grantor = grantInfo.getGrantor();
2323
2324    if (dbName != null) {
2325      writeKeyValuePair(outStream, "database", dbName);
2326    }
2327    if (tableName != null) {
2328      writeKeyValuePair(outStream, "table", tableName);
2329    }
2330    if (partName != null) {
2331      writeKeyValuePair(outStream, "partition", partName);
2332    }
2333    if (columnName != null) {
2334      writeKeyValuePair(outStream, "columnName", columnName);
2335    }
2336
2337    writeKeyValuePair(outStream, "principalName", principalName);
2338    writeKeyValuePair(outStream, "principalType", "" + principalType);
2339    writeKeyValuePair(outStream, "privilege", privilege);
2340    writeKeyValuePair(outStream, "grantTime", "" + createTime);
2341    if (grantor != null) {
2342      writeKeyValuePair(outStream, "grantor", grantor);
2343    }
2344  }
2345
2346  private static void writeKeyValuePair(DataOutput outStream, String key,
2347      String value) throws IOException {
2348    outStream.write(terminator);
2349    outStream.writeBytes(key);
2350    outStream.write(separator);
2351    outStream.writeBytes(value);
2352    outStream.write(separator);
2353  }
2354
2355  private void writeFileSystemStats(DataOutput outStream, List<Path> locations,
2356      Path tabLoc, boolean partSpecified, int indent) throws IOException {
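        // Emits one "key:value" row per statistic, for example
        //   totalNumberFiles:3
        //   totalFileSize:1024
        // and so on for each field written below; "unknown" is substituted when
        // the filesystem cannot be reached (the numbers above are made up).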
2357    long totalFileSize = 0;
2358    long maxFileSize = 0;
2359    long minFileSize = Long.MAX_VALUE;
2360    long lastAccessTime = 0;
2361    long lastUpdateTime = 0;
2362    int numOfFiles = 0;
2363
2364    boolean unknown = false;
2365    FileSystem fs = tabLoc.getFileSystem(conf);
2366    // in case all files in locations do not exist
2367    try {
2368      FileStatus tmpStatus = fs.getFileStatus(tabLoc);
2369      lastAccessTime = ShimLoader.getHadoopShims().getAccessTime(tmpStatus);
2370      lastUpdateTime = tmpStatus.getModificationTime();
2371      if (partSpecified) {
2372        // check whether the part exists or not in fs
2373        tmpStatus = fs.getFileStatus(locations.get(0));
2374      }
2375    } catch (IOException e) {
2376      LOG.warn(
2377          "Cannot access File System. File System status will be unknown: ", e);
2378      unknown = true;
2379    }
2380
2381    if (!unknown) {
2382      for (Path loc : locations) {
2383        try {
2384          FileStatus status = fs.getFileStatus(tabLoc);
2385          FileStatus[] files = fs.listStatus(loc);
2386          long accessTime = ShimLoader.getHadoopShims().getAccessTime(status);
2387          long updateTime = status.getModificationTime();
2388          // No matter whether loc is the table location or a partition
2389          // location, it must be a directory.
2390          if (!status.isDir()) {
2391            continue;
2392          }
2393          if (accessTime > lastAccessTime) {
2394            lastAccessTime = accessTime;
2395          }
2396          if (updateTime > lastUpdateTime) {
2397            lastUpdateTime = updateTime;
2398          }
2399          for (FileStatus currentStatus : files) {
2400            if (currentStatus.isDir()) {
2401              continue;
2402            }
2403            numOfFiles++;
2404            long fileLen = currentStatus.getLen();
2405            totalFileSize += fileLen;
2406            if (fileLen > maxFileSize) {
2407              maxFileSize = fileLen;
2408            }
2409            if (fileLen < minFileSize) {
2410              minFileSize = fileLen;
2411            }
2412            accessTime = ShimLoader.getHadoopShims().getAccessTime(
2413                currentStatus);
2414            updateTime = currentStatus.getModificationTime();
2415            if (accessTime > lastAccessTime) {
2416              lastAccessTime = accessTime;
2417            }
2418            if (updateTime > lastUpdateTime) {
2419              lastUpdateTime = updateTime;
2420            }
2421          }
2422        } catch (IOException e) {
2423          // ignore
2424        }
2425      }
2426    }
2427    String unknownString = "unknown";
2428
2429    for (int k = 0; k < indent; k++) {
2430      outStream.writeBytes(Utilities.INDENT);
2431    }
2432    outStream.writeBytes("totalNumberFiles:");
2433    outStream.writeBytes(unknown ? unknownString : "" + numOfFiles);
2434    outStream.write(terminator);
2435
2436    for (int k = 0; k < indent; k++) {
2437      outStream.writeBytes(Utilities.INDENT);
2438    }
2439    outStream.writeBytes("totalFileSize:");
2440    outStream.writeBytes(unknown ? unknownString : "" + totalFileSize);
2441    outStream.write(terminator);
2442
2443    for (int k = 0; k < indent; k++) {
2444      outStream.writeBytes(Utilities.INDENT);
2445    }
2446    outStream.writeBytes("maxFileSize:");
2447    outStream.writeBytes(unknown ? unknownString : "" + maxFileSize);
2448    outStream.write(terminator);
2449
2450    for (int k = 0; k < indent; k++) {
2451      outStream.writeBytes(Utilities.INDENT);
2452    }
2453    outStream.writeBytes("minFileSize:");
2454    if (numOfFiles > 0) {
2455      outStream.writeBytes(unknown ? unknownString : "" + minFileSize);
2456    } else {
2457      outStream.writeBytes(unknown ? unknownString : "" + 0);
2458    }
2459    outStream.write(terminator);
2460
2461    for (int k = 0; k < indent; k++) {
2462      outStream.writeBytes(Utilities.INDENT);
2463    }
2464    outStream.writeBytes("lastAccessTime:");
2465    outStream.writeBytes((unknown || lastAccessTime < 0) ? unknownString : ""
2466        + lastAccessTime);
2467    outStream.write(terminator);
2468
2469    for (int k = 0; k < indent; k++) {
2470      outStream.writeBytes(Utilities.INDENT);
2471    }
2472    outStream.writeBytes("lastUpdateTime:");
2473    outStream.writeBytes(unknown ? unknownString : "" + lastUpdateTime);
2474    outStream.write(terminator);
2475  }
2476
2477  /**
2478   * Alter a given table.
2479   *
2480   * @param db
2481   *          The database in question.
2482   * @param alterTbl
2483   *          This is the table we're altering.
2484   * @return Returns 0 when execution succeeds and above 0 if it fails.
2485   * @throws HiveException
2486   *           Throws this exception if an unexpected error occurs.
2487   */
2488  private int alterTable(Hive db, AlterTableDesc alterTbl) throws HiveException {
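        // For illustration only, the branches below correspond to statements such
        // as "ALTER TABLE myTable RENAME TO myTable2",
        // "ALTER TABLE myTable ADD COLUMNS (c INT)",
        // "ALTER TABLE myTable SET SERDEPROPERTIES ('k'='v')",
        // "ALTER TABLE myTable SET FILEFORMAT RCFILE" and
        // "ALTER TABLE myTable SET LOCATION 'hdfs:/warehouse/elsewhere'"
        // (HiveQL syntax assumed from common usage; all names are placeholders).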
2489    // alter the table
2490    Table tbl = db.getTable(alterTbl.getOldName());
2491
2492    Partition part = null;
2493    if(alterTbl.getPartSpec() != null) {
2494      part = db.getPartition(tbl, alterTbl.getPartSpec(), false);
2495      if(part == null) {
2496        console.printError("Partition : " + alterTbl.getPartSpec().toString()
2497            + " does not exist.");
2498        return 1;
2499      }
2500    }
2501
2502    validateAlterTableType(tbl, alterTbl.getOp());
2503
2504    if (tbl.isView()) {
2505      if (!alterTbl.getExpectView()) {
2506        throw new HiveException("Cannot alter a view with ALTER TABLE");
2507      }
2508    } else {
2509      if (alterTbl.getExpectView()) {
2510        throw new HiveException("Cannot alter a base table with ALTER VIEW");
2511      }
2512    }
2513
2514    Table oldTbl = tbl.copy();
2515
2516    if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.RENAME) {
2517      tbl.setTableName(alterTbl.getNewName());
2518    } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDCOLS) {
2519      List<FieldSchema> newCols = alterTbl.getNewCols();
2520      List<FieldSchema> oldCols = tbl.getCols();
2521      if (tbl.getSerializationLib().equals(
2522          "org.apache.hadoop.hive.serde.thrift.columnsetSerDe")) {
2523        console
2524            .printInfo("Replacing columns for columnsetSerDe and changing to LazySimpleSerDe");
2525        tbl.setSerializationLib(LazySimpleSerDe.class.getName());
2526        tbl.getTTable().getSd().setCols(newCols);
2527      } else {
2528        // make sure the columns do not already exist
2529        Iterator<FieldSchema> iterNewCols = newCols.iterator();
2530        while (iterNewCols.hasNext()) {
2531          FieldSchema newCol = iterNewCols.next();
2532          String newColName = newCol.getName();
2533          Iterator<FieldSchema> iterOldCols = oldCols.iterator();
2534          while (iterOldCols.hasNext()) {
2535            String oldColName = iterOldCols.next().getName();
2536            if (oldColName.equalsIgnoreCase(newColName)) {
2537              console.printError("Column '" + newColName + "' exists");
2538              return 1;
2539            }
2540          }
2541          oldCols.add(newCol);
2542        }
2543        tbl.getTTable().getSd().setCols(oldCols);
2544      }
2545    } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.RENAMECOLUMN) {
2546      List<FieldSchema> oldCols = tbl.getCols();
2547      List<FieldSchema> newCols = new ArrayList<FieldSchema>();
2548      Iterator<FieldSchema> iterOldCols = oldCols.iterator();
2549      String oldName = alterTbl.getOldColName();
2550      String newName = alterTbl.getNewColName();
2551      String type = alterTbl.getNewColType();
2552      String comment = alterTbl.getNewColComment();
2553      boolean first = alterTbl.getFirst();
2554      String afterCol = alterTbl.getAfterCol();
2555      FieldSchema column = null;
2556
2557      boolean found = false;
2558      int position = -1;
2559      if (first) {
2560        position = 0;
2561      }
2562
2563      int i = 1;
2564      while (iterOldCols.hasNext()) {
2565        FieldSchema col = iterOldCols.next();
2566        String oldColName = col.getName();
2567        if (oldColName.equalsIgnoreCase(newName)
2568            && !oldColName.equalsIgnoreCase(oldName)) {
2569          console.printError("Column '" + newName + "' exists");
2570          return 1;
2571        } else if (oldColName.equalsIgnoreCase(oldName)) {
2572          col.setName(newName);
2573          if (type != null && !type.trim().equals("")) {
2574            col.setType(type);
2575          }
2576          if (comment != null) {
2577            col.setComment(comment);
2578          }
2579          found = true;
2580          if (first || (afterCol != null && !afterCol.trim().equals(""))) {
2581            column = col;
2582            continue;
2583          }
2584        }
2585
2586        if (afterCol != null && !afterCol.trim().equals("")
2587            && oldColName.equalsIgnoreCase(afterCol)) {
2588          position = i;
2589        }
2590
2591        i++;
2592        newCols.add(col);
2593      }
2594
2595      // did not find the column
2596      if (!found) {
2597        console.printError("Column '" + oldName + "' does not exist");
2598        return 1;
2599      }
2600      // the after column was specified, but we did not find it in the table.
2601      if ((afterCol != null && !afterCol.trim().equals("")) && position < 0) {
2602        console.printError("Column '" + afterCol + "' does not exist");
2603        return 1;
2604      }
2605
2606      if (position >= 0) {
2607        newCols.add(position, column);
2608      }
2609
2610      tbl.getTTable().getSd().setCols(newCols);
2611    } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.REPLACECOLS) {
2612      // change SerDe to LazySimpleSerDe if it is columnsetSerDe
2613      if (tbl.getSerializationLib().equals(
2614          "org.apache.hadoop.hive.serde.thrift.columnsetSerDe")) {
2615        console
2616            .printInfo("Replacing columns for columnsetSerDe and changing to LazySimpleSerDe");
2617        tbl.setSerializationLib(LazySimpleSerDe.class.getName());
2618      } else if (!tbl.getSerializationLib().equals(
2619          MetadataTypedColumnsetSerDe.class.getName())
2620          && !tbl.getSerializationLib().equals(LazySimpleSerDe.class.getName())
2621          && !tbl.getSerializationLib().equals(ColumnarSerDe.class.getName())
2622          && !tbl.getSerializationLib().equals(DynamicSerDe.class.getName())) {
2623        console.printError("Replace columns is not supported for this table. "
2624            + "SerDe may be incompatible.");
2625        return 1;
2626      }
2627      tbl.getTTable().getSd().setCols(alterTbl.getNewCols());
2628    } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDPROPS) {
2629      tbl.getTTable().getParameters().putAll(alterTbl.getProps());
2630    } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDSERDEPROPS) {
2631      tbl.getTTable().getSd().getSerdeInfo().getParameters().putAll(
2632          alterTbl.getProps());
2633    } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDSERDE) {
2634      tbl.setSerializationLib(alterTbl.getSerdeName());
2635      if ((alterTbl.getProps() != null) && (alterTbl.getProps().size() > 0)) {
2636        tbl.getTTable().getSd().getSerdeInfo().getParameters().putAll(
2637            alterTbl.getProps());
2638      }
2639      tbl.setFields(Hive.getFieldsFromDeserializer(tbl.getTableName(), tbl
2640          .getDeserializer()));
2641    } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDFILEFORMAT) {
2642      if(part != null) {
2643        part.getTPartition().getSd().setInputFormat(alterTbl.getInputFormat());
2644        part.getTPartition().getSd().setOutputFormat(alterTbl.getOutputFormat());
2645        if (alterTbl.getSerdeName() != null) {
2646          part.getTPartition().getSd().getSerdeInfo().setSerializationLib(
2647              alterTbl.getSerdeName());
2648        }
2649      } else {
2650        tbl.getTTable().getSd().setInputFormat(alterTbl.getInputFormat());
2651        tbl.getTTable().getSd().setOutputFormat(alterTbl.getOutputFormat());
2652        if (alterTbl.getSerdeName() != null) {
2653          tbl.setSerializationLib(alterTbl.getSerdeName());
2654        }
2655      }
2656    } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ALTERPROTECTMODE) {
2657      boolean protectModeEnable = alterTbl.isProtectModeEnable();
2658      AlterTableDesc.ProtectModeType protectMode = alterTbl.getProtectModeType();
2659
2660      ProtectMode mode = null;
2661      if(part != null) {
2662        mode = part.getProtectMode();
2663      } else {
2664        mode = tbl.getProtectMode();
2665      }
2666
2667      if (protectModeEnable
2668          && protectMode == AlterTableDesc.ProtectModeType.OFFLINE) {
2669        mode.offline = true;
2670      } else if (protectModeEnable
2671          && protectMode == AlterTableDesc.ProtectModeType.NO_DROP) {
2672        mode.noDrop = true;
2673      } else if (!protectModeEnable
2674          && protectMode == AlterTableDesc.ProtectModeType.OFFLINE) {
2675        mode.offline = false;
2676      } else if (!protectModeEnable
2677          && protectMode == AlterTableDesc.ProtectModeType.NO_DROP) {
2678        mode.noDrop = false;
2679      }
2680
2681      if (part != null) {
2682        part.setProtectMode(mode);
2683      } else {
2684        tbl.setProtectMode(mode);
2685      }
2686
2687    } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDCLUSTERSORTCOLUMN) {
2688      // validate sort columns and bucket columns
2689      List<String> columns = Utilities.getColumnNamesFromFieldSchema(tbl
2690          .getCols());
2691      Utilities.validateColumnNames(columns, alterTbl.getBucketColumns());
2692      if (alterTbl.getSortColumns() != null) {
2693        Utilities.validateColumnNames(columns, Utilities
2694            .getColumnNamesFromSortCols(alterTbl.getSortColumns()));
2695      }
2696
2697      int numBuckets = -1;
2698      ArrayList<String> bucketCols = null;
2699      ArrayList<Order> sortCols = null;
2700
2701      // -1 buckets means to turn off bucketing
2702      if (alterTbl.getNumberBuckets() == -1) {
2703        bucketCols = new ArrayList<String>();
2704        sortCols = new ArrayList<Order>();
2705        numBuckets = -1;
2706      } else {
2707        bucketCols = alterTbl.getBucketColumns();
2708        sortCols = alterTbl.getSortColumns();
2709        numBuckets = alterTbl.getNumberBuckets();
2710      }
2711      tbl.getTTable().getSd().setBucketCols(bucketCols);
2712      tbl.getTTable().getSd().setNumBuckets(numBuckets);
2713      tbl.getTTable().getSd().setSortCols(sortCols);
2714    } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ALTERLOCATION) {
2715      String newLocation = alterTbl.getNewLocation();
2716      try {
2717        URI locURI = new URI(newLocation);
2718        if (!locURI.isAbsolute() || locURI.getScheme() == null
2719            || locURI.getScheme().trim().equals("")) {
2720          throw new HiveException(
2721              newLocation
2722                  + " is not absolute or has no scheme information. "
2723                  + "Please specify a complete absolute uri with scheme information.");
2724        }
2725        if (part != null) {
2726          part.setLocation(newLocation);
2727        } else {
2728          tbl.setDataLocation(locURI);
2729        }
2730      } catch (URISyntaxException e) {
2731        throw new HiveException(e);
2732      }
2733    } else {
2734      console.printError("Unsupported Alter command");
2735      return 1;
2736    }
2737
2738    if(part == null) {
2739      if (!updateModifiedParameters(tbl.getTTable().getParameters(), conf)) {
2740        return 1;
2741      }
2742      try {
2743        tbl.checkValidity();
2744      } catch (HiveException e) {
2745        console.printError("Invalid table columns : " + e.getMessage(),
2746            stringifyException(e));
2747        return 1;
2748      }
2749    } else {
2750      if (!updateModifiedParameters(part.getParameters(), conf)) {
2751        return 1;
2752      }
2753    }
2754
2755    try {
2756      if (part == null) {
2757        db.alterTable(alterTbl.getOldName(), tbl);
2758      } else {
2759        db.alterPartition(tbl.getTableName(), part);
2760      }
2761    } catch (InvalidOperationException e) {
2762      console.printError("Invalid alter operation: " + e.getMessage());
2763      LOG.info("alter table: " + stringifyException(e));
2764      return 1;
2765    } catch (HiveException e) {
2766      return 1;
2767    }
2768
2769    // This is kind of hacky - the read entity contains the old table, whereas
2770    // the write entity contains the new table. This is needed for rename -
2771    // both the old and the new table names are passed.
2774    if(part != null) {
2775      work.getInputs().add(new ReadEntity(part));
2776      work.getOutputs().add(new WriteEntity(part));
2777    } else {
2778      work.getInputs().add(new ReadEntity(oldTbl));
2779      work.getOutputs().add(new WriteEntity(tbl));
2780    }
2781    return 0;
2782  }
2783
2784  /**
2785   * Drop a given table.
2786   *
2787   * @param db
2788   *          The database in question.
2789   * @param dropTbl
2790   *          This is the table we're dropping.
2791   * @return Returns 0 when execution succeeds and above 0 if it fails.
2792   * @throws HiveException
2793   *           Throws this exception if an unexpected error occurs.
2794   */
2795  private int dropTable(Hive db, DropTableDesc dropTbl) throws HiveException {
2796    // We need to fetch the table before it is dropped so that it can be
2797    // passed to the post-execution hooks.
2799    Table tbl = null;
2800    try {
2801      tbl = db.getTable(dropTbl.getTableName());
2802    } catch (InvalidTableException e) {
2803      // drop table is idempotent
2804    }
2805
2806    if (tbl != null) {
2807      if (tbl.isView()) {
2808        if (!dropTbl.getExpectView()) {
2809          throw new HiveException("Cannot drop a view with DROP TABLE");
2810        }
2811      } else {
2812        if (dropTbl.getExpectView()) {
2813          throw new HiveException("Cannot drop a base table with DROP VIEW");
2814        }
2815      }
2816    }
2817
2818    if (dropTbl.getPartSpecs() == null) {
2819      if (tbl != null && !tbl.canDrop()) {
2820        throw new HiveException("Table " + tbl.getTableName() +
2821            " is protected from being dropped");
2822      }
2823
2824      // We should check that all the partitions of the table can be dropped
2825      if (tbl != null && tbl.isPartitioned()) {
2826        List<Partition> listPartitions = db.getPartitions(tbl);
2827        for (Partition p : listPartitions) {
2828          if (!p.canDrop()) {
2829            throw new HiveException("Table " + tbl.getTableName() +
2830                " Partition " + p.getName() +
2831                " is protected from being dropped");
2832          }
2833        }
2834      }
2835
2836      // drop the table
2837      db.dropTable(dropTbl.getTableName());
2838      if (tbl != null) {
2839        work.getOutputs().add(new WriteEntity(tbl));
2840      }
2841    } else {
2842      // get all partitions of the table
2843      List<String> partitionNames =
2844        db.getPartitionNames(dropTbl.getTableName(), (short) -1);
2845      Set<Map<String, String>> partitions = new HashSet<Map<String, String>>();
2846      for (String partitionName : partitionNames) {
2847        try {
2848          partitions.add(Warehouse.makeSpecFromName(partitionName));
2849        } catch (MetaException e) {
2850          LOG.warn("Unrecognized partition name from metastore: " + partitionName);
2851        }
2852      }
2853      // drop partitions in the list
2854      List<Partition> partsToDelete = new ArrayList<Partition>();
2855      for (Map<String, String> partSpec : dropTbl.getPartSpecs()) {
2856        Iterator<Map<String, String>> it = partitions.iterator();
2857        while (it.hasNext()) {
2858          Map<String, String> part = it.next();
2859          // test if partSpec matches part
2860          boolean match = true;
2861          for (Map.Entry<String, String> item : partSpec.entrySet()) {
2862            if (!item.getValue().equals(part.get(item.getKey()))) {
2863              match = false;
2864              break;
2865            }
2866          }
2867          if (match) {
2868            Partition p = db.getPartition(tbl, part, false);
2869            if (!p.canDrop()) {
2870              throw new HiveException("Table " + tbl.getTableName() +
2871                  " Partition " + p.getName() +
2872                  " is protected from being dropped");
2873            }
2874
2875            partsToDelete.add(p);
2876            it.remove();
2877          }
2878        }
2879      }
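      /*
       * A self-contained sketch of the matching rule used in the loop above,
       * with hypothetical partition keys and plain java.util types: a partition
       * matches a (possibly partial) spec when every key/value pair of the spec
       * agrees with the partition's own spec.
       *
       *   Map<String, String> spec = new java.util.HashMap<String, String>();
       *   spec.put("ds", "2010-01-01");                      // partial spec: ds only
       *   Map<String, String> partition = new java.util.HashMap<String, String>();
       *   partition.put("ds", "2010-01-01");
       *   partition.put("hr", "12");
       *   boolean match = true;
       *   for (Map.Entry<String, String> e : spec.entrySet()) {
       *     if (!e.getValue().equals(partition.get(e.getKey()))) {
       *       match = false;
       *       break;
       *     }
       *   }
       *   // match == true: this partition would be collected into partsToDelete.
       */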
2880
2881      // drop all existing partitions from the list
2882      for (Partition partition : partsToDelete) {
2883        console.printInfo("Dropping the partition " + partition.getName());
2884        db.dropPartition(dropTbl.getTableName(), partition.getValues(), true);
2885        work.getOutputs().add(new WriteEntity(partition));
2886      }
2887    }
2888
2889    return 0;
2890  }
2891
2892  /**
2893   * Update last_modified_by and last_modified_time parameters in the parameter map.
2894   *
2895   * @param params
2896   *          Parameters map to update.
2897   * @param conf
2898   *          HiveConf used to determine the user making the change.
2899   */
2900  private boolean updateModifiedParameters(Map<String, String> params, HiveConf conf) {
2901    String user = null;
2902    try {
2903      user = conf.getUser();
2904    } catch (IOException e) {
2905      console.printError("Unable to get current user: " + e.getMessage(),
2906          stringifyException(e));
2907      return false;
2908    }
2909
2910    params.put("last_modified_by", user);
2911    params.put("last_modified_time", Long.toString(System.currentTimeMillis() / 1000));
2912    return true;
2913  }
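  /*
   * An illustrative sketch, with hypothetical values, of what the method above
   * writes into the parameter map; last_modified_time is recorded as Unix epoch
   * seconds, not milliseconds.
   *
   *   Map<String, String> params = tbl.getTTable().getParameters();
   *   updateModifiedParameters(params, conf);
   *   // params.get("last_modified_by")   -> e.g. "hive"
   *   // params.get("last_modified_time") -> e.g. "1288306800" (seconds since the epoch)
   */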
2914
2915  /**
2916   * Check if the given serde is valid.
2917   */
2918  private void validateSerDe(String serdeName) throws HiveException {
2919    try {
2920      Deserializer d = SerDeUtils.lookupDeserializer(serdeName);
2921      if (d != null) {
2922        LOG.debug("Found class for " + serdeName);
2923      }
2924    } catch (SerDeException e) {
2925      throw new HiveException("Cannot validate serde: " + serdeName, e);
2926    }
2927  }
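  /*
   * A hypothetical usage sketch of the helper above; the SerDe class name is
   * only an example. If the lookup raises a SerDeException, it is rethrown as
   * a HiveException.
   *
   *   validateSerDe("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe");
   */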
2928
2929  /**
2930   * Create a Database
2931   * @param db
2932   * @param crtDb
2933   * @return Always returns 0
2934   * @throws HiveException
2935   * @throws AlreadyExistsException
2936   */
2937  private int createDatabase(Hive db, CreateDatabaseDesc crtDb)
2938      throws HiveException, AlreadyExistsException {
2939    Database database = new Database();
2940    database.setName(crtDb.getName());
2941    database.setDescription(crtDb.getComment());
2942    database.setLocationUri(crtDb.getLocationUri());
2943    database.setParameters(crtDb.getDatabaseProperties());
2944
2945    db.createDatabase(database, crtDb.getIfNotExists());
2946    return 0;
2947  }
2948
2949  /**
2950   * Drop a Database
2951   * @param db
2952   * @param dropDb
2953   * @return Always returns 0
2954   * @throws HiveException
2955   * @throws NoSuchObjectException
2956   */
2957  private int dropDatabase(Hive db, DropDatabaseDesc dropDb)
2958      throws HiveException, NoSuchObjectException {
2959    db.dropDatabase(dropDb.getDatabaseName(), true, dropDb.getIfExists());
2960    return 0;
2961  }
2962
2963  /**
2964   * Switch to a different Database
2965   * @param db
2966   * @param switchDb
2967   * @return Always returns 0
2968   * @throws HiveException
2969   */
2970  private int switchDatabase(Hive db, SwitchDatabaseDesc switchDb)
2971      throws HiveException {
2972    String dbName = switchDb.getDatabaseName();
2973    if (!db.databaseExists(dbName)) {
2974      throw new HiveException("ERROR: The database " + dbName + " does not exist.");
2975    }
2976    db.setCurrentDatabase(dbName);
2977
2978    // set database specific parameters
2979    Database database = db.getDatabase(dbName);
2980    assert(database != null);
2981    Map<String, String> dbParams = database.getParameters();
2982    if (dbParams != null) {
2983      for (HiveConf.ConfVars var: HiveConf.dbVars) {
2984        String newValue = dbParams.get(var.varname);
2985        if (newValue != null) {
2986          LOG.info("Changing " + var.varname +
2987              " from " + conf.getVar(var) + " to " + newValue);
2988          conf.setVar(var, newValue);
2989        }
2990      }
2991    }
2992
2993    return 0;
2994  }
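  /*
   * A minimal sketch of the override pattern used above, with placeholder
   * names: database-level parameters whose keys match entries of
   * HiveConf.dbVars replace the session's current values on USE <db>.
   *
   *   // dbParams: { "<dbVars entry name>" : "<new value>" }
   *   String newValue = dbParams.get(var.varname);   // null if the db does not override it
   *   if (newValue != null) {
   *     conf.setVar(var, newValue);                  // override takes effect for the session
   *   }
   */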
2995
2996  /**
2997   * Create a new table.
2998   *
2999   * @param db
3000   *          The database in question.
3001   * @param crtTbl
3002   *          This is the table we're creating.
3003   * @return Returns 0 when execution succeeds and above 0 if it fails.
3004   * @throws HiveException
3005   *           Throws this exception if an unexpected error occurs.
3006   */
3007  private int createTable(Hive db, CreateTableDesc crtTbl) throws HiveException {
3008    // create the table
3009    Table tbl = db.newTable(crtTbl.getTableName());
3010
3011    if (crtTbl.getTblProps() != null) {
3012      tbl.getTTable().getParameters().putAll(crtTbl.getTblProps());
3013    }
3014
3015    if (crtTbl.getPartCols() != null) {
3016      tbl.setPartCols(crtTbl.getPartCols());
3017    }
3018    if (crtTbl.getNumBuckets() != -1) {
3019      tbl.setNumBuckets(crtTbl.getNumBuckets());
3020    }
3021
3022    if (crtTbl.getStorageHandler() != null) {
3023      tbl.setProperty(
3024        org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_STORAGE,
3025        crtTbl.getStorageHandler());
3026    }
3027    HiveStorageHandler storageHandler = tbl.getStorageHandler();
3028
3029    /*
3030     * We use LazySimpleSerDe by default.
3031     *
3032     * If the user didn't specify a SerDe but the table has a storage handler,
3033     * the storage handler's SerDe class is used instead.
3034     */
3035    if (crtTbl.getSerName() == null) {
3036      if (storageHandler == null) {
3037        LOG.info("Default to LazySimpleSerDe for table " + crtTbl.getTableName());
3038        tbl.setSerializationLib(org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe.class.getName());
3039      } else {
3040        String serDeClassName = storageHandler.getSerDeClass().getName();
3041        LOG.info("Use StorageHandler-supplied " + serDeClassName
3042          + " for table " + crtTbl.getTableName());
3043        tbl.setSerializationLib(serDeClassName);
3044      }
3045    } else {
3046      // let's validate that the serde exists
3047      validateSerDe(crtTbl.getSerName());
3048      tbl.setSerializationLib(crtTbl.getSerName());
3049    }
3050
3051    if (crtTbl.getFieldDelim() != null) {
3052      tbl.setSerdeParam(Constants.FIELD_DELIM, crtTbl.getFieldDelim());
3053      tbl.setSerdeParam(Constants.SERIALIZATION_FORMAT, crtTbl.getFieldDelim());
3054    }
3055    if (crtTbl.getFieldEscape() != null) {
3056      tbl.setSerdeParam(Constants.ESCAPE_CHAR, crtTbl.getFieldEscape());
3057    }
3058
3059    if (crtTbl.getCollItemDelim() != null) {
3060      tbl.setSerdeParam(Constants.COLLECTION_DELIM, crtTbl.getCollItemDelim());
3061    }
3062    if (crtTbl.getMapKeyDelim() != null) {
3063      tbl.setSerdeParam(Constants.MAPKEY_DELIM, crtTbl.getMapKeyDelim());
3064    }
3065    if (crtTbl.getLineDelim() != null) {
3066      tbl.setSerdeParam(Constants.LINE_DELIM, crtTbl.getLineDelim());
3067    }
3068
3069    if (crtTbl.getSerdeProps() != null) {
3070      Iterator<Entry<String, String>> iter = crtTbl.getSerdeProps().entrySet()
3071        .iterator();
3072      while (iter.hasNext()) {
3073        Entry<String, String> m = iter.next();
3074        tbl.setSerdeParam(m.getKey(), m.getValue());
3075      }
3076    }
3077
3078    if (crtTbl.getCols() != null) {
3079      tbl.setFields(crtTbl.getCols());
3080    }
3081    if (crtTbl.getBucketCols() != null) {
3082      tbl.setBucketCols(crtTbl.getBucketCols());
3083    }
3084    if (crtTbl.getSortCols() != null) {
3085      tbl.setSortCols(crtTbl.getSortCols());
3086    }
3087    if (crtTbl.getComment() != null) {
3088      tbl.setProperty("comment", crtTbl.getComment());
3089    }
3090    if (crtTbl.getLocation() != null) {
3091      tbl.setDataLocation(new Path(crtTbl.getLocation()).toUri());
3092    }
3093
3094    tbl.setInputFormatClass(crtTbl.getInputFormat());
3095    tbl.setOutputFormatClass(crtTbl.getOutputFormat());
3096
3097    tbl.getTTable().getSd().setInputFormat(
3098      tbl.getInputFormatClass().getName());
3099    tbl.getTTable().getSd().setOutputFormat(
3100      tbl.getOutputFormatClass().getName());
3101
3102    if (crtTbl.isExternal()) {
3103      tbl.setProperty("EXTERNAL", "TRUE");
3104      tbl.setTableType(TableType.EXTERNAL_TABLE);
3105    }
3106
3107    // If the sorted columns are a superset of the bucketed columns, store this
3108    // fact so it can later be used to optimize some group-by queries. Note that
3109    // the order does not matter, as long as every bucketed column appears in the
3110    // first 'n' sort columns, where 'n' is the number of bucketed columns.
3112    if ((tbl.getBucketCols() != null) && (tbl.getSortCols() != null)) {
3113      List<String> bucketCols = tbl.getBucketCols();
3114      List<Order> sortCols = tbl.getSortCols();
3115
3116      if ((sortCols.size() > 0) && (sortCols.size() >= bucketCols.size())) {
3117        boolean found = true;
3118
3119        Iterator<String> iterBucketCols = bucketCols.iterator();
3120        while (iterBucketCols.hasNext()) {
3121          String bucketCol = iterBucketCols.next();
3122          boolean colFound = false;
3123          for (int i = 0; i < bucketCols.size(); i++) {
3124            if (bucketCol.equals(sortCols.get(i).getCol())) {
3125              colFound = true;
3126              break;
3127            }
3128          }
3129          if (!colFound) {
3130            found = false;
3131            break;
3132          }
3133        }
3134        if (found) {
3135          tbl.setProperty("SORTBUCKETCOLSPREFIX", "TRUE");
3136        }
3137      }
3138    }
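    /*
     * A self-contained sketch of the check above, with hypothetical column names
     * and plain java.util types: SORTBUCKETCOLSPREFIX is recorded when every
     * bucketed column appears among the first 'n' sort columns, where 'n' is the
     * number of bucketed columns.
     *
     *   List<String> bucketed = java.util.Arrays.asList("userid", "country");
     *   List<String> sorted   = java.util.Arrays.asList("country", "userid", "ts");
     *   boolean prefix = true;
     *   for (String b : bucketed) {
     *     boolean inPrefix = false;
     *     for (int i = 0; i < bucketed.size(); i++) {
     *       if (b.equals(sorted.get(i))) {
     *         inPrefix = true;
     *         break;
     *       }
     *     }
     *     if (!inPrefix) {
     *       prefix = false;
     *       break;
     *     }
     *   }
     *   // prefix == true -> the "SORTBUCKETCOLSPREFIX" table property is set to "TRUE".
     */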
3139
3140    int rc = setGenericTableAttributes(tbl);
3141    if (rc != 0) {
3142      return rc;
3143    }
3144
3145    // create the table
3146    db.createTable(tbl, crtTbl.getIfNotExists());
3147    work.getOutputs().add(new WriteEntity(tbl));
3148    return 0;
3149  }
3150
3151  /**
3152   * Create a new table like an existing table.
3153   *
3154   * @param db
3155   *          The database in question.
3156   * @param crtTbl
3157   *          This is the table we're creating.
3158   * @return Returns 0 when execution succeeds and above 0 if it fails.
3159   * @throws HiveException
3160   *           Throws this exception if an unexpected error occurs.
3161   */
3162  private int createTableLike(Hive db, CreateTableLikeDesc crtTbl) throws HiveException {
3163    // Get the existing table
3164    Table tbl = db.getTable(crtTbl.getLikeTableName());
3165
3166    // find out database name and table name of target table
3167    String targetTableName = crtTbl.getTableName();
3168    Table newTable = db.newTable(targetTableName);
3169
3170    tbl.setDbName(newTable.getDbName());
3171    tbl.setTableName(newTable.getTableName());
3172
3173    if (crtTbl.isExternal()) {
3174      tbl.setProperty("EXTERNAL", "TRUE");
3175    } else {
3176      tbl.setProperty("EXTERNAL", "FALSE");
3177    }
3178
3179    if (crtTbl.getLocation() != null) {
3180      tbl.setDataLocation(new Path(crtTbl.getLocation()).toUri());
3181    } else {
3182      tbl.unsetDataLocation();
3183    }
3184
3185    // Reset table-specific parameters (e.g. stats, lastDDLTime) inherited from the source table.
3186    Map<String, String> params = tbl.getParameters();
3187    params.clear();
3188
3189    // create the table
3190    db.createTable(tbl, crtTbl.getIfNotExists());
3191    work.getOutputs().add(new WriteEntity(tbl));
3192    return 0;
3193  }
3194
3195  /**
3196   * Create a new view.
3197   *
3198   * @param db
3199   *          The database in question.
3200   * @param crtView
3201   *          This is the view we're creating.
3202   * @return Returns 0 when execution succeeds and above 0 if it fails.
3203   * @throws HiveException
3204   *           Throws this exception if an unexpected error occurs.
3205   */
3206  private int createView(Hive db, CreateViewDesc crtView) throws HiveException {
3207    Table tbl = db.newTable(crtView.getViewName());
3208    tbl.setTableType(TableType.VIRTUAL_VIEW);
3209    tbl.setSerializationLib(null);
3210    tbl.clearSerDeInfo();
3211    tbl.setViewOriginalText(crtView.getViewOriginalText());
3212    tbl.setViewExpandedText(crtView.getViewExpandedText());
3213    tbl.setFields(crtView.getSchema());
3214    if (crtView.getComment() != null) {
3215      tbl.setProperty("comment", crtView.getComment());
3216    }
3217    if (crtView.getTblProps() != null) {
3218      tbl.getTTable().getParameters().putAll(crtView.getTblProps());
3219    }
3220
3221    int rc = setGenericTableAttributes(tbl);
3222    if (rc != 0) {
3223      return rc;
3224    }
3225
3226    db.createTable(tbl, crtView.getIfNotExists());
3227    work.getOutputs().add(new WriteEntity(tbl));
3228    return 0;
3229  }
3230
3231  private int setGenericTableAttributes(Table tbl) {
3232    try {
3233      tbl.setOwner(conf.getUser());
3234    } catch (IOException e) {
3235      console.printError("Unable to get current user: " + e.getMessage(),
3236          stringifyException(e));
3237      return 1;
3238    }
3239    // set create time
3240    tbl.setCreateTime((int) (System.currentTimeMillis() / 1000));
3241    return 0;
3242  }
3243
3244  @Override
3245  public StageType getType() {
3246    return StageType.DDL;
3247  }
3248
3249  @Override
3250  public String getName() {
3251    return "DDL";
3252  }
3253
3254  @Override
3255  protected void localizeMRTmpFilesImpl(Context ctx) {
3256    // no-op
3257  }
3258}