/tags/release-0.0.0-rc0/hive/external/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
Large files are truncated; only part of this file is shown below.
- /**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
- package org.apache.hadoop.hive.ql.exec;
- import static org.apache.commons.lang.StringUtils.join;
- import static org.apache.hadoop.util.StringUtils.stringifyException;
- import java.io.BufferedWriter;
- import java.io.DataOutput;
- import java.io.FileNotFoundException;
- import java.io.IOException;
- import java.io.OutputStreamWriter;
- import java.io.Serializable;
- import java.io.Writer;
- import java.net.URI;
- import java.net.URISyntaxException;
- import java.util.ArrayList;
- import java.util.Collections;
- import java.util.Comparator;
- import java.util.HashSet;
- import java.util.Iterator;
- import java.util.List;
- import java.util.Map;
- import java.util.Set;
- import java.util.SortedSet;
- import java.util.TreeSet;
- import java.util.Map.Entry;
- import org.apache.commons.logging.Log;
- import org.apache.commons.logging.LogFactory;
- import org.apache.hadoop.fs.FSDataOutputStream;
- import org.apache.hadoop.fs.FileStatus;
- import org.apache.hadoop.fs.FileSystem;
- import org.apache.hadoop.fs.FsShell;
- import org.apache.hadoop.fs.Path;
- import org.apache.hadoop.hive.conf.HiveConf;
- import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
- import org.apache.hadoop.hive.metastore.MetaStoreUtils;
- import org.apache.hadoop.hive.metastore.ProtectMode;
- import org.apache.hadoop.hive.metastore.TableType;
- import org.apache.hadoop.hive.metastore.Warehouse;
- import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
- import org.apache.hadoop.hive.metastore.api.Database;
- import org.apache.hadoop.hive.metastore.api.FieldSchema;
- import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
- import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
- import org.apache.hadoop.hive.metastore.api.HiveObjectType;
- import org.apache.hadoop.hive.metastore.api.Index;
- import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
- import org.apache.hadoop.hive.metastore.api.MetaException;
- import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
- import org.apache.hadoop.hive.metastore.api.Order;
- import org.apache.hadoop.hive.metastore.api.PrincipalType;
- import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
- import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo;
- import org.apache.hadoop.hive.metastore.api.Role;
- import org.apache.hadoop.hive.ql.Context;
- import org.apache.hadoop.hive.ql.DriverContext;
- import org.apache.hadoop.hive.ql.QueryPlan;
- import org.apache.hadoop.hive.ql.hooks.ReadEntity;
- import org.apache.hadoop.hive.ql.hooks.WriteEntity;
- import org.apache.hadoop.hive.ql.lockmgr.HiveLock;
- import org.apache.hadoop.hive.ql.lockmgr.HiveLockManager;
- import org.apache.hadoop.hive.ql.lockmgr.HiveLockMode;
- import org.apache.hadoop.hive.ql.lockmgr.HiveLockObject;
- import org.apache.hadoop.hive.ql.lockmgr.HiveLockObject.HiveLockObjectData;
- import org.apache.hadoop.hive.ql.metadata.CheckResult;
- import org.apache.hadoop.hive.ql.metadata.Hive;
- import org.apache.hadoop.hive.ql.metadata.HiveException;
- import org.apache.hadoop.hive.ql.metadata.HiveMetaStoreChecker;
- import org.apache.hadoop.hive.ql.metadata.HiveStorageHandler;
- import org.apache.hadoop.hive.ql.metadata.InvalidTableException;
- import org.apache.hadoop.hive.ql.metadata.MetaDataFormatUtils;
- import org.apache.hadoop.hive.ql.metadata.Partition;
- import org.apache.hadoop.hive.ql.metadata.Table;
- import org.apache.hadoop.hive.ql.plan.AddPartitionDesc;
- import org.apache.hadoop.hive.ql.plan.AlterDatabaseDesc;
- import org.apache.hadoop.hive.ql.plan.AlterIndexDesc;
- import org.apache.hadoop.hive.ql.plan.AlterTableDesc;
- import org.apache.hadoop.hive.ql.plan.AlterTableSimpleDesc;
- import org.apache.hadoop.hive.ql.plan.CreateDatabaseDesc;
- import org.apache.hadoop.hive.ql.plan.CreateIndexDesc;
- import org.apache.hadoop.hive.ql.plan.CreateTableDesc;
- import org.apache.hadoop.hive.ql.plan.CreateTableLikeDesc;
- import org.apache.hadoop.hive.ql.plan.CreateViewDesc;
- import org.apache.hadoop.hive.ql.plan.DDLWork;
- import org.apache.hadoop.hive.ql.plan.DescDatabaseDesc;
- import org.apache.hadoop.hive.ql.plan.DescFunctionDesc;
- import org.apache.hadoop.hive.ql.plan.DescTableDesc;
- import org.apache.hadoop.hive.ql.plan.DropDatabaseDesc;
- import org.apache.hadoop.hive.ql.plan.DropIndexDesc;
- import org.apache.hadoop.hive.ql.plan.DropTableDesc;
- import org.apache.hadoop.hive.ql.plan.GrantDesc;
- import org.apache.hadoop.hive.ql.plan.GrantRevokeRoleDDL;
- import org.apache.hadoop.hive.ql.plan.LockTableDesc;
- import org.apache.hadoop.hive.ql.plan.MsckDesc;
- import org.apache.hadoop.hive.ql.plan.PrincipalDesc;
- import org.apache.hadoop.hive.ql.plan.PrivilegeDesc;
- import org.apache.hadoop.hive.ql.plan.PrivilegeObjectDesc;
- import org.apache.hadoop.hive.ql.plan.RevokeDesc;
- import org.apache.hadoop.hive.ql.plan.RoleDDLDesc;
- import org.apache.hadoop.hive.ql.plan.ShowDatabasesDesc;
- import org.apache.hadoop.hive.ql.plan.ShowFunctionsDesc;
- import org.apache.hadoop.hive.ql.plan.ShowGrantDesc;
- import org.apache.hadoop.hive.ql.plan.ShowIndexesDesc;
- import org.apache.hadoop.hive.ql.plan.ShowLocksDesc;
- import org.apache.hadoop.hive.ql.plan.ShowPartitionsDesc;
- import org.apache.hadoop.hive.ql.plan.ShowTableStatusDesc;
- import org.apache.hadoop.hive.ql.plan.ShowTablesDesc;
- import org.apache.hadoop.hive.ql.plan.SwitchDatabaseDesc;
- import org.apache.hadoop.hive.ql.plan.UnlockTableDesc;
- import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes;
- import org.apache.hadoop.hive.ql.plan.api.StageType;
- import org.apache.hadoop.hive.ql.security.authorization.Privilege;
- import org.apache.hadoop.hive.serde.Constants;
- import org.apache.hadoop.hive.serde2.Deserializer;
- import org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe;
- import org.apache.hadoop.hive.serde2.SerDeException;
- import org.apache.hadoop.hive.serde2.SerDeUtils;
- import org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe;
- import org.apache.hadoop.hive.serde2.dynamic_type.DynamicSerDe;
- import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
- import org.apache.hadoop.hive.shims.HadoopShims;
- import org.apache.hadoop.hive.shims.ShimLoader;
- import org.apache.hadoop.util.ToolRunner;
- /**
- * DDLTask implementation.
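- * Executes a single DDL operation described by the task's {@link DDLWork}:
- * execute() dispatches on whichever descriptor field of the work is non-null
- * (create/drop/alter database, table, view or index, add partition, privileges
- * and roles, locks, show/describe commands, msck, and archive/unarchive).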
- *
- **/
- public class DDLTask extends Task<DDLWork> implements Serializable {
- private static final long serialVersionUID = 1L;
- private static final Log LOG = LogFactory.getLog("hive.ql.exec.DDLTask");
- transient HiveConf conf;
- private static final int separator = Utilities.tabCode;
- private static final int terminator = Utilities.newLineCode;
- // These are suffixes attached to intermediate directory names used in the
- // archiving / un-archiving process.
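- // For example (illustrative names only; the actual suffixes come from the
- // ConfVars.METASTORE_INT_* settings read in initialize()), a partition
- // directory .../hr=12 may get siblings such as .../hr=12_INTERMEDIATE_ARCHIVED
- // and .../hr=12_INTERMEDIATE_ORIGINAL while an ARCHIVE operation is in flight.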
- private static String INTERMEDIATE_ARCHIVED_DIR_SUFFIX;
- private static String INTERMEDIATE_ORIGINAL_DIR_SUFFIX;
- private static String INTERMEDIATE_EXTRACTED_DIR_SUFFIX;
- public DDLTask() {
- super();
- }
- @Override
- public void initialize(HiveConf conf, QueryPlan queryPlan, DriverContext ctx) {
- super.initialize(conf, queryPlan, ctx);
- this.conf = conf;
- INTERMEDIATE_ARCHIVED_DIR_SUFFIX =
- HiveConf.getVar(conf, ConfVars.METASTORE_INT_ARCHIVED);
- INTERMEDIATE_ORIGINAL_DIR_SUFFIX =
- HiveConf.getVar(conf, ConfVars.METASTORE_INT_ORIGINAL);
- INTERMEDIATE_EXTRACTED_DIR_SUFFIX =
- HiveConf.getVar(conf, ConfVars.METASTORE_INT_EXTRACTED);
- }
- @Override
- public int execute(DriverContext driverContext) {
- // Get the Hive object used to talk to the metastore
- Hive db;
- try {
- db = Hive.get(conf);
- CreateDatabaseDesc createDatabaseDesc = work.getCreateDatabaseDesc();
- if (null != createDatabaseDesc) {
- return createDatabase(db, createDatabaseDesc);
- }
- DropDatabaseDesc dropDatabaseDesc = work.getDropDatabaseDesc();
- if (dropDatabaseDesc != null) {
- return dropDatabase(db, dropDatabaseDesc);
- }
- SwitchDatabaseDesc switchDatabaseDesc = work.getSwitchDatabaseDesc();
- if (switchDatabaseDesc != null) {
- return switchDatabase(db, switchDatabaseDesc);
- }
- DescDatabaseDesc descDatabaseDesc = work.getDescDatabaseDesc();
- if (descDatabaseDesc != null) {
- return descDatabase(descDatabaseDesc);
- }
- AlterDatabaseDesc alterDatabaseDesc = work.getAlterDatabaseDesc();
- if (alterDatabaseDesc != null) {
- return alterDatabase(alterDatabaseDesc);
- }
- CreateTableDesc crtTbl = work.getCreateTblDesc();
- if (crtTbl != null) {
- return createTable(db, crtTbl);
- }
- CreateIndexDesc crtIndex = work.getCreateIndexDesc();
- if (crtIndex != null) {
- return createIndex(db, crtIndex);
- }
- AlterIndexDesc alterIndex = work.getAlterIndexDesc();
- if (alterIndex != null) {
- return alterIndex(db, alterIndex);
- }
- DropIndexDesc dropIdx = work.getDropIdxDesc();
- if (dropIdx != null) {
- return dropIndex(db, dropIdx);
- }
- CreateTableLikeDesc crtTblLike = work.getCreateTblLikeDesc();
- if (crtTblLike != null) {
- return createTableLike(db, crtTblLike);
- }
- DropTableDesc dropTbl = work.getDropTblDesc();
- if (dropTbl != null) {
- return dropTable(db, dropTbl);
- }
- AlterTableDesc alterTbl = work.getAlterTblDesc();
- if (alterTbl != null) {
- return alterTable(db, alterTbl);
- }
- CreateViewDesc crtView = work.getCreateViewDesc();
- if (crtView != null) {
- return createView(db, crtView);
- }
- AddPartitionDesc addPartitionDesc = work.getAddPartitionDesc();
- if (addPartitionDesc != null) {
- return addPartition(db, addPartitionDesc);
- }
- AlterTableSimpleDesc simpleDesc = work.getAlterTblSimpleDesc();
- if (simpleDesc != null) {
- if (simpleDesc.getType() == AlterTableTypes.TOUCH) {
- return touch(db, simpleDesc);
- } else if (simpleDesc.getType() == AlterTableTypes.ARCHIVE) {
- return archive(db, simpleDesc, driverContext);
- } else if (simpleDesc.getType() == AlterTableTypes.UNARCHIVE) {
- return unarchive(db, simpleDesc);
- }
- }
- MsckDesc msckDesc = work.getMsckDesc();
- if (msckDesc != null) {
- return msck(db, msckDesc);
- }
- DescTableDesc descTbl = work.getDescTblDesc();
- if (descTbl != null) {
- return describeTable(db, descTbl);
- }
- DescFunctionDesc descFunc = work.getDescFunctionDesc();
- if (descFunc != null) {
- return describeFunction(descFunc);
- }
- ShowDatabasesDesc showDatabases = work.getShowDatabasesDesc();
- if (showDatabases != null) {
- return showDatabases(db, showDatabases);
- }
- ShowTablesDesc showTbls = work.getShowTblsDesc();
- if (showTbls != null) {
- return showTables(db, showTbls);
- }
- ShowTableStatusDesc showTblStatus = work.getShowTblStatusDesc();
- if (showTblStatus != null) {
- return showTableStatus(db, showTblStatus);
- }
- ShowFunctionsDesc showFuncs = work.getShowFuncsDesc();
- if (showFuncs != null) {
- return showFunctions(showFuncs);
- }
- ShowLocksDesc showLocks = work.getShowLocksDesc();
- if (showLocks != null) {
- return showLocks(showLocks);
- }
- LockTableDesc lockTbl = work.getLockTblDesc();
- if (lockTbl != null) {
- return lockTable(lockTbl);
- }
- UnlockTableDesc unlockTbl = work.getUnlockTblDesc();
- if (unlockTbl != null) {
- return unlockTable(unlockTbl);
- }
- ShowPartitionsDesc showParts = work.getShowPartsDesc();
- if (showParts != null) {
- return showPartitions(db, showParts);
- }
- RoleDDLDesc roleDDLDesc = work.getRoleDDLDesc();
- if (roleDDLDesc != null) {
- return roleDDL(roleDDLDesc);
- }
- GrantDesc grantDesc = work.getGrantDesc();
- if (grantDesc != null) {
- return grantOrRevokePrivileges(grantDesc.getPrincipals(), grantDesc
- .getPrivileges(), grantDesc.getPrivilegeSubjectDesc(), grantDesc.getGrantor(), grantDesc.getGrantorType(), grantDesc.isGrantOption(), true);
- }
- RevokeDesc revokeDesc = work.getRevokeDesc();
- if (revokeDesc != null) {
- return grantOrRevokePrivileges(revokeDesc.getPrincipals(), revokeDesc
- .getPrivileges(), revokeDesc.getPrivilegeSubjectDesc(), null, null, false, false);
- }
- ShowGrantDesc showGrantDesc = work.getShowGrantDesc();
- if (showGrantDesc != null) {
- return showGrants(showGrantDesc);
- }
- GrantRevokeRoleDDL grantOrRevokeRoleDDL = work.getGrantRevokeRoleDDL();
- if (grantOrRevokeRoleDDL != null) {
- return grantOrRevokeRole(grantOrRevokeRoleDDL);
- }
- ShowIndexesDesc showIndexes = work.getShowIndexesDesc();
- if (showIndexes != null) {
- return showIndexes(db, showIndexes);
- }
- } catch (InvalidTableException e) {
- console.printError("Table " + e.getTableName() + " does not exist");
- LOG.debug(stringifyException(e));
- return 1;
- } catch (HiveException e) {
- console.printError("FAILED: Error in metadata: " + e.getMessage(), "\n"
- + stringifyException(e));
- LOG.debug(stringifyException(e));
- return 1;
- } catch (Exception e) {
- console.printError("Failed with exception " + e.getMessage(), "\n"
- + stringifyException(e));
- return (1);
- }
- assert false;
- return 0;
- }
- private int grantOrRevokeRole(GrantRevokeRoleDDL grantOrRevokeRoleDDL)
- throws HiveException {
- try {
- boolean grantRole = grantOrRevokeRoleDDL.getGrant();
- List<PrincipalDesc> principals = grantOrRevokeRoleDDL.getPrincipalDesc();
- List<String> roles = grantOrRevokeRoleDDL.getRoles();
- for (PrincipalDesc principal : principals) {
- String userName = principal.getName();
- for (String roleName : roles) {
- if (grantRole) {
- db.grantRole(roleName, userName, principal.getType(),
- grantOrRevokeRoleDDL.getGrantor(), grantOrRevokeRoleDDL
- .getGrantorType(), grantOrRevokeRoleDDL.isGrantOption());
- } else {
- db.revokeRole(roleName, userName, principal.getType());
- }
- }
- }
- } catch (Exception e) {
- throw new HiveException(e);
- }
- return 0;
- }
- private int showGrants(ShowGrantDesc showGrantDesc) throws HiveException {
- try {
- Path resFile = new Path(showGrantDesc.getResFile());
- FileSystem fs = resFile.getFileSystem(conf);
- DataOutput outStream = fs.create(resFile);
- PrincipalDesc principalDesc = showGrantDesc.getPrincipalDesc();
- PrivilegeObjectDesc hiveObjectDesc = showGrantDesc.getHiveObj();
- String principalName = principalDesc.getName();
- if (hiveObjectDesc == null) {
- List<HiveObjectPrivilege> users = db.showPrivilegeGrant(
- HiveObjectType.GLOBAL, principalName, principalDesc.getType(),
- null, null, null, null);
- if (users != null && users.size() > 0) {
- boolean first = true;
- for (HiveObjectPrivilege usr : users) {
- if (!first) {
- outStream.write(terminator);
- } else {
- first = false;
- }
- writeGrantInfo(outStream, principalDesc.getType(), principalName,
- null, null, null, null, usr.getGrantInfo());
- }
- }
- } else {
- String obj = hiveObjectDesc.getObject();
- boolean notFound = true;
- String dbName = null;
- String tableName = null;
- Table tableObj = null;
- Database dbObj = null;
- if (hiveObjectDesc.getTable()) {
- String[] dbTab = obj.split("\\.");
- if (dbTab.length == 2) {
- dbName = dbTab[0];
- tableName = dbTab[1];
- } else {
- dbName = db.getCurrentDatabase();
- tableName = obj;
- }
- dbObj = db.getDatabase(dbName);
- tableObj = db.getTable(dbName, tableName);
- notFound = (dbObj == null || tableObj == null);
- } else {
- dbName = hiveObjectDesc.getObject();
- dbObj = db.getDatabase(dbName);
- notFound = (dbObj == null);
- }
- if (notFound) {
- throw new HiveException(obj + " cannot be found");
- }
- String partName = null;
- List<String> partValues = null;
- if (hiveObjectDesc.getPartSpec() != null) {
- partName = Warehouse
- .makePartName(hiveObjectDesc.getPartSpec(), false);
- partValues = Warehouse.getPartValuesFromPartName(partName);
- }
- if (!hiveObjectDesc.getTable()) {
- // show database level privileges
- List<HiveObjectPrivilege> dbs = db.showPrivilegeGrant(HiveObjectType.DATABASE, principalName,
- principalDesc.getType(), dbName, null, null, null);
- if (dbs != null && dbs.size() > 0) {
- boolean first = true;
- for (HiveObjectPrivilege dbPriv : dbs) {
- if (!first) {
- outStream.write(terminator);
- } else {
- first = false;
- }
- writeGrantInfo(outStream, principalDesc.getType(), principalName,
- dbName, null, null, null, dbPriv.getGrantInfo());
- }
- }
- } else {
- if (showGrantDesc.getColumns() != null) {
- // show column level privileges
- for (String columnName : showGrantDesc.getColumns()) {
- List<HiveObjectPrivilege> columnPrivs = db.showPrivilegeGrant(
- HiveObjectType.COLUMN, principalName,
- principalDesc.getType(), dbName, tableName, partValues,
- columnName);
- if (columnPrivs != null && columnPrivs.size() > 0) {
- boolean first = true;
- for (HiveObjectPrivilege col : columnPrivs) {
- if (!first) {
- outStream.write(terminator);
- } else {
- first = false;
- }
- writeGrantInfo(outStream, principalDesc.getType(),
- principalName, dbName, tableName, partName, columnName,
- col.getGrantInfo());
- }
- }
- }
- } else if (hiveObjectDesc.getPartSpec() != null) {
- // show partition level privileges
- List<HiveObjectPrivilege> parts = db.showPrivilegeGrant(
- HiveObjectType.PARTITION, principalName, principalDesc
- .getType(), dbName, tableName, partValues, null);
- if (parts != null && parts.size() > 0) {
- boolean first = true;
- for (HiveObjectPrivilege part : parts) {
- if (!first) {
- outStream.write(terminator);
- } else {
- first = false;
- }
- writeGrantInfo(outStream, principalDesc.getType(),
- principalName, dbName, tableName, partName, null, part.getGrantInfo());
- }
- }
- } else {
- // show table level privileges
- List<HiveObjectPrivilege> tbls = db.showPrivilegeGrant(
- HiveObjectType.TABLE, principalName, principalDesc.getType(),
- dbName, tableName, null, null);
- if (tbls != null && tbls.size() > 0) {
- boolean first = true;
- for (HiveObjectPrivilege tbl : tbls) {
- if (!first) {
- outStream.write(terminator);
- } else {
- first = false;
- }
- writeGrantInfo(outStream, principalDesc.getType(),
- principalName, dbName, tableName, null, null, tbl.getGrantInfo());
- }
- }
- }
- }
- }
- ((FSDataOutputStream) outStream).close();
- } catch (FileNotFoundException e) {
- LOG.info("show table status: " + stringifyException(e));
- return 1;
- } catch (IOException e) {
- LOG.info("show table status: " + stringifyException(e));
- return 1;
- } catch (Exception e) {
- e.printStackTrace();
- throw new HiveException(e);
- }
- return 0;
- }
- private int grantOrRevokePrivileges(List<PrincipalDesc> principals,
- List<PrivilegeDesc> privileges, PrivilegeObjectDesc privSubjectDesc,
- String grantor, PrincipalType grantorType, boolean grantOption, boolean isGrant) {
- if (privileges == null || privileges.size() == 0) {
- console.printError("No privilege found.");
- return 1;
- }
- String dbName = null;
- String tableName = null;
- Table tableObj = null;
- Database dbObj = null;
- try {
- if (privSubjectDesc != null) {
- if (privSubjectDesc.getPartSpec() != null && isGrant) {
- throw new HiveException("Grant does not support partition level.");
- }
- String obj = privSubjectDesc.getObject();
- boolean notFound = true;
- if (privSubjectDesc.getTable()) {
- String[] dbTab = obj.split("\\.");
- if (dbTab.length == 2) {
- dbName = dbTab[0];
- tableName = dbTab[1];
- } else {
- dbName = db.getCurrentDatabase();
- tableName = obj;
- }
- dbObj = db.getDatabase(dbName);
- tableObj = db.getTable(dbName, tableName);
- notFound = (dbObj == null || tableObj == null);
- } else {
- dbName = privSubjectDesc.getObject();
- dbObj = db.getDatabase(dbName);
- notFound = (dbObj == null);
- }
- if (notFound) {
- throw new HiveException(obj + " cannot be found");
- }
- }
- PrivilegeBag privBag = new PrivilegeBag();
- if (privSubjectDesc == null) {
- for (int idx = 0; idx < privileges.size(); idx++) {
- Privilege priv = privileges.get(idx).getPrivilege();
- if (privileges.get(idx).getColumns() != null
- && privileges.get(idx).getColumns().size() > 0) {
- throw new HiveException(
- "For user-level privileges, column sets should be null. columns="
- + privileges.get(idx).getColumns().toString());
- }
- privBag.addToPrivileges(new HiveObjectPrivilege(new HiveObjectRef(
- HiveObjectType.GLOBAL, null, null, null, null), null, null,
- new PrivilegeGrantInfo(priv.toString(), 0, grantor, grantorType,
- grantOption)));
- }
- } else {
- org.apache.hadoop.hive.metastore.api.Partition partObj = null;
- List<String> partValues = null;
- if (tableObj != null) {
- if ((!tableObj.isPartitioned())
- && privSubjectDesc.getPartSpec() != null) {
- throw new HiveException(
- "Table is not partitioned, but partition name is present: partSpec="
- + privSubjectDesc.getPartSpec().toString());
- }
- if (privSubjectDesc.getPartSpec() != null) {
- partObj = db.getPartition(tableObj, privSubjectDesc.getPartSpec(),
- false).getTPartition();
- partValues = partObj.getValues();
- }
- }
- for (PrivilegeDesc privDesc : privileges) {
- List<String> columns = privDesc.getColumns();
- Privilege priv = privDesc.getPrivilege();
- if (columns != null && columns.size() > 0) {
- if (!priv.supportColumnLevel()) {
- throw new HiveException(priv.toString()
- + " does not support column level.");
- }
- if (privSubjectDesc == null || tableName == null) {
- throw new HiveException(
- "For user-level/database-level privileges, column sets should be null. columns="
- + columns);
- }
- for (int i = 0; i < columns.size(); i++) {
- privBag.addToPrivileges(new HiveObjectPrivilege(
- new HiveObjectRef(HiveObjectType.COLUMN, dbName, tableName,
- partValues, columns.get(i)), null, null, new PrivilegeGrantInfo(priv.toString(), 0, grantor, grantorType, grantOption)));
- }
- } else {
- if (privSubjectDesc.getTable()) {
- if (privSubjectDesc.getPartSpec() != null) {
- privBag.addToPrivileges(new HiveObjectPrivilege(
- new HiveObjectRef(HiveObjectType.PARTITION, dbName,
- tableName, partValues, null), null, null, new PrivilegeGrantInfo(priv.toString(), 0, grantor, grantorType, grantOption)));
- } else {
- privBag
- .addToPrivileges(new HiveObjectPrivilege(
- new HiveObjectRef(HiveObjectType.TABLE, dbName,
- tableName, null, null), null, null, new PrivilegeGrantInfo(priv.toString(), 0, grantor, grantorType, grantOption)));
- }
- } else {
- privBag.addToPrivileges(new HiveObjectPrivilege(
- new HiveObjectRef(HiveObjectType.DATABASE, dbName, null,
- null, null), null, null, new PrivilegeGrantInfo(priv.toString(), 0, grantor, grantorType, grantOption)));
- }
- }
- }
- }
- for (PrincipalDesc principal : principals) {
- for (int i = 0; i < privBag.getPrivileges().size(); i++) {
- HiveObjectPrivilege objPrivs = privBag.getPrivileges().get(i);
- objPrivs.setPrincipalName(principal.getName());
- objPrivs.setPrincipalType(principal.getType());
- }
- if (isGrant) {
- db.grantPrivileges(privBag);
- } else {
- db.revokePrivileges(privBag);
- }
- }
- } catch (Exception e) {
- console.printError("Error: " + e.getMessage());
- return 1;
- }
- return 0;
- }
- private int roleDDL(RoleDDLDesc roleDDLDesc) {
- RoleDDLDesc.RoleOperation operation = roleDDLDesc.getOperation();
- try {
- if (operation.equals(RoleDDLDesc.RoleOperation.CREATE_ROLE)) {
- db.createRole(roleDDLDesc.getName(), roleDDLDesc.getRoleOwnerName());
- } else if (operation.equals(RoleDDLDesc.RoleOperation.DROP_ROLE)) {
- db.dropRole(roleDDLDesc.getName());
- } else if (operation.equals(RoleDDLDesc.RoleOperation.SHOW_ROLE_GRANT)) {
- List<Role> roles = db.showRoleGrant(roleDDLDesc.getName(), roleDDLDesc
- .getPrincipalType());
- if (roles != null && roles.size() > 0) {
- Path resFile = new Path(roleDDLDesc.getResFile());
- FileSystem fs = resFile.getFileSystem(conf);
- DataOutput outStream = fs.create(resFile);
- for (Role role : roles) {
- outStream.writeBytes("role name:" + role.getRoleName());
- outStream.write(terminator);
- }
- ((FSDataOutputStream) outStream).close();
- }
- } else {
- throw new HiveException("Unkown role operation "
- + operation.getOperationName());
- }
- } catch (HiveException e) {
- console.printError("Error in role operation "
- + operation.getOperationName() + " on role name "
- + roleDDLDesc.getName() + ", error message " + e.getMessage());
- return 1;
- } catch (IOException e) {
- LOG.info("role ddl exception: " + stringifyException(e));
- return 1;
- }
- return 0;
- }
- private int alterDatabase(AlterDatabaseDesc alterDbDesc) throws HiveException {
- String dbName = alterDbDesc.getDatabaseName();
- Database database = db.getDatabase(dbName);
- Map<String, String> newParams = alterDbDesc.getDatabaseProperties();
- if (database != null) {
- Map<String, String> params = database.getParameters();
- // if both old and new params are not null, merge them
- if (params != null && newParams != null) {
- params.putAll(newParams);
- database.setParameters(params);
- } else { // if one of them is null, replace the old params with the new one
- database.setParameters(newParams);
- }
- db.alterDatabase(database.getName(), database);
- } else {
- throw new HiveException("ERROR: The database " + dbName + " does not exist.");
- }
- return 0;
- }
- private int dropIndex(Hive db, DropIndexDesc dropIdx) throws HiveException {
- db.dropIndex(db.getCurrentDatabase(), dropIdx.getTableName(),
- dropIdx.getIndexName(), true);
- return 0;
- }
- private int createIndex(Hive db, CreateIndexDesc crtIndex) throws HiveException {
- if( crtIndex.getSerde() != null) {
- validateSerDe(crtIndex.getSerde());
- }
- db
- .createIndex(
- crtIndex.getTableName(), crtIndex.getIndexName(), crtIndex.getIndexTypeHandlerClass(),
- crtIndex.getIndexedCols(), crtIndex.getIndexTableName(), crtIndex.getDeferredRebuild(),
- crtIndex.getInputFormat(), crtIndex.getOutputFormat(), crtIndex.getSerde(),
- crtIndex.getStorageHandler(), crtIndex.getLocation(), crtIndex.getIdxProps(), crtIndex.getTblProps(),
- crtIndex.getSerdeProps(), crtIndex.getCollItemDelim(), crtIndex.getFieldDelim(), crtIndex.getFieldEscape(),
- crtIndex.getLineDelim(), crtIndex.getMapKeyDelim(), crtIndex.getIndexComment()
- );
- return 0;
- }
- private int alterIndex(Hive db, AlterIndexDesc alterIndex) throws HiveException {
- String dbName = alterIndex.getDbName();
- String baseTableName = alterIndex.getBaseTableName();
- String indexName = alterIndex.getIndexName();
- Index idx = db.getIndex(dbName, baseTableName, indexName);
- if (alterIndex.getOp() == AlterIndexDesc.AlterIndexTypes.ADDPROPS) {
- idx.getParameters().putAll(alterIndex.getProps());
- } else {
- console.printError("Unsupported Alter commnad");
- return 1;
- }
- // set last modified by properties
- if (!updateModifiedParameters(idx.getParameters(), conf)) {
- return 1;
- }
- try {
- db.alterIndex(dbName, baseTableName, indexName, idx);
- } catch (InvalidOperationException e) {
- console.printError("Invalid alter operation: " + e.getMessage());
- LOG.info("alter index: " + stringifyException(e));
- return 1;
- } catch (HiveException e) {
- console.printError("Invalid alter operation: " + e.getMessage());
- return 1;
- }
- return 0;
- }
- /**
- * Add a partition to a table.
- *
- * @param db
- * Database to add the partition to.
- * @param addPartitionDesc
- * Add this partition.
- * @return Returns 0 when execution succeeds and above 0 if it fails.
- * @throws HiveException
- */
- private int addPartition(Hive db, AddPartitionDesc addPartitionDesc) throws HiveException {
- Table tbl = db.getTable(addPartitionDesc.getDbName(), addPartitionDesc.getTableName());
- validateAlterTableType(tbl, AlterTableDesc.AlterTableTypes.ADDPARTITION);
- // If the add partition was created with IF NOT EXISTS, then we should
- // not throw an error if the specified part does exist.
- Partition checkPart = db.getPartition(tbl, addPartitionDesc.getPartSpec(), false);
- if (checkPart != null && addPartitionDesc.getIfNotExists()) {
- return 0;
- }
- if (addPartitionDesc.getLocation() == null) {
- db.createPartition(tbl, addPartitionDesc.getPartSpec());
- } else {
- // set partition path relative to table
- db.createPartition(tbl, addPartitionDesc.getPartSpec(), new Path(tbl
- .getPath(), addPartitionDesc.getLocation()));
- }
- Partition part = db
- .getPartition(tbl, addPartitionDesc.getPartSpec(), false);
- work.getOutputs().add(new WriteEntity(part));
- return 0;
- }
- /**
- * Rewrite the partition's metadata and force the pre/post execute hooks to
- * be fired.
- *
- * @param db
- * @param touchDesc
- * @return
- * @throws HiveException
- */
- private int touch(Hive db, AlterTableSimpleDesc touchDesc)
- throws HiveException {
- String dbName = touchDesc.getDbName();
- String tblName = touchDesc.getTableName();
- Table tbl = db.getTable(dbName, tblName);
- validateAlterTableType(tbl, AlterTableDesc.AlterTableTypes.TOUCH);
- if (touchDesc.getPartSpec() == null) {
- try {
- db.alterTable(tblName, tbl);
- } catch (InvalidOperationException e) {
- throw new HiveException("Uable to update table");
- }
- work.getInputs().add(new ReadEntity(tbl));
- work.getOutputs().add(new WriteEntity(tbl));
- } else {
- Partition part = db.getPartition(tbl, touchDesc.getPartSpec(), false);
- if (part == null) {
- throw new HiveException("Specified partition does not exist");
- }
- try {
- db.alterPartition(tblName, part);
- } catch (InvalidOperationException e) {
- throw new HiveException(e);
- }
- work.getInputs().add(new ReadEntity(part));
- work.getOutputs().add(new WriteEntity(part));
- }
- return 0;
- }
- /**
- * Determines whether a partition has been archived
- *
- * @param p
- * @return
- */
- private boolean isArchived(Partition p) {
- Map<String, String> params = p.getParameters();
- if ("true".equalsIgnoreCase(params.get(
- org.apache.hadoop.hive.metastore.api.Constants.IS_ARCHIVED))) {
- return true;
- } else {
- return false;
- }
- }
- private void setIsArchived(Partition p, boolean state) {
- Map<String, String> params = p.getParameters();
- if (state) {
- params.put(org.apache.hadoop.hive.metastore.api.Constants.IS_ARCHIVED,
- "true");
- } else {
- params.remove(org.apache.hadoop.hive.metastore.api.Constants.IS_ARCHIVED);
- }
- }
- private String getOriginalLocation(Partition p) {
- Map<String, String> params = p.getParameters();
- return params.get(
- org.apache.hadoop.hive.metastore.api.Constants.ORIGINAL_LOCATION);
- }
- private void setOriginalLocation(Partition p, String loc) {
- Map<String, String> params = p.getParameters();
- if (loc == null) {
- params.remove(org.apache.hadoop.hive.metastore.api.Constants.ORIGINAL_LOCATION);
- } else {
- params.put(org.apache.hadoop.hive.metastore.api.Constants.ORIGINAL_LOCATION, loc);
- }
- }
- // Returns only the path component of the URI
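- // e.g. (illustrative values) parentDir = hdfs://nn:8020/warehouse/t/ds=1 and
- // archiveName = "data.har" yield "/warehouse/t/ds=1/data.har"; the scheme and
- // authority of the parent URI are dropped.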
- private String getArchiveDirOnly(Path parentDir, String archiveName) {
- URI parentUri = parentDir.toUri();
- Path harDir = new Path(parentUri.getPath(), archiveName);
- return harDir.toString();
- }
- /**
- * Sets the appropriate attributes in the supplied Partition object to mark
- * it as archived. Note that the metastore is not touched - a separate
- * call to alter_partition is needed.
- *
- * @param p - the partition object to modify
- * @param parentDir - the parent directory of the archive, which is the
- * original directory that the partition's files resided in
- * @param dirInArchive - the directory within the archive file that contains
- * the partitions files
- * @param archiveName - the name of the archive
- * @throws URISyntaxException
- */
- private void setArchived(Partition p, Path parentDir, String dirInArchive, String archiveName)
- throws URISyntaxException {
- assert(isArchived(p) == false);
- Map<String, String> params = p.getParameters();
- URI parentUri = parentDir.toUri();
- String parentHost = parentUri.getHost();
- String harHost = null;
- if (parentHost == null) {
- harHost = "";
- } else {
- harHost = parentUri.getScheme() + "-" + parentHost;
- }
- // harUri is used to access the partition's files, which are in the archive
- // The format of the URI is something like:
- // har://underlyingfsscheme-host:port/archivepath
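- // e.g. (illustrative values) for parentDir hdfs://nn:8020/warehouse/t/ds=1,
- // archiveName "data.har" and an empty dirInArchive, the resulting location is
- // har://hdfs-nn:8020/warehouse/t/ds=1/data.har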
- URI harUri = null;
- if (dirInArchive.length() == 0) {
- harUri = new URI("har", parentUri.getUserInfo(), harHost, parentUri.getPort(),
- getArchiveDirOnly(parentDir, archiveName),
- parentUri.getQuery(), parentUri.getFragment());
- } else {
- harUri = new URI("har", parentUri.getUserInfo(), harHost, parentUri.getPort(),
- new Path(getArchiveDirOnly(parentDir, archiveName), dirInArchive).toUri().getPath(),
- parentUri.getQuery(), parentUri.getFragment());
- }
- setIsArchived(p, true);
- setOriginalLocation(p, parentDir.toString());
- p.setLocation(harUri.toString());
- }
- /**
- * Sets the appropriate attributes in the supplied Partition object to mark
- * it as not archived. Note that the metastore is not touched - a separate
- * call to alter_partition is needed.
- *
- * @param p - the partition to modify
- */
- private void setUnArchived(Partition p) {
- assert(isArchived(p) == true);
- String parentDir = getOriginalLocation(p);
- setIsArchived(p, false);
- setOriginalLocation(p, null);
- assert(parentDir != null);
- p.setLocation(parentDir);
- }
- private boolean pathExists(Path p) throws HiveException {
- try {
- FileSystem fs = p.getFileSystem(conf);
- return fs.exists(p);
- } catch (IOException e) {
- throw new HiveException(e);
- }
- }
- private void moveDir(FileSystem fs, Path from, Path to) throws HiveException {
- try {
- if (!fs.rename(from, to)) {
- throw new HiveException("Moving " + from + " to " + to + " failed!");
- }
- } catch (IOException e) {
- throw new HiveException(e);
- }
- }
- private void deleteDir(Path dir) throws HiveException {
- try {
- Warehouse wh = new Warehouse(conf);
- wh.deleteDir(dir, true);
- } catch (MetaException e) {
- throw new HiveException(e);
- }
- }
- private int archive(Hive db, AlterTableSimpleDesc simpleDesc, DriverContext driverContext)
- throws HiveException {
- String dbName = simpleDesc.getDbName();
- String tblName = simpleDesc.getTableName();
- Table tbl = db.getTable(dbName, tblName);
- validateAlterTableType(tbl, AlterTableDesc.AlterTableTypes.ARCHIVE);
- Map<String, String> partSpec = simpleDesc.getPartSpec();
- Partition p = db.getPartition(tbl, partSpec, false);
- if (tbl.getTableType() != TableType.MANAGED_TABLE) {
- throw new HiveException("ARCHIVE can only be performed on managed tables");
- }
- if (p == null) {
- throw new HiveException("Specified partition does not exist");
- }
- if (isArchived(p)) {
- // If there were a failure right after the metadata was updated in an
- // archiving operation, it's possible that the original, unarchived files
- // weren't deleted.
- Path originalDir = new Path(getOriginalLocation(p));
- Path leftOverIntermediateOriginal = new Path(originalDir.getParent(),
- originalDir.getName() + INTERMEDIATE_ORIGINAL_DIR_SUFFIX);
- if (pathExists(leftOverIntermediateOriginal)) {
- console.printInfo("Deleting " + leftOverIntermediateOriginal +
- " left over from a previous archiving operation");
- deleteDir(leftOverIntermediateOriginal);
- }
- throw new HiveException("Specified partition is already archived");
- }
- Path originalDir = p.getPartitionPath();
- Path intermediateArchivedDir = new Path(originalDir.getParent(),
- originalDir.getName() + INTERMEDIATE_ARCHIVED_DIR_SUFFIX);
- Path intermediateOriginalDir = new Path(originalDir.getParent(),
- originalDir.getName() + INTERMEDIATE_ORIGINAL_DIR_SUFFIX);
- String archiveName = "data.har";
- FileSystem fs = null;
- try {
- fs = originalDir.getFileSystem(conf);
- } catch (IOException e) {
- throw new HiveException(e);
- }
- // The following steps seem roundabout, but they are meant to aid in
- // recovery if a failure occurs and to keep a consistent state in the FS
- // Steps:
- // 1. Create the archive in a temporary folder
- // 2. Move the archive dir to an intermediate dir that is at the same
- // level as the original partition dir. Call the new dir
- // intermediate-archive.
- // 3. Rename the original partition dir to an intermediate dir. Call the
- // renamed dir intermediate-original
- // 4. Rename intermediate-archive to the original partition dir
- // 5. Change the metadata
- // 6. Delete the original partition files in intermediate-original
- // The original partition files are deleted after the metadata change
- // because the presence of those files is used to indicate whether
- // the original partition directory contains archived or unarchived files.
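- // Illustration (directory names are examples; the real intermediate suffixes
- // come from the ConfVars.METASTORE_INT_* settings):
- //   before step 2:  .../ds=1/                          original files
- //   after  step 2:  .../ds=1_INTERMEDIATE_ARCHIVED/    contains data.har
- //   after  step 3:  .../ds=1_INTERMEDIATE_ORIGINAL/    original files
- //   after  step 4:  .../ds=1/                          contains data.har
- //   after  step 6:  .../ds=1_INTERMEDIATE_ORIGINAL     deleted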
- // Create an archived version of the partition in a directory ending in
- // INTERMEDIATE_ARCHIVED_DIR_SUFFIX that's at the same level as the partition,
- // if it does not already exist. If it does exist, we assume the dir is good
- // to use as the move operation that created it is atomic.
- if (!pathExists(intermediateArchivedDir) &&
- !pathExists(intermediateOriginalDir)) {
- // First create the archive in a tmp dir so that if the job fails, the
- // bad files don't pollute the filesystem
- Path tmpDir = new Path(driverContext.getCtx().getExternalTmpFileURI(originalDir.toUri()), "partlevel");
- console.printInfo("Creating " + archiveName + " for " + originalDir.toString());
- console.printInfo("in " + tmpDir);
- console.printInfo("Please wait... (this may take a while)");
- // Create the Hadoop archive
- HadoopShims shim = ShimLoader.getHadoopShims();
- int ret=0;
- try {
- ret = shim.createHadoopArchive(conf, originalDir, tmpDir, archiveName);
- } catch (Exception e) {
- throw new HiveException(e);
- }
- if (ret != 0) {
- throw new HiveException("Error while creating HAR");
- }
- // Move from the tmp dir to an intermediate directory, in the same level as
- // the partition directory. e.g. .../hr=12-intermediate-archived
- try {
- console.printInfo("Moving " + tmpDir + " to " + intermediateArchivedDir);
- if (pathExists(intermediateArchivedDir)) {
- throw new HiveException("The intermediate archive directory already exists.");
- }
- moveDir(fs, tmpDir, intermediateArchivedDir);
- } catch (IOException e) {
- throw new HiveException("Error while moving tmp directory");
- }
- } else {
- if (pathExists(intermediateArchivedDir)) {
- console.printInfo("Intermediate archive directory " + intermediateArchivedDir +
- " already exists. Assuming it contains an archived version of the partition");
- }
- }
- // If we get to here, we know that we've archived the partition files, but
- // they may be in the original partition location, or in the intermediate
- // original dir.
- // Move the original parent directory to the intermediate original directory
- // if the move hasn't been made already
- if (!pathExists(intermediateOriginalDir)) {
- console.printInfo("Moving " + originalDir + " to " +
- intermediateOriginalDir);
- moveDir(fs, originalDir, intermediateOriginalDir);
- } else {
- console.printInfo(intermediateOriginalDir + " already exists. " +
- "Assuming it contains the original files in the partition");
- }
- // If there's a failure from here to when the metadata is updated,
- // there will be no data in the partition, or an error while trying to read
- // the partition (if the archive files have been moved to the original
- // partition directory.) But re-running the archive command will allow
- // recovery
- // Move the intermediate archived directory to the original parent directory
- if (!pathExists(originalDir)) {
- console.printInfo("Moving " + intermediateArchivedDir + " to " +
- originalDir);
- moveDir(fs, intermediateArchivedDir, originalDir);
- } else {
- console.printInfo(originalDir + " already exists. " +
- "Assuming it contains the archived version of the partition");
- }
- // Record this change in the metastore
- try {
- boolean parentSettable =
- conf.getBoolVar(HiveConf.ConfVars.HIVEHARPARENTDIRSETTABLE);
- // dirInArchive is the directory within the archive that has all the files
- // for this partition. With older versions of Hadoop, archiving
- // a directory would produce the same directory structure
- // in the archive. So if you created myArchive.har of /tmp/myDir, the
- // files in /tmp/myDir would be located under myArchive.har/tmp/myDir/*
- // In this case, dirInArchive should be tmp/myDir
- // With newer versions of Hadoop, the parent directory could be specified.
- // Assuming the parent directory was set to /tmp/myDir when creating the
- // archive, the files can be found under myArchive.har/*
- // In this case, dirInArchive should be empty
- String dirInArchive = "";
- if (!parentSettable) {
- dirInArchive = originalDir.toUri().getPath();
- if(dirInArchive.length() > 1 && dirInArchive.charAt(0)=='/') {
- dirInArchive = dirInArchive.substring(1);
- }
- }
- setArchived(p, originalDir, dirInArchive, archiveName);
- db.alterPartition(tblName, p);
- } catch (Exception e) {
- throw new HiveException("Unable to change the partition info for HAR", e);
- }
- // If a failure occurs here, the directory containing the original files
- // will not be deleted. The user will run ARCHIVE again to clear this up
- deleteDir(intermediateOriginalDir);
- return 0;
- }
- private int unarchive(Hive db, AlterTableSimpleDesc simpleDesc)
- throws HiveException {
- String dbName = simpleDesc.getDbName();
- String tblName = simpleDesc.getTableName();
- Table tbl = db.getTable(dbName, tblName);
- validateAlterTableType(tbl, AlterTableDesc.AlterTableTypes.UNARCHIVE);
- // Means user specified a table, not a partition
- if (simpleDesc.getPartSpec() == null) {
- throw new HiveException("ARCHIVE is for partitions only");
- }
- Map<String, String> partSpec = simpleDesc.getPartSpec();
- Partition p = db.getPartition(tbl, partSpec, false);
- if (tbl.getTableType() != TableType.MANAGED_TABLE) {
- throw new HiveException("UNARCHIVE can only be performed on managed tables");
- }
- if (p == null) {
- throw new HiveException("Specified partition does not exist");
- }
- if (!isArchived(p)) {
- Path location = new Path(p.getLocation());
- Path leftOverArchiveDir = new Path(location.getParent(),
- location.getName() + INTERMEDIATE_ARCHIVED_DIR_SUFFIX);
- if (pathExists(leftOverArchiveDir)) {
- console.printInfo("Deleting " + leftOverArchiveDir + " left over " +
- "from a previous unarchiving operation");
- deleteDir(leftOverArchiveDir);
- }
- throw new HiveException("Specified partition is not archived");
- }
- Path originalLocation = new Path(getOriginalLocation(p));
- Path sourceDir = new Path(p.getLocation());
- Path intermediateArchiveDir = new Path(originalLocation.getParent(),
- originalLocation.getName() + INTERMEDIATE_ARCHIVED_DIR_SUFFIX);
- Path intermediateExtractedDir = new Path(originalLocation.getParent(),
- originalLocation.getName() + INTERMEDIATE_EXTRACTED_DIR_SUFFIX);
- Path tmpDir = new Path(driverContext
- .getCtx()
- .getExternalTmpFileURI(originalLocation.toUri()));
- FileSystem fs = null;
- try {
- fs = tmpDir.getFileSystem(conf);
- // Verify that there are no files in the tmp dir, because if there are, they
- // would be copied to the partition
- FileStatus [] filesInTmpDir = fs.listStatus(tmpDir);
- if (filesInTmpDir != null && filesInTmpDir.length != 0) {
- for (FileStatus file : filesInTmpDir) {
- console.printInfo(file.getPath().toString());
- }
- throw new HiveException("Temporary directory " + tmpDir + " is not empty");
- }
- } catch (IOException e) {
- throw new HiveException(e);
- }
- // Some sanity checks
- if (originalLocation == null) {
- throw new HiveException("Missing archive data in the partition");
- }
- if (!"har".equals(sourceDir.toUri().getScheme())) {
- throw new HiveException("Location should refer to a HAR");
- }
- // Clarification of terms:
- // - The originalLocation directory represents the original directory of the
- // partition's files. They now contain an archived version of those files
- // eg. hdfs:/warehouse/myTable/ds=1/
- // - The source directory is the directory containing all the files that
- // should be in the partition. e.g. har:/warehouse/myTable/ds=1/myTable.har/
- // Note the har:/ scheme
- // Steps:
- // 1. Extract the archive in a temporary folder
- // 2. Move the extracted dir to an intermediate dir that is at the same
- // level as originalLocation. Call the new dir intermediate-extracted.
- // 3. Rename the original partition dir to an intermediate dir. Call the
- // renamed dir intermediate-archive
- // 4. Rename intermediate-extracted to the original partition dir
- // 5. Change the metadata
- // 6. Delete the archived partition files in intermediate-archive
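- // Illustration (names are examples): a partition whose current location is
- //   har://hdfs-nn:8020/warehouse/myTable/ds=1/data.har
- // ends up back at
- //   hdfs://nn:8020/warehouse/myTable/ds=1
- // containing the extracted files once the steps above complete.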
- if (!pathExists(intermediateExtractedDir) &&
- !pathExists(intermediateArchiveDir)) {
- try {
- // Copy the files out of …