
/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java

https://github.com/mkgobaco/hive
Java | 1720 lines | 1161 code | 228 blank | 331 comment
Possible License(s): Apache-2.0
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hive.conf;

import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.PrintStream;
import java.net.URL;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Properties;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import javax.security.auth.login.LoginException;

import org.apache.commons.lang.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.common.classification.InterfaceAudience.LimitedPrivate;
import org.apache.hadoop.hive.shims.ShimLoader;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Shell;
import org.apache.hive.common.HiveCompat;
/**
 * Hive Configuration.
 */
public class HiveConf extends Configuration {

  protected String hiveJar;
  protected Properties origProp;
  protected String auxJars;
  private static final Log l4j = LogFactory.getLog(HiveConf.class);
  private static URL hiveDefaultURL = null;
  private static URL hiveSiteURL = null;
  private static byte[] confVarByteArray = null;
  private static final Map<String, ConfVars> vars = new HashMap<String, ConfVars>();
  private final List<String> restrictList = new ArrayList<String>();
  private boolean isWhiteListRestrictionEnabled = false;
  private final List<String> modWhiteList = new ArrayList<String>();

  static {
    ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
    if (classLoader == null) {
      classLoader = HiveConf.class.getClassLoader();
    }
    hiveDefaultURL = classLoader.getResource("hive-default.xml");

    // Look for hive-site.xml on the CLASSPATH and log its location if found.
    hiveSiteURL = classLoader.getResource("hive-site.xml");

    for (ConfVars confVar : ConfVars.values()) {
      vars.put(confVar.varname, confVar);
    }
  }
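
  // Hedged usage sketch (not part of the original file): the static block above
  // indexes every ConfVars constant by its property name, so resolving a property
  // name to its typed ConfVars entry is a single map lookup, e.g.:
  //
  //   ConfVars confVar = vars.get("hive.exec.parallel"); // -> ConfVars.EXECPARALLEL
  //
  // Callers outside this class would need a public accessor exposing the same lookup.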
  /**
   * Metastore related options that the db is initialized against. When a conf
   * var in this list is changed, the metastore instance for the CLI will
   * be recreated so that the change will take effect.
   */
  public static final HiveConf.ConfVars[] metaVars = {
      HiveConf.ConfVars.METASTOREDIRECTORY,
      HiveConf.ConfVars.METASTOREWAREHOUSE,
      HiveConf.ConfVars.METASTOREURIS,
      HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES,
      HiveConf.ConfVars.METASTORETHRIFTFAILURERETRIES,
      HiveConf.ConfVars.METASTORE_CLIENT_CONNECT_RETRY_DELAY,
      HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT,
      HiveConf.ConfVars.METASTOREPWD,
      HiveConf.ConfVars.METASTORECONNECTURLHOOK,
      HiveConf.ConfVars.METASTORECONNECTURLKEY,
      HiveConf.ConfVars.METASTOREFORCERELOADCONF,
      HiveConf.ConfVars.METASTORESERVERMINTHREADS,
      HiveConf.ConfVars.METASTORESERVERMAXTHREADS,
      HiveConf.ConfVars.METASTORE_TCP_KEEP_ALIVE,
      HiveConf.ConfVars.METASTORE_INT_ORIGINAL,
      HiveConf.ConfVars.METASTORE_INT_ARCHIVED,
      HiveConf.ConfVars.METASTORE_INT_EXTRACTED,
      HiveConf.ConfVars.METASTORE_KERBEROS_KEYTAB_FILE,
      HiveConf.ConfVars.METASTORE_KERBEROS_PRINCIPAL,
      HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL,
      HiveConf.ConfVars.METASTORE_CACHE_PINOBJTYPES,
      HiveConf.ConfVars.METASTORE_CONNECTION_POOLING_TYPE,
      HiveConf.ConfVars.METASTORE_VALIDATE_TABLES,
      HiveConf.ConfVars.METASTORE_VALIDATE_COLUMNS,
      HiveConf.ConfVars.METASTORE_VALIDATE_CONSTRAINTS,
      HiveConf.ConfVars.METASTORE_STORE_MANAGER_TYPE,
      HiveConf.ConfVars.METASTORE_AUTO_CREATE_SCHEMA,
      HiveConf.ConfVars.METASTORE_AUTO_START_MECHANISM_MODE,
      HiveConf.ConfVars.METASTORE_TRANSACTION_ISOLATION,
      HiveConf.ConfVars.METASTORE_CACHE_LEVEL2,
      HiveConf.ConfVars.METASTORE_CACHE_LEVEL2_TYPE,
      HiveConf.ConfVars.METASTORE_IDENTIFIER_FACTORY,
      HiveConf.ConfVars.METASTORE_PLUGIN_REGISTRY_BUNDLE_CHECK,
      HiveConf.ConfVars.METASTORE_AUTHORIZATION_STORAGE_AUTH_CHECKS,
      HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_MAX,
      HiveConf.ConfVars.METASTORE_EVENT_LISTENERS,
      HiveConf.ConfVars.METASTORE_EVENT_CLEAN_FREQ,
      HiveConf.ConfVars.METASTORE_EVENT_EXPIRY_DURATION,
      HiveConf.ConfVars.METASTORE_RAW_STORE_IMPL,
      HiveConf.ConfVars.METASTORE_END_FUNCTION_LISTENERS,
      HiveConf.ConfVars.METASTORE_PART_INHERIT_TBL_PROPS,
      HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_TABLE_PARTITION_MAX,
      HiveConf.ConfVars.METASTORE_INIT_HOOKS,
      HiveConf.ConfVars.METASTORE_PRE_EVENT_LISTENERS,
      HiveConf.ConfVars.HMSHANDLERATTEMPTS,
      HiveConf.ConfVars.HMSHANDLERINTERVAL,
      HiveConf.ConfVars.HMSHANDLERFORCERELOADCONF,
      HiveConf.ConfVars.METASTORE_PARTITION_NAME_WHITELIST_PATTERN,
      HiveConf.ConfVars.METASTORE_DISALLOW_INCOMPATIBLE_COL_TYPE_CHANGES,
      HiveConf.ConfVars.USERS_IN_ADMIN_ROLE,
      HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER,
      HiveConf.ConfVars.HIVE_TXN_MANAGER,
      HiveConf.ConfVars.HIVE_TXN_TIMEOUT,
      HiveConf.ConfVars.HIVE_TXN_MAX_OPEN_BATCH,
  };
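
  // Hedged sketch of how metaVars is typically consumed (illustrative, not from
  // this file): a client can detect whether a metastore-affecting variable changed
  // by diffing a saved snapshot against the current conf, e.g.:
  //
  //   for (HiveConf.ConfVars oneVar : HiveConf.metaVars) {
  //     if (!StringUtils.equals(snapshot.get(oneVar.varname), conf.get(oneVar.varname))) {
  //       // recreate the CLI's metastore client so the change takes effect
  //     }
  //   }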
  /**
   * dbVars are the parameters that can be set per database. If these
   * parameters are set as a database property, when switching to that
   * database, the HiveConf variable will be changed. The change of these
   * parameters will effectively change the DFS and MapReduce clusters
   * for different databases.
   */
  public static final HiveConf.ConfVars[] dbVars = {
      HiveConf.ConfVars.HADOOPBIN,
      HiveConf.ConfVars.METASTOREWAREHOUSE,
      HiveConf.ConfVars.SCRATCHDIR
  };
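
  // Hedged sketch (illustrative only; dbProperties is a hypothetical lookup): when
  // a session switches databases, any dbVars stored as database properties could be
  // applied onto the session conf like so:
  //
  //   for (HiveConf.ConfVars dbVar : HiveConf.dbVars) {
  //     String dbValue = dbProperties.get(dbVar.varname);
  //     if (dbValue != null) {
  //       conf.set(dbVar.varname, dbValue); // e.g. point SCRATCHDIR at another DFS
  //     }
  //   }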
  /**
   * ConfVars.
   *
   * These are the default configuration properties for Hive. Each HiveConf
   * object is initialized as follows:
   *
   * 1) Hadoop configuration properties are applied.
   * 2) ConfVar properties with non-null values are overlaid.
   * 3) hive-site.xml properties are overlaid.
   *
   * WARNING: think twice before adding any Hadoop configuration properties
   * with non-null values to this list as they will override any values defined
   * in the underlying Hadoop configuration.
   */
  public static enum ConfVars {
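    // Hedged illustration of the overlay order documented above: given the default
    // MAXREDUCERS("hive.exec.reducers.max", 1009) below, a hive-site.xml entry
    //
    //   <property>
    //     <name>hive.exec.reducers.max</name>
    //     <value>512</value>
    //   </property>
    //
    // wins over the ConfVars default, which in turn overrides any value set in the
    // underlying Hadoop configuration.
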
    // QL execution stuff
    SCRIPTWRAPPER("hive.exec.script.wrapper", null),
    PLAN("hive.exec.plan", ""),
    PLAN_SERIALIZATION("hive.plan.serialization.format", "kryo"),
    SCRATCHDIR("hive.exec.scratchdir", "/tmp/hive-" + System.getProperty("user.name")),
    LOCALSCRATCHDIR("hive.exec.local.scratchdir",
        System.getProperty("java.io.tmpdir") + File.separator + System.getProperty("user.name")),
    SCRATCHDIRPERMISSION("hive.scratch.dir.permission", "700"),
    SUBMITVIACHILD("hive.exec.submitviachild", false),
    SUBMITLOCALTASKVIACHILD("hive.exec.submit.local.task.via.child", true),
    SCRIPTERRORLIMIT("hive.exec.script.maxerrsize", 100000),
    ALLOWPARTIALCONSUMP("hive.exec.script.allow.partial.consumption", false),
    STREAMREPORTERPERFIX("stream.stderr.reporter.prefix", "reporter:"),
    STREAMREPORTERENABLED("stream.stderr.reporter.enabled", true),
    COMPRESSRESULT("hive.exec.compress.output", false),
    COMPRESSINTERMEDIATE("hive.exec.compress.intermediate", false),
    COMPRESSINTERMEDIATECODEC("hive.intermediate.compression.codec", ""),
    COMPRESSINTERMEDIATETYPE("hive.intermediate.compression.type", ""),
    BYTESPERREDUCER("hive.exec.reducers.bytes.per.reducer", (long) (256 * 1000 * 1000)),
    MAXREDUCERS("hive.exec.reducers.max", 1009), // pick a prime
    PREEXECHOOKS("hive.exec.pre.hooks", ""),
    POSTEXECHOOKS("hive.exec.post.hooks", ""),
    ONFAILUREHOOKS("hive.exec.failure.hooks", ""),
    CLIENTSTATSPUBLISHERS("hive.client.stats.publishers", ""),
    EXECPARALLEL("hive.exec.parallel", false), // parallel query launching
    EXECPARALLETHREADNUMBER("hive.exec.parallel.thread.number", 8),
    HIVESPECULATIVEEXECREDUCERS("hive.mapred.reduce.tasks.speculative.execution", true),
    HIVECOUNTERSPULLINTERVAL("hive.exec.counters.pull.interval", 1000L),
    DYNAMICPARTITIONING("hive.exec.dynamic.partition", true),
    DYNAMICPARTITIONINGMODE("hive.exec.dynamic.partition.mode", "strict"),
    DYNAMICPARTITIONMAXPARTS("hive.exec.max.dynamic.partitions", 1000),
    DYNAMICPARTITIONMAXPARTSPERNODE("hive.exec.max.dynamic.partitions.pernode", 100),
    MAXCREATEDFILES("hive.exec.max.created.files", 100000L),
    DOWNLOADED_RESOURCES_DIR("hive.downloaded.resources.dir",
        System.getProperty("java.io.tmpdir") + File.separator + "${hive.session.id}_resources"),
    DEFAULTPARTITIONNAME("hive.exec.default.partition.name", "__HIVE_DEFAULT_PARTITION__"),
    DEFAULT_ZOOKEEPER_PARTITION_NAME("hive.lockmgr.zookeeper.default.partition.name",
        "__HIVE_DEFAULT_ZOOKEEPER_PARTITION__"),
    // Whether to show a link to the most failed task + debugging tips
    SHOW_JOB_FAIL_DEBUG_INFO("hive.exec.show.job.failure.debug.info", true),
    JOB_DEBUG_CAPTURE_STACKTRACES("hive.exec.job.debug.capture.stacktraces", true),
    JOB_DEBUG_TIMEOUT("hive.exec.job.debug.timeout", 30000),
    TASKLOG_DEBUG_TIMEOUT("hive.exec.tasklog.debug.timeout", 20000),
    OUTPUT_FILE_EXTENSION("hive.output.file.extension", null),
    HIVE_IN_TEST("hive.in.test", false), // internal usage only, true in test mode
    // should Hive determine whether to run in local mode automatically?
    LOCALMODEAUTO("hive.exec.mode.local.auto", false),
    // if yes:
    // run in local mode only if input bytes is less than this. 128MB by default
    LOCALMODEMAXBYTES("hive.exec.mode.local.auto.inputbytes.max", 134217728L),
    // run in local mode only if number of tasks (for map and reduce each) is
    // less than this
    LOCALMODEMAXINPUTFILES("hive.exec.mode.local.auto.input.files.max", 4),
    // if true, DROP TABLE/VIEW does not fail if table/view doesn't exist and IF EXISTS is
    // not specified
    DROPIGNORESNONEXISTENT("hive.exec.drop.ignorenonexistent", true),
    // ignore the mapjoin hint
    HIVEIGNOREMAPJOINHINT("hive.ignore.mapjoin.hint", true),
    // Max number of lines of footer user can set for a table file.
    HIVE_FILE_MAX_FOOTER("hive.file.max.footer", 100),
    // Make column names unique in the result set by using table alias if needed
    HIVE_RESULTSET_USE_UNIQUE_COLUMN_NAMES("hive.resultset.use.unique.column.names", true),
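
    // Hedged usage sketch (accessor names assumed from this class's typed getters,
    // defined later in the file): reading an entry through its ConfVars constant
    // keeps the string key and the expected type in one place, e.g.:
    //
    //   boolean unique =
    //       HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_RESULTSET_USE_UNIQUE_COLUMN_NAMES);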
    // Hadoop Configuration Properties
    // Properties with null values are ignored and exist only for the purpose of giving us
    // a symbolic name to reference in the Hive source code. Properties with non-null
    // values will override any values set in the underlying Hadoop configuration.
    HADOOPBIN("hadoop.bin.path", findHadoopBinary()),
    HIVE_FS_HAR_IMPL("fs.har.impl", "org.apache.hadoop.hive.shims.HiveHarFileSystem"),
    HADOOPFS(ShimLoader.getHadoopShims().getHadoopConfNames().get("HADOOPFS"), null),
    HADOOPMAPFILENAME(ShimLoader.getHadoopShims().getHadoopConfNames().get("HADOOPMAPFILENAME"), null),
    HADOOPMAPREDINPUTDIR(ShimLoader.getHadoopShims().getHadoopConfNames().get("HADOOPMAPREDINPUTDIR"), null),
    HADOOPMAPREDINPUTDIRRECURSIVE(ShimLoader.getHadoopShims().getHadoopConfNames().get("HADOOPMAPREDINPUTDIRRECURSIVE"), false),
    MAPREDMAXSPLITSIZE(ShimLoader.getHadoopShims().getHadoopConfNames().get("MAPREDMAXSPLITSIZE"), 256000000L),
    MAPREDMINSPLITSIZE(ShimLoader.getHadoopShims().getHadoopConfNames().get("MAPREDMINSPLITSIZE"), 1L),
    MAPREDMINSPLITSIZEPERNODE(ShimLoader.getHadoopShims().getHadoopConfNames().get("MAPREDMINSPLITSIZEPERNODE"), 1L),
    MAPREDMINSPLITSIZEPERRACK(ShimLoader.getHadoopShims().getHadoopConfNames().get("MAPREDMINSPLITSIZEPERRACK"), 1L),
    // The number of reduce tasks per job. Hadoop sets this value to 1 by default.
    // By setting this property to -1, Hive will automatically determine the correct
    // number of reducers.
    HADOOPNUMREDUCERS(ShimLoader.getHadoopShims().getHadoopConfNames().get("HADOOPNUMREDUCERS"), -1),
    HADOOPJOBNAME(ShimLoader.getHadoopShims().getHadoopConfNames().get("HADOOPJOBNAME"), null),
    HADOOPSPECULATIVEEXECREDUCERS(ShimLoader.getHadoopShims().getHadoopConfNames().get("HADOOPSPECULATIVEEXECREDUCERS"), true),
    MAPREDSETUPCLEANUPNEEDED(ShimLoader.getHadoopShims().getHadoopConfNames().get("MAPREDSETUPCLEANUPNEEDED"), false),
    MAPREDTASKCLEANUPNEEDED(ShimLoader.getHadoopShims().getHadoopConfNames().get("MAPREDTASKCLEANUPNEEDED"), false),
    // Metastore stuff. Be sure to update HiveConf.metaVars when you add
    // something here!
    METASTOREDIRECTORY("hive.metastore.metadb.dir", ""),
    METASTOREWAREHOUSE("hive.metastore.warehouse.dir", "/user/hive/warehouse"),
    METASTOREURIS("hive.metastore.uris", ""),
    // Number of times to retry a connection to a Thrift metastore server
    METASTORETHRIFTCONNECTIONRETRIES("hive.metastore.connect.retries", 3),
    // Number of times to retry a Thrift metastore call upon failure
    METASTORETHRIFTFAILURERETRIES("hive.metastore.failure.retries", 1),
    // Number of seconds the client should wait between connection attempts
    METASTORE_CLIENT_CONNECT_RETRY_DELAY("hive.metastore.client.connect.retry.delay", 1),
    // Socket timeout for the client connection (in seconds)
    METASTORE_CLIENT_SOCKET_TIMEOUT("hive.metastore.client.socket.timeout", 600),
    METASTOREPWD("javax.jdo.option.ConnectionPassword", "mine"),
    // Class name of JDO connection url hook
    METASTORECONNECTURLHOOK("hive.metastore.ds.connection.url.hook", ""),
    METASTOREMULTITHREADED("javax.jdo.option.Multithreaded", true),
    // Name of the connection url in the configuration
    METASTORECONNECTURLKEY("javax.jdo.option.ConnectionURL",
        "jdbc:derby:;databaseName=metastore_db;create=true"),
    // Whether to force reloading of the metastore configuration (including
    // the connection URL) before the next metastore query that accesses the
    // datastore. Once reloaded, this value is reset to false. Used for
    // testing only.
    METASTOREFORCERELOADCONF("hive.metastore.force.reload.conf", false),
    // Number of attempts to retry connecting after there is a JDO datastore error
    HMSHANDLERATTEMPTS("hive.hmshandler.retry.attempts", 1),
    // Number of milliseconds to wait between retry attempts
    HMSHANDLERINTERVAL("hive.hmshandler.retry.interval", 1000),
    // Whether to force reloading of the HMSHandler configuration (including
    // the connection URL) before the next metastore query that accesses the
    // datastore. Once reloaded, this value is reset to false. Used for
    // testing only.
    HMSHANDLERFORCERELOADCONF("hive.hmshandler.force.reload.conf", false),
    METASTORESERVERMINTHREADS("hive.metastore.server.min.threads", 200),
    METASTORESERVERMAXTHREADS("hive.metastore.server.max.threads", 100000),
    METASTORE_TCP_KEEP_ALIVE("hive.metastore.server.tcp.keepalive", true),
    // Intermediate dir suffixes used for archiving. Not important what they
    // are, as long as collisions are avoided
    METASTORE_INT_ORIGINAL("hive.metastore.archive.intermediate.original",
        "_INTERMEDIATE_ORIGINAL"),
    METASTORE_INT_ARCHIVED("hive.metastore.archive.intermediate.archived",
        "_INTERMEDIATE_ARCHIVED"),
    METASTORE_INT_EXTRACTED("hive.metastore.archive.intermediate.extracted",
        "_INTERMEDIATE_EXTRACTED"),
    METASTORE_KERBEROS_KEYTAB_FILE("hive.metastore.kerberos.keytab.file", ""),
    METASTORE_KERBEROS_PRINCIPAL("hive.metastore.kerberos.principal",
        "hive-metastore/_HOST@EXAMPLE.COM"),
    METASTORE_USE_THRIFT_SASL("hive.metastore.sasl.enabled", false),
    METASTORE_USE_THRIFT_FRAMED_TRANSPORT("hive.metastore.thrift.framed.transport.enabled", false),
    METASTORE_CLUSTER_DELEGATION_TOKEN_STORE_CLS(
        "hive.cluster.delegation.token.store.class",
        "org.apache.hadoop.hive.thrift.MemoryTokenStore"),
    METASTORE_CLUSTER_DELEGATION_TOKEN_STORE_ZK_CONNECTSTR(
        "hive.cluster.delegation.token.store.zookeeper.connectString", ""),
    METASTORE_CLUSTER_DELEGATION_TOKEN_STORE_ZK_ZNODE(
        "hive.cluster.delegation.token.store.zookeeper.znode", "/hive/cluster/delegation"),
    METASTORE_CLUSTER_DELEGATION_TOKEN_STORE_ZK_ACL(
        "hive.cluster.delegation.token.store.zookeeper.acl", ""),
    METASTORE_CACHE_PINOBJTYPES("hive.metastore.cache.pinobjtypes",
        "Table,StorageDescriptor,SerDeInfo,Partition,Database,Type,FieldSchema,Order"),
    METASTORE_CONNECTION_POOLING_TYPE("datanucleus.connectionPoolingType", "BONECP"),
    METASTORE_VALIDATE_TABLES("datanucleus.validateTables", false),
    METASTORE_VALIDATE_COLUMNS("datanucleus.validateColumns", false),
    METASTORE_VALIDATE_CONSTRAINTS("datanucleus.validateConstraints", false),
    METASTORE_STORE_MANAGER_TYPE("datanucleus.storeManagerType", "rdbms"),
    METASTORE_AUTO_CREATE_SCHEMA("datanucleus.autoCreateSchema", true),
    METASTORE_FIXED_DATASTORE("datanucleus.fixedDatastore", false),
    METASTORE_SCHEMA_VERIFICATION("hive.metastore.schema.verification", false),
    METASTORE_AUTO_START_MECHANISM_MODE("datanucleus.autoStartMechanismMode", "checked"),
    METASTORE_TRANSACTION_ISOLATION("datanucleus.transactionIsolation", "read-committed"),
    METASTORE_CACHE_LEVEL2("datanucleus.cache.level2", false),
    METASTORE_CACHE_LEVEL2_TYPE("datanucleus.cache.level2.type", "none"),
    METASTORE_IDENTIFIER_FACTORY("datanucleus.identifierFactory", "datanucleus1"),
    METASTORE_USE_LEGACY_VALUE_STRATEGY("datanucleus.rdbms.useLegacyNativeValueStrategy", true),
    METASTORE_PLUGIN_REGISTRY_BUNDLE_CHECK("datanucleus.plugin.pluginRegistryBundleCheck", "LOG"),
    METASTORE_BATCH_RETRIEVE_MAX("hive.metastore.batch.retrieve.max", 300),
    METASTORE_BATCH_RETRIEVE_TABLE_PARTITION_MAX(
        "hive.metastore.batch.retrieve.table.partition.max", 1000),
    // A comma separated list of hooks which implement MetaStoreInitListener and will be run at
    // the beginning of HMSHandler initialization
    METASTORE_INIT_HOOKS("hive.metastore.init.hooks", ""),
    METASTORE_PRE_EVENT_LISTENERS("hive.metastore.pre.event.listeners", ""),
    METASTORE_EVENT_LISTENERS("hive.metastore.event.listeners", ""),
    // should we do checks against the storage (usually hdfs) for operations like drop_partition
    METASTORE_AUTHORIZATION_STORAGE_AUTH_CHECKS("hive.metastore.authorization.storage.checks", false),
    METASTORE_EVENT_CLEAN_FREQ("hive.metastore.event.clean.freq", 0L),
    METASTORE_EVENT_EXPIRY_DURATION("hive.metastore.event.expiry.duration", 0L),
    METASTORE_EXECUTE_SET_UGI("hive.metastore.execute.setugi", true),
    METASTORE_PARTITION_NAME_WHITELIST_PATTERN(
        "hive.metastore.partition.name.whitelist.pattern", ""),
    // Whether to enable integral JDO pushdown. For partition columns storing integers
    // in non-canonical form (e.g. '012'), it may not work, so it's off by default.
    METASTORE_INTEGER_JDO_PUSHDOWN("hive.metastore.integral.jdo.pushdown", false),
    METASTORE_TRY_DIRECT_SQL("hive.metastore.try.direct.sql", true),
    METASTORE_TRY_DIRECT_SQL_DDL("hive.metastore.try.direct.sql.ddl", true),
    METASTORE_DISALLOW_INCOMPATIBLE_COL_TYPE_CHANGES(
        "hive.metastore.disallow.incompatible.col.type.changes", false),
    // Default parameters for creating tables
    NEWTABLEDEFAULTPARA("hive.table.parameters.default", ""),
    // Parameters to copy over when creating a table with Create Table Like.
    DDL_CTL_PARAMETERS_WHITELIST("hive.ddl.createtablelike.properties.whitelist", ""),
    METASTORE_RAW_STORE_IMPL("hive.metastore.rawstore.impl",
        "org.apache.hadoop.hive.metastore.ObjectStore"),
    METASTORE_CONNECTION_DRIVER("javax.jdo.option.ConnectionDriverName",
        "org.apache.derby.jdbc.EmbeddedDriver"),
    METASTORE_MANAGER_FACTORY_CLASS("javax.jdo.PersistenceManagerFactoryClass",
        "org.datanucleus.api.jdo.JDOPersistenceManagerFactory"),
    METASTORE_EXPRESSION_PROXY_CLASS("hive.metastore.expression.proxy",
        "org.apache.hadoop.hive.ql.optimizer.ppr.PartitionExpressionForMetastore"),
    METASTORE_DETACH_ALL_ON_COMMIT("javax.jdo.option.DetachAllOnCommit", true),
    METASTORE_NON_TRANSACTIONAL_READ("javax.jdo.option.NonTransactionalRead", true),
    METASTORE_CONNECTION_USER_NAME("javax.jdo.option.ConnectionUserName", "APP"),
    METASTORE_END_FUNCTION_LISTENERS("hive.metastore.end.function.listeners", ""),
    METASTORE_PART_INHERIT_TBL_PROPS("hive.metastore.partition.inherit.table.properties", ""),
    // Parameters for exporting metadata on table drop (requires the use of the
    // org.apache.hadoop.hive.ql.parse.MetaDataExportListener pre-event listener)
    METADATA_EXPORT_LOCATION("hive.metadata.export.location", ""),
    MOVE_EXPORTED_METADATA_TO_TRASH("hive.metadata.move.exported.metadata.to.trash", true),
    // CLI
    CLIIGNOREERRORS("hive.cli.errors.ignore", false),
    CLIPRINTCURRENTDB("hive.cli.print.current.db", false),
    CLIPROMPT("hive.cli.prompt", "hive"),
    CLIPRETTYOUTPUTNUMCOLS("hive.cli.pretty.output.num.cols", -1),
    HIVE_METASTORE_FS_HANDLER_CLS("hive.metastore.fs.handler.class",
        "org.apache.hadoop.hive.metastore.HiveMetaStoreFsImpl"),
    // Things we log in the jobconf
    // session identifier
    HIVESESSIONID("hive.session.id", ""),
    // whether session is running in silent mode or not
    HIVESESSIONSILENT("hive.session.silent", false),
    // Whether to enable history for this session
    HIVE_SESSION_HISTORY_ENABLED("hive.session.history.enabled", false),
    // query being executed (multiple per session)
    HIVEQUERYSTRING("hive.query.string", ""),
    // id of query being executed (multiple per session)
    HIVEQUERYID("hive.query.id", ""),
    // id of the mapred plan being executed (multiple per query)
    HIVEPLANID("hive.query.planid", ""),
    // max jobname length
    HIVEJOBNAMELENGTH("hive.jobname.length", 50),
    // hive jar
    HIVEJAR("hive.jar.path", ""),
    HIVEAUXJARS("hive.aux.jars.path", ""),
    // hive added files and jars
    HIVEADDEDFILES("hive.added.files.path", ""),
    HIVEADDEDJARS("hive.added.jars.path", ""),
    HIVEADDEDARCHIVES("hive.added.archives.path", ""),
    HIVE_CURRENT_DATABASE("hive.current.database", ""), // internal usage only
    // for hive script operator
    HIVES_AUTO_PROGRESS_TIMEOUT("hive.auto.progress.timeout", 0),
    HIVETABLENAME("hive.table.name", ""),
    HIVEPARTITIONNAME("hive.partition.name", ""),
    HIVESCRIPTAUTOPROGRESS("hive.script.auto.progress", false),
    HIVESCRIPTIDENVVAR("hive.script.operator.id.env.var", "HIVE_SCRIPT_OPERATOR_ID"),
    HIVESCRIPTTRUNCATEENV("hive.script.operator.truncate.env", false),
    HIVEMAPREDMODE("hive.mapred.mode", "nonstrict"),
    HIVEALIAS("hive.alias", ""),
    HIVEMAPSIDEAGGREGATE("hive.map.aggr", true),
    HIVEGROUPBYSKEW("hive.groupby.skewindata", false),
    HIVE_OPTIMIZE_MULTI_GROUPBY_COMMON_DISTINCTS("hive.optimize.multigroupby.common.distincts",
        true),
    HIVEJOINEMITINTERVAL("hive.join.emit.interval", 1000),
    HIVEJOINCACHESIZE("hive.join.cache.size", 25000),
    // hive.mapjoin.bucket.cache.size has been replaced by hive.smbjoin.cache.rows;
    // remove in Hive 0.13. Also, do not change the default (see SMB operator)
    HIVEMAPJOINBUCKETCACHESIZE("hive.mapjoin.bucket.cache.size", 100),
    HIVEMAPJOINUSEOPTIMIZEDTABLE("hive.mapjoin.optimized.hashtable", true),
    HIVEMAPJOINUSEOPTIMIZEDKEYS("hive.mapjoin.optimized.keys", true),
    HIVEMAPJOINLAZYHASHTABLE("hive.mapjoin.lazy.hashtable", true),
    HIVEHASHTABLEWBSIZE("hive.mapjoin.optimized.hashtable.wbsize", 10 * 1024 * 1024),
    HIVESMBJOINCACHEROWS("hive.smbjoin.cache.rows", 10000),
    HIVEGROUPBYMAPINTERVAL("hive.groupby.mapaggr.checkinterval", 100000),
    HIVEMAPAGGRHASHMEMORY("hive.map.aggr.hash.percentmemory", (float) 0.5),
    HIVEMAPJOINFOLLOWEDBYMAPAGGRHASHMEMORY("hive.mapjoin.followby.map.aggr.hash.percentmemory",
        (float) 0.3),
    HIVEMAPAGGRMEMORYTHRESHOLD("hive.map.aggr.hash.force.flush.memory.threshold", (float) 0.9),
    HIVEMAPAGGRHASHMINREDUCTION("hive.map.aggr.hash.min.reduction", (float) 0.5),
    HIVEMULTIGROUPBYSINGLEREDUCER("hive.multigroupby.singlereducer", true),
    HIVE_MAP_GROUPBY_SORT("hive.map.groupby.sorted", false),
    HIVE_MAP_GROUPBY_SORT_TESTMODE("hive.map.groupby.sorted.testmode", false),
    HIVE_GROUPBY_ORDERBY_POSITION_ALIAS("hive.groupby.orderby.position.alias", false),
    HIVE_NEW_JOB_GROUPING_SET_CARDINALITY("hive.new.job.grouping.set.cardinality", 30),
    // for hive udtf operator
    HIVEUDTFAUTOPROGRESS("hive.udtf.auto.progress", false),
    // Default file format for CREATE TABLE statement
    // Options: TextFile, SequenceFile
    HIVEDEFAULTFILEFORMAT("hive.default.fileformat", "TextFile",
        new StringsValidator("TextFile", "SequenceFile", "RCfile", "ORC")),
    HIVEQUERYRESULTFILEFORMAT("hive.query.result.fileformat", "TextFile",
        new StringsValidator("TextFile", "SequenceFile", "RCfile")),
    HIVECHECKFILEFORMAT("hive.fileformat.check", true),
    // default serde for rcfile
    HIVEDEFAULTRCFILESERDE("hive.default.rcfile.serde",
        "org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe"),
    SERDESUSINGMETASTOREFORSCHEMA("hive.serdes.using.metastore.for.schema",
        "org.apache.hadoop.hive.ql.io.orc.OrcSerde,"
        + "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe,org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe,"
        + "org.apache.hadoop.hive.serde2.dynamic_type.DynamicSerDe,org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe,"
        + "org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe,org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe,"
        + "org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe"),
    // Location of Hive run time structured log file
    HIVEHISTORYFILELOC("hive.querylog.location",
        System.getProperty("java.io.tmpdir") + File.separator + System.getProperty("user.name")),
    // Whether to log the plan's progress every time a job's progress is checked
    HIVE_LOG_INCREMENTAL_PLAN_PROGRESS("hive.querylog.enable.plan.progress", true),
    // The interval between logging the plan's progress in milliseconds
    HIVE_LOG_INCREMENTAL_PLAN_PROGRESS_INTERVAL("hive.querylog.plan.progress.interval", 60000L),
    // Default serde and record reader for user scripts
    HIVESCRIPTSERDE("hive.script.serde", "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"),
    HIVESCRIPTRECORDREADER("hive.script.recordreader",
        "org.apache.hadoop.hive.ql.exec.TextRecordReader"),
    HIVESCRIPTRECORDWRITER("hive.script.recordwriter",
        "org.apache.hadoop.hive.ql.exec.TextRecordWriter"),
    HIVESCRIPTESCAPE("hive.transform.escape.input", false),
    HIVEBINARYRECORDMAX("hive.binary.record.max.length", 1000),
    // HWI
    HIVEHWILISTENHOST("hive.hwi.listen.host", "0.0.0.0"),
    HIVEHWILISTENPORT("hive.hwi.listen.port", "9999"),
    HIVEHWIWARFILE("hive.hwi.war.file", System.getenv("HWI_WAR_FILE")),
    // mapper/reducer memory in local mode
    HIVEHADOOPMAXMEM("hive.mapred.local.mem", 0),
    // small table file size
    HIVESMALLTABLESFILESIZE("hive.mapjoin.smalltable.filesize", 25000000L), // 25M
    // random number for split sampling
    HIVESAMPLERANDOMNUM("hive.sample.seednumber", 0),
    // test mode in hive mode
    HIVETESTMODE("hive.test.mode", false),
    HIVETESTMODEPREFIX("hive.test.mode.prefix", "test_"),
    HIVETESTMODESAMPLEFREQ("hive.test.mode.samplefreq", 32),
    HIVETESTMODENOSAMPLE("hive.test.mode.nosamplelist", ""),
    HIVETESTMODEDUMMYSTATAGGR("hive.test.dummystats.aggregator", ""), // internal variable
    HIVETESTMODEDUMMYSTATPUB("hive.test.dummystats.publisher", ""), // internal variable
    HIVEMERGEMAPFILES("hive.merge.mapfiles", true),
    HIVEMERGEMAPREDFILES("hive.merge.mapredfiles", false),
    HIVEMERGETEZFILES("hive.merge.tezfiles", false),
    HIVEMERGEMAPFILESSIZE("hive.merge.size.per.task", (long) (256 * 1000 * 1000)),
    HIVEMERGEMAPFILESAVGSIZE("hive.merge.smallfiles.avgsize", (long) (16 * 1000 * 1000)),
    HIVEMERGERCFILEBLOCKLEVEL("hive.merge.rcfile.block.level", true),
    HIVEMERGEINPUTFORMATBLOCKLEVEL("hive.merge.input.format.block.level",
        "org.apache.hadoop.hive.ql.io.rcfile.merge.RCFileBlockMergeInputFormat"),
    HIVEMERGECURRENTJOBHASDYNAMICPARTITIONS(
        "hive.merge.current.job.has.dynamic.partitions", false),
    HIVEUSEEXPLICITRCFILEHEADER("hive.exec.rcfile.use.explicit.header", true),
    HIVEUSERCFILESYNCCACHE("hive.exec.rcfile.use.sync.cache", true),
    HIVE_RCFILE_RECORD_INTERVAL("hive.io.rcfile.record.interval", Integer.MAX_VALUE),
    HIVE_RCFILE_COLUMN_NUMBER_CONF("hive.io.rcfile.column.number.conf", 0),
    HIVE_RCFILE_TOLERATE_CORRUPTIONS("hive.io.rcfile.tolerate.corruptions", false),
    HIVE_RCFILE_RECORD_BUFFER_SIZE("hive.io.rcfile.record.buffer.size", 4194304), // 4M
    // Maximum fraction of heap that can be used by ORC file writers
    HIVE_ORC_FILE_MEMORY_POOL("hive.exec.orc.memory.pool", 0.5f), // 50%
    // Define the version of the file to write
    HIVE_ORC_WRITE_FORMAT("hive.exec.orc.write.format", null),
    // Define the default ORC stripe size
    HIVE_ORC_DEFAULT_STRIPE_SIZE("hive.exec.orc.default.stripe.size", 256L * 1024 * 1024),
    HIVE_ORC_DICTIONARY_KEY_SIZE_THRESHOLD("hive.exec.orc.dictionary.key.size.threshold", 0.8f),
    // Define the default ORC index stride
    HIVE_ORC_DEFAULT_ROW_INDEX_STRIDE("hive.exec.orc.default.row.index.stride", 10000),
    // Define the default ORC buffer size
    HIVE_ORC_DEFAULT_BUFFER_SIZE("hive.exec.orc.default.buffer.size", 256 * 1024),
    // Define the default block padding
    HIVE_ORC_DEFAULT_BLOCK_PADDING("hive.exec.orc.default.block.padding", true),
    // Define the default compression codec for ORC file
    HIVE_ORC_DEFAULT_COMPRESS("hive.exec.orc.default.compress", "ZLIB"),
    // Define the default encoding strategy to use
    HIVE_ORC_ENCODING_STRATEGY("hive.exec.orc.encoding.strategy", "SPEED",
        new StringsValidator("SPEED", "COMPRESSION")),
    HIVE_ORC_INCLUDE_FILE_FOOTER_IN_SPLITS("hive.orc.splits.include.file.footer", false),
    HIVE_ORC_CACHE_STRIPE_DETAILS_SIZE("hive.orc.cache.stripe.details.size", 10000),
    HIVE_ORC_COMPUTE_SPLITS_NUM_THREADS("hive.orc.compute.splits.num.threads", 10),
    HIVE_ORC_SKIP_CORRUPT_DATA("hive.exec.orc.skip.corrupt.data", false),
    HIVE_ORC_ZEROCOPY("hive.exec.orc.zerocopy", false),
    // Whether extended literal set is allowed for LazySimpleSerde.
    HIVE_LAZYSIMPLE_EXTENDED_BOOLEAN_LITERAL("hive.lazysimple.extended_boolean_literal", false),
    HIVESKEWJOIN("hive.optimize.skewjoin", false),
    HIVECONVERTJOIN("hive.auto.convert.join", true),
    HIVECONVERTJOINNOCONDITIONALTASK("hive.auto.convert.join.noconditionaltask", true),
    HIVECONVERTJOINNOCONDITIONALTASKTHRESHOLD("hive.auto.convert.join.noconditionaltask.size",
        10000000L),
    HIVECONVERTJOINUSENONSTAGED("hive.auto.convert.join.use.nonstaged", false),
    HIVESKEWJOINKEY("hive.skewjoin.key", 100000),
    HIVESKEWJOINMAPJOINNUMMAPTASK("hive.skewjoin.mapjoin.map.tasks", 10000),
    HIVESKEWJOINMAPJOINMINSPLIT("hive.skewjoin.mapjoin.min.split", 33554432L), // 32M
    HIVESENDHEARTBEAT("hive.heartbeat.interval", 1000),
    HIVELIMITMAXROWSIZE("hive.limit.row.max.size", 100000L),
    HIVELIMITOPTLIMITFILE("hive.limit.optimize.limit.file", 10),
    HIVELIMITOPTENABLE("hive.limit.optimize.enable", false),
    HIVELIMITOPTMAXFETCH("hive.limit.optimize.fetch.max", 50000),
    HIVELIMITPUSHDOWNMEMORYUSAGE("hive.limit.pushdown.memory.usage", -1f),
    HIVELIMITTABLESCANPARTITION("hive.limit.query.max.table.partition", -1),
    HIVEHASHTABLETHRESHOLD("hive.hashtable.initialCapacity", 100000),
    HIVEHASHTABLELOADFACTOR("hive.hashtable.loadfactor", (float) 0.75),
    HIVEHASHTABLEFOLLOWBYGBYMAXMEMORYUSAGE("hive.mapjoin.followby.gby.localtask.max.memory.usage",
        (float) 0.55),
    HIVEHASHTABLEMAXMEMORYUSAGE("hive.mapjoin.localtask.max.memory.usage", (float) 0.90),
    HIVEHASHTABLESCALE("hive.mapjoin.check.memory.rows", (long) 100000),
    HIVEDEBUGLOCALTASK("hive.debug.localtask", false),
    HIVEINPUTFORMAT("hive.input.format", "org.apache.hadoop.hive.ql.io.CombineHiveInputFormat"),
    HIVETEZINPUTFORMAT("hive.tez.input.format", "org.apache.hadoop.hive.ql.io.HiveInputFormat"),
    HIVETEZCONTAINERSIZE("hive.tez.container.size", -1),
    HIVETEZJAVAOPTS("hive.tez.java.opts", null),
    HIVETEZLOGLEVEL("hive.tez.log.level", "INFO"),
    HIVEENFORCEBUCKETING("hive.enforce.bucketing", false),
    HIVEENFORCESORTING("hive.enforce.sorting", false),
    HIVEOPTIMIZEBUCKETINGSORTING("hive.optimize.bucketingsorting", true),
    HIVEPARTITIONER("hive.mapred.partitioner", "org.apache.hadoop.hive.ql.io.DefaultHivePartitioner"),
    HIVEENFORCESORTMERGEBUCKETMAPJOIN("hive.enforce.sortmergebucketmapjoin", false),
    HIVEENFORCEBUCKETMAPJOIN("hive.enforce.bucketmapjoin", false),
    HIVE_AUTO_SORTMERGE_JOIN("hive.auto.convert.sortmerge.join", false),
    HIVE_AUTO_SORTMERGE_JOIN_BIGTABLE_SELECTOR(
        "hive.auto.convert.sortmerge.join.bigtable.selection.policy",
        "org.apache.hadoop.hive.ql.optimizer.AvgPartitionSizeBasedBigTableSelectorForAutoSMJ"),
    HIVE_AUTO_SORTMERGE_JOIN_TOMAPJOIN(
        "hive.auto.convert.sortmerge.join.to.mapjoin", false),
    HIVESCRIPTOPERATORTRUST("hive.exec.script.trust", false),
    HIVEROWOFFSET("hive.exec.rowoffset", false),
    HIVE_COMBINE_INPUT_FORMAT_SUPPORTS_SPLITTABLE("hive.hadoop.supports.splittable.combineinputformat", false),
    // Optimizer
    HIVEOPTINDEXFILTER("hive.optimize.index.filter", false), // automatically use indexes
    HIVEINDEXAUTOUPDATE("hive.optimize.index.autoupdate", false), // automatically update stale indexes
    HIVEOPTPPD("hive.optimize.ppd", true), // predicate pushdown
    HIVEPPDRECOGNIZETRANSITIVITY("hive.ppd.recognizetransivity", true), // predicate pushdown
    HIVEPPDREMOVEDUPLICATEFILTERS("hive.ppd.remove.duplicatefilters", true),
    HIVEMETADATAONLYQUERIES("hive.optimize.metadataonly", true),
    // push predicates down to storage handlers
    HIVEOPTPPD_STORAGE("hive.optimize.ppd.storage", true),
    HIVEOPTGROUPBY("hive.optimize.groupby", true), // optimize group by
    HIVEOPTBUCKETMAPJOIN("hive.optimize.bucketmapjoin", false), // optimize bucket map join
    HIVEOPTSORTMERGEBUCKETMAPJOIN("hive.optimize.bucketmapjoin.sortedmerge", false), // try to use sorted merge bucket map join
    HIVEOPTREDUCEDEDUPLICATION("hive.optimize.reducededuplication", true),
    HIVEOPTREDUCEDEDUPLICATIONMINREDUCER("hive.optimize.reducededuplication.min.reducer", 4),
    // when enabled, the dynamic partitioning column will be globally sorted.
    // this way we can keep only one record writer open for each partition value
    // in the reducer, thereby reducing the memory pressure on reducers
    HIVEOPTSORTDYNAMICPARTITION("hive.optimize.sort.dynamic.partition", true),
    HIVESAMPLINGFORORDERBY("hive.optimize.sampling.orderby", false),
    HIVESAMPLINGNUMBERFORORDERBY("hive.optimize.sampling.orderby.number", 1000),
    HIVESAMPLINGPERCENTFORORDERBY("hive.optimize.sampling.orderby.percent", 0.1f),
    // whether to optimize a union followed by a select followed by a filesink.
    // It creates sub-directories in the final output, so it should not be turned on in systems
    // where MAPREDUCE-1501 is not present
    HIVE_OPTIMIZE_UNION_REMOVE("hive.optimize.union.remove", false),
    HIVEOPTCORRELATION("hive.optimize.correlation", false), // exploit intra-query correlations
    // whether hadoop map-reduce supports sub-directories. Support was added by MAPREDUCE-1501.
    // Some optimizations can only be performed if the version of hadoop being used supports
    // sub-directories
    HIVE_HADOOP_SUPPORTS_SUBDIRECTORIES("hive.mapred.supports.subdirectories", false),
    // optimize skewed join by changing the query plan at compile time
    HIVE_OPTIMIZE_SKEWJOIN_COMPILETIME("hive.optimize.skewjoin.compiletime", false),
    // Indexes
    HIVEOPTINDEXFILTER_COMPACT_MINSIZE("hive.optimize.index.filter.compact.minsize",
        (long) 5 * 1024 * 1024 * 1024), // 5G
    HIVEOPTINDEXFILTER_COMPACT_MAXSIZE("hive.optimize.index.filter.compact.maxsize",
        (long) -1), // infinity
    HIVE_INDEX_COMPACT_QUERY_MAX_ENTRIES("hive.index.compact.query.max.entries",
        (long) 10000000), // 10M
    HIVE_INDEX_COMPACT_QUERY_MAX_SIZE("hive.index.compact.query.max.size",
        (long) 10 * 1024 * 1024 * 1024), // 10G
    HIVE_INDEX_COMPACT_BINARY_SEARCH("hive.index.compact.binary.search", true),
    // Statistics
    HIVESTATSAUTOGATHER("hive.stats.autogather", true),
    HIVESTATSDBCLASS("hive.stats.dbclass", "fs",
        new PatternValidator("jdbc(:.*)", "hbase", "counter", "custom", "fs")), // StatsSetupConst.StatDB
    HIVESTATSJDBCDRIVER("hive.stats.jdbcdriver",
        "org.apache.derby.jdbc.EmbeddedDriver"), // JDBC driver specific to the dbclass
    HIVESTATSDBCONNECTIONSTRING("hive.stats.dbconnectionstring",
        "jdbc:derby:;databaseName=TempStatsStore;create=true"), // automatically create database
    HIVE_STATS_DEFAULT_PUBLISHER("hive.stats.default.publisher",
        ""), // default stats publisher if none of JDBC/HBase is specified
    HIVE_STATS_DEFAULT_AGGREGATOR("hive.stats.default.aggregator",
        ""), // default stats aggregator if none of JDBC/HBase is specified
    HIVE_STATS_JDBC_TIMEOUT("hive.stats.jdbc.timeout",
        30), // default timeout in sec for JDBC connection & SQL statements
    HIVE_STATS_ATOMIC("hive.stats.atomic",
        false), // whether to update metastore stats only if all stats are available
    HIVE_STATS_RETRIES_MAX("hive.stats.retries.max",
        0), // maximum # of retries to insert/select/delete the stats DB
    HIVE_STATS_RETRIES_WAIT("hive.stats.retries.wait",
        3000), // # milliseconds to wait before the next retry
    // should the raw data size be collected when analyzing tables
    HIVE_STATS_COLLECT_RAWDATASIZE("hive.stats.collect.rawdatasize", true),
    // Subset of counters that should be of interest for hive.client.stats.publishers
    // (when one wants to limit their publishing). Non-display names should be used.
    CLIENT_STATS_COUNTERS("hive.client.stats.counters", ""),
    HIVE_STATS_RELIABLE("hive.stats.reliable", false),
    // number of threads used by partialscan/noscan stats gathering for partitioned tables.
    // This is applicable only for file formats that implement the StatsProvidingRecordReader
    // interface (like ORC)
    HIVE_STATS_GATHER_NUM_THREADS("hive.stats.gather.num.threads", 10),
    // Collect table access keys information for operators that can benefit from bucketing
    HIVE_STATS_COLLECT_TABLEKEYS("hive.stats.collect.tablekeys", false),
    // Collect column access information
    HIVE_STATS_COLLECT_SCANCOLS("hive.stats.collect.scancols", false),
    // standard error allowed for ndv estimates. A lower value indicates higher accuracy and a
    // higher compute cost.
    HIVE_STATS_NDV_ERROR("hive.stats.ndv.error", (float) 20.0),
    HIVE_STATS_KEY_PREFIX_MAX_LENGTH("hive.stats.key.prefix.max.length", 150),
    HIVE_STATS_KEY_PREFIX_RESERVE_LENGTH("hive.stats.key.prefix.reserve.length", 24),
    HIVE_STATS_KEY_PREFIX("hive.stats.key.prefix", ""), // internal usage only
    // if the length of a variable-length data type cannot be determined, this length will be used.
    HIVE_STATS_MAX_VARIABLE_LENGTH("hive.stats.max.variable.length", 100),
    // if the number of elements in a list cannot be determined, this value will be used
    HIVE_STATS_LIST_NUM_ENTRIES("hive.stats.list.num.entries", 10),
    // if the number of elements in a map cannot be determined, this value will be used
    HIVE_STATS_MAP_NUM_ENTRIES("hive.stats.map.num.entries", 10),
    // to accurately compute statistics for GROUP BY, map-side parallelism needs to be known
    HIVE_STATS_MAP_SIDE_PARALLELISM("hive.stats.map.parallelism", 1),
    // statistics annotation fetches stats for each partition, which can be expensive. turning
    // this off will result in basic sizes being fetched from the namenode instead
    HIVE_STATS_FETCH_PARTITION_STATS("hive.stats.fetch.partition.stats", true),
    // statistics annotation fetches column statistics for all required columns, which can
    // be very expensive sometimes
    HIVE_STATS_FETCH_COLUMN_STATS("hive.stats.fetch.column.stats", false),
    // in the absence of column statistics, the estimated number of rows/data size that will
    // be emitted from the join operator will depend on this factor
    HIVE_STATS_JOIN_FACTOR("hive.stats.join.factor", (float) 1.1),
    // in the absence of uncompressed/raw data size, total file size will be used for statistics
    // annotation. But the file may be compressed, encoded and serialized, and so may be smaller
    // than the actual uncompressed/raw data size. This factor is multiplied by the file size to
    // estimate the raw data size.
    HIVE_STATS_DESERIALIZATION_FACTOR("hive.stats.deserialization.factor", (float) 1.0),
    // Concurrency
    HIVE_SUPPORT_CONCURRENCY("hive.support.concurrency", false),
    HIVE_LOCK_MANAGER("hive.lock.manager",
        "org.apache.hadoop.hive.ql.lockmgr.zookeeper.ZooKeeperHiveLockManager"),
    HIVE_LOCK_NUMRETRIES("hive.lock.numretries", 100),
    HIVE_UNLOCK_NUMRETRIES("hive.unlock.numretries", 10),
    HIVE_LOCK_SLEEP_BETWEEN_RETRIES("hive.lock.sleep.between.retries", 60),
    HIVE_LOCK_MAPRED_ONLY("hive.lock.mapred.only.operation", false),
    HIVE_ZOOKEEPER_QUORUM("hive.zookeeper.quorum", ""),
    HIVE_ZOOKEEPER_CLIENT_PORT("hive.zookeeper.client.port", "2181"),
    HIVE_ZOOKEEPER_SESSION_TIMEOUT("hive.zookeeper.session.timeout", 600 * 1000),
    HIVE_ZOOKEEPER_NAMESPACE("hive.zookeeper.namespace", "hive_zookeeper_namespace"),
    HIVE_ZOOKEEPER_CLEAN_EXTRA_NODES("hive.zookeeper.clean.extra.nodes", false),
    // Transactions
    HIVE_TXN_MANAGER("hive.txn.manager",
        "org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager"),
    // time after which transactions are declared aborted if the client has
    // not sent a heartbeat, in seconds.
    HIVE_TXN_TIMEOUT("hive.txn.timeout", 300),
    // Maximum number of transactions that can be fetched in one call to open_txns().
    // Increasing this will decrease the number of delta files created when
    // streaming data into Hive. But it will also increase the number of
    // open transactions at any given time, possibly impacting read
    // performance.
    HIVE_TXN_MAX_OPEN_BATCH("hive.txn.max.open.batch", 1000),
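    // Hedged illustration of the batch-size tradeoff described above: a streaming
    // client that opens transactions in batches of hive.txn.max.open.batch tends to
    // produce on the order of one delta file per batch rather than one per
    // transaction, so larger batches mean fewer deltas to compact at the cost of
    // more transactions held open at once.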
    // Whether to run the compactor's initiator thread in this metastore instance or not.
    HIVE_COMPACTOR_INITIATOR_ON("hive.compactor.initiator.on", false),
    // Number of compactor worker threads to run on this metastore instance.
    HIVE_COMPACTOR_WORKER_THREADS("hive.compactor.worker.threads", 0),
    // Time, in seconds, before a given compaction in working state is declared a failure and
    // returned to the initiated state.
    HIVE_COMPACTOR_WORKER_TIMEOUT("hive.compactor.worker.timeout", 86400L),
    // Time in seconds between checks to see if any partitions need to be compacted. This should
    // be kept high because each check for compaction requires many calls against the NameNode.
    HIVE_COMPACTOR_CHECK_INTERVAL("hive.compactor.check.interval", 300L),
    // Number of delta files that must exist in a directory before the compactor will attempt a
    // minor compaction.
    HIVE_COMPACTOR_DELTA_NUM_THRESHOLD("hive.compactor.delta.num.threshold", 10),
    // Percentage (by size) of base that deltas can be before major compaction is initiated.
    HIVE_COMPACTOR_DELTA_PCT_THRESHOLD("hive.compactor.delta.pct.threshold", 0.1f),
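    // Worked example (hedged, for illustration only): with the 0.1f default above,
    // a partition whose base files total 100MB would become a candidate for major
    // compaction once its delta files exceed roughly 10MB combined.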
    // Number of aborted transactions involving a particular table or partition before major
    // compaction is initiated.
    HIVE_COMPACTOR_ABORTEDTXN_THRESHOLD("hive.compactor.abortedtxn.threshold", 1000),
    // For HBase storage handler
    HIVE_HBASE_WAL_ENABLED("hive.hbase.wal.enabled", true),
    HIVE_HBASE_GENERATE_HFILES("hive.hbase.generatehfiles", false),
    // For har files
    HIVEARCHIVEENABLED("hive.archive.enabled", false),
    // Enable/Disable the gbToIdx rewrite rule
    HIVEOPTGBYUSINGINDEX("hive.optimize.index.groupby", false),
    HIVEOUTERJOINSUPPORTSFILTERS("hive.outerjoin.supports.filters", true),
    // 'minimal', 'more' (and 'all' later)
    HIVEFETCHTASKCONVERSION("hive.fetch.task.conversion", "minimal",
        new StringsValidator("minimal", "more")),
    HIVEFETCHTASKCONVERSIONTHRESHOLD("hive.fetch.task.conversion.threshold", -1L),
    HIVEFETCHTASKAGGR("hive.fetch.task.aggr", false),
    HIVEOPTIMIZEMETADATAQUERIES("hive.compute.query.using.stats", false),
    // Serde for FetchTask
    HIVEFETCHOUTPUTSERDE("hive.fetch.output.serde", "org.apache.hadoop.hive.serde2.DelimitedJSONSerDe"),
    HIVEEXPREVALUATIONCACHE("hive.cache.expr.evaluation", true),
    // Hive Variables
    HIVEVARIABLESUBSTITUTE("hive.variable.substitute", true),
    HIVEVARIABLESUBSTITUTEDEPTH("hive.variable.substitute.depth", 40),
    HIVECONFVALIDATION("hive.conf.validation", true),
    SEMANTIC_ANALYZER_HOOK("hive.semantic.analyzer.hook", ""),
    HIVE_AUTHORIZATION_ENABLED("hive.security.authorization.enabled", false),
    HIVE_AUTHORIZATION_MANAGER("hive.security.authorization.manager",
        "org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider"),
    HIVE_AUTHENTICATOR_MANAGER("hive.security.authenticator.manager",
        "org.apache.hadoop.hive.ql.security.HadoopDefaultAuthenticator"),
    HIVE_METASTORE_AUTHORIZATION_MANAGER("hive.security.metastore.authorization.manager",
        "org.apache.hadoop.hive.ql.security.authorization."
        + "DefaultHiveMetastoreAuthorizationProvider"),
    HIVE_METASTORE_AUTHENTICATOR_MANAGER("hive.security.metastore.authenticator.manager",
        "org.apache.hadoop.hive.ql.security.HadoopDefaultMetastoreAuthenticator"),
    HIVE_AUTHORIZATION_TABLE_USER_GRANTS("hive.security.authorization.createtable.user.grants", ""),
    HIVE_AUTHORIZATION_TABLE_GROUP_GRANTS("hive.security.authorization.createtable.group.grants",
        ""),
    HIVE_AUTHORIZATION_TABLE_ROLE_GRANTS("hive.security.authorization.createtable.role.grants", ""),
    HIVE_AUTHORIZATION_TABLE_OWNER_GRANTS("hive.security.authorization.createtable.owner.grants",
        ""),
    // if this is not set, the default value is added by the SQL standard authorizer.
    // The default value can't be set in this constructor, as it would refer to names in other
    // ConfVars whose constructors would not have been called yet
    HIVE_AUTHORIZATION_SQL_STD_AUTH_CONFIG_WHITELIST("hive.security.authorization.sqlstd.confwhitelist", ""),
    // Print column names in output
    HIVE_CLI_PRINT_HEADER("hive.cli.print.header", false),
    HIVE_ERROR_ON_EMPTY_PARTITION("hive.error.on.empty.partition", false),
    HIVE_INDEX_COMPACT_FILE("hive.index.compact.file", ""), // internal variable
    HIVE_INDEX_BLOCKFILTER_FILE("hive.index.blockfilter.file", ""), // internal variable
    HIVE_INDEX_IGNORE_HDFS_LOC("hive.index.compact.file.ignore.hdfs", false),
    HIVE_EXIM_URI_SCHEME_WL("hive.exim.uri.scheme.whitelist", "hdfs,pfile"),
    // temporary variable for testing. This is added just to turn off this feature in case of a bug
    // in deployment. It has not been documented in hive-default.xml intentionally; this should be
    // removed once the feature is stable
    HIVE_MAPPER_CANNOT_SPAN_MULTIPLE_PARTITIONS("hive.mapper.cannot.span.multiple.partitions", false),
    HIVE_REWORK_MAPREDWORK("hive.rework.mapredwork", false),
    HIVE_CONCATENATE_CHECK_INDEX("hive.exec.concatenate.check.index", true),
    HIVE_IO_EXCEPTION_HANDLERS("hive.io.exception.handlers", ""),
    // logging configuration
    HIVE_LOG4J_FILE("hive.log4j.file", ""),
    HIVE_EXEC_LOG4J_FILE("hive.exec.log4j.file", ""),
    // prefix used for auto-generated column aliases (this should start with '_')
    HIVE_AUTOGEN_COLUMNALIAS_PREFIX_LABEL("hive.autogen.columnalias.prefix.label", "_c"),
    HIVE_AUTOGEN_COLUMNALIAS_PREFIX_INCLUDEFUNCNAME(
        "hive.autogen.columnalias.prefix.includefuncname", false),
    // The class responsible for logging client side performance metrics.
    // Must be a subclass of org.apache.hadoop.hive.ql.log.PerfLogger
    HIVE_PERF_LOGGER("hive.exec.perf.logger", "org.apache.hadoop.hive.ql.log.PerfLogger"),
    // Whether to delete the scratchdir at startup
    HIVE_START_CLEANUP_SCRATCHDIR("hive.start.cleanup.scratchdir", false),
    HIVE_INSERT_INTO_MULTILEVEL_DIRS("hive.insert.into.multilevel.dirs", false),
    HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS("hive.warehouse.subdir.inherit.perms", false),
    HIVE_WAREHOUSE_DATA_SKIPTRASH("hive.warehouse.data.skipTrash", false),
    // whether insert into external tables is allowed
    HIVE_INSERT_INTO_EXTERNAL_TABLES("hive.insert.into.external.tables", true),
    // A comma separated list of hooks which implement HiveDriverRunHook and will be run at the
    // beginning and end of Driver.run; these will be run in the order specified
    HIVE_DRIVER_RUN_HOOKS("hive.exec.driver.run.hooks", ""),
    HIVE_DDL_OUTPUT_FORMAT("hive.ddl.output.format", null),
    HIVE_ENTITY_SEPARATOR("hive.entity.separator", "@"),
    HIVE_DISPLAY_PARTITION_COLUMNS_SEPARATELY("hive.display.partition.cols.separately", true),
    HIVE_SERVER2_MAX_START_ATTEMPTS("hive.server2.max.start.attempts", 30L,
        new LongRangeValidator(0L, Long.MAX_VALUE)),
    // binary or http
    HIVE_SERVER2_TRANSPORT_MODE("hive.server2.transport.mode", "binary",
        new StringsValidator("binary", "http")),
    // http (over thrift) transport settings
    HIVE_SERVER2_THRIFT_HTTP_PORT("hive.server2.thrift.http.port", 10001),
    HIVE_SERVER2_THRIFT_HTTP_PATH("hive.server2.thrift.http.path", "cliservice"),
    HIVE_SERVER2_THRIFT_HTTP_MIN_WORKER_THREADS("hive.server2.thrift.http.min.worker.threads", 5),
    HIVE_SERVER2_THRIFT_HTTP_MAX_WORKER_THREADS("hive.server2.thrift.http.max.worker.threads", 500),
    // binary transport settings
    HIVE_SERVER2_THRIFT_PORT("hive.server2.thrift.port", 10000),
    HIVE_SERVER2_THRIFT_BIND_HOST("hive.server2.thrift.bind.host", ""),
    // Setting hadoop.rpc.protection to a higher level than HiveServer2
    // does not make sense in most situations.
    // HiveServer2 ignores hadoop.rpc.protection in favor of hive.server2.thrift.sasl.qop.
    HIVE_SERVER2_THRIFT_SASL_QOP("hive.server2.thrift.sasl.qop", "auth",
        new StringsValidator("auth", "auth-int", "auth-conf")),
    HIVE_SERVER2_THRIFT_MIN_WORKER_THREADS("hive.server2.thrift.min.worker.threads", 5),
    HIVE_SERVER2_THRIFT_MAX_WORKER_THREADS("hive.server2.thrift.max.worker.threads", 500),
    // Configuration for the async thread pool in SessionManager
    // Number of async threads
    HIVE_SERVER2_ASYNC_EXEC_THREADS("hive.server2.async.exec.threads", 100),
    // Number of seconds HiveServer2 shutdown will wait for async threads to terminate
    HIVE_SERVER2_ASYNC_EXEC_SHUTDOWN_TIMEOUT("hive.server2.async.exec.shutdown.timeout", 10),
    // Size of the wait queue for the async thread pool in HiveServer2.
    // After hitting this limit, the async thread pool will reject new requests.
    HIVE_SERVER2_ASYNC_EXEC_WAIT_QUEUE_SIZE("hive.server2.async.exec.wait.queue.size", 100),
    // Number of seconds that an idle HiveServer2 async thread (from the thread pool)
    // will wait for a new task to arrive before terminating
    HIVE_SERVER2_ASYNC_EXEC_KEEPALIVE_TIME("hive.server2.async.exec.keepalive.time", 10),
    // Time in milliseconds that HiveServer2 will wait
    // before responding to asynchronous calls that use long polling
    HIVE_SERVER2_LONG_POLLING_TIMEOUT("hive.server2.long.polling.timeout", 5000L),
  814. // HiveServer2 auth configuration
  815. HIVE_SERVER2_AUTHENTICATION("hive.server2.authentication", "NONE",
  816. new StringsValidator("NOSASL", "NONE", "LDAP", "KERBEROS", "PAM", "CUSTOM")),
  817. HIVE_SERVER2_ALLOW_USER_SUBSTITUTION("hive.server2.allow.user.substitution", true),
  818. HIVE_SERVER2_KERBEROS_KEYTAB("hive.server2.authentication.kerberos.keytab", ""),
  819. HIVE_SERVER2_KERBEROS_PRINCIPAL("hive.server2.authentication.kerberos.principal", ""),
  820. HIVE_SERVER2_SPNEGO_KEYTAB("hive.server2.authentication.spnego.keytab", ""),
  821. HIVE_SERVER2_SPNEGO_PRINCIPAL("hive.server2.authentication.spnego.principal", ""),
  822. HIVE_SERVER2_PLAIN_LDAP_URL("hive.server2.authentication.ldap.url", null),
  823. HIVE_SERVER2_PLAIN_LDAP_BASEDN("hive.server2.authentication.ldap.baseDN", null),
  824. HIVE_SERVER2_PLAIN_LDAP_DOMAIN("hive.server2.authentication.ldap.Domain", null),
  825. HIVE_SERVER2_CUSTOM_AUTHENTICATION_CLASS("hive.server2.custom.authentication.class", null),
  826. // List of the underlying pam services that should be used when auth type is PAM
  827. // A file with the same name must exist in /etc/pam.d
  828. HIVE_SERVER2_PAM_SERVICES("hive.server2.authentication.pam.services", null),
  829. HIVE_SERVER2_ENABLE_DOAS("hive.server2.enable.doAs", true),
  830. HIVE_SERVER2_TABLE_TYPE_MAPPING("hive.server2.table.type.mapping", "CLASSIC",
  831. new StringsValidator("CLASSIC", "HIVE")),
  832. HIVE_SERVER2_SESSION_HOOK("hive.server2.session.hook", ""),
  833. HIVE_SERVER2_USE_SSL("hive.server2.use.SSL", false),
  834. HIVE_SERVER2_SSL_KEYSTORE_PATH("hive.server2.keystore.path", ""),
  835. HIVE_SERVER2_SSL_KEYSTORE_PASSWORD("hive.server2.keystore.password", ""),
  836. HIVE_SECURITY_COMMAND_WHITELIST("hive.security.command.whitelist", "set,reset,dfs,add,delete,compile"),
  837. HIVE_CONF_RESTRICTED_LIST("hive.conf.restricted.list", "hive.security.authenticator.manager,hive.security.authorization.manager"),
  838. // If this is set, all move tasks at the end of a multi-insert query will begin only once
  839. // all outputs are ready
  840. HIVE_MULTI_INSERT_MOVE_TASKS_SHARE_DEPENDENCIES(
  841. "hive.multi.insert.move.tasks.share.dependencies", false),
  842. // If this is set, when writing partitions, the metadata will include the bucketing/sorting
  843. // properties with which the data was written, if any (this will not overwrite the metadata
  844. // inherited from the table if the table is bucketed/sorted)
  845. HIVE_INFER_BUCKET_SORT("hive.exec.infer.bucket.sort", false),
  846. // If this is set, when setting the number of reducers for the map reduce task which writes
  847. // the final output files, a number that is a power of two will be chosen. Note that the
  848. // number of reducers may be set to a power of two only to be followed by a merge task,
  849. // in which case nothing can be inferred from it.
  850. HIVE_INFER_BUCKET_SORT_NUM_BUCKETS_POWER_TWO(
  851. "hive.exec.infer.bucket.sort.num.buckets.power.two", false),
  852. /* The following section contains all configurations used for the list bucketing feature. */
  853. /* This is not for clients, but only for the block merge task. */
  854. /* It is used by BlockMergeTask to send a flag to RCFileMergeMapper */
  855. /* for the alter table ... concatenate and list bucketing case. */
  856. HIVEMERGECURRENTJOBCONCATENATELISTBUCKETING(
  857. "hive.merge.current.job.concatenate.list.bucketing", true),
  858. /* This is not for clients, but only for the block merge task. */
  859. /* It is used by BlockMergeTask to send a flag to RCFileMergeMapper */
  860. /* indicating the depth of list bucketing. */
  861. HIVEMERGECURRENTJOBCONCATENATELISTBUCKETINGDEPTH(
  862. "hive.merge.current.job.concatenate.list.bucketing.depth", 0),
  863. // Enable the list bucketing optimizer. Disabled by default.
  864. HIVEOPTLISTBUCKETING("hive.optimize.listbucketing", false),
  865. // Maximum read timeout for the HiveServer socket, and whether to allow the TCP keepalive socket option.
  866. SERVER_READ_SOCKET_TIMEOUT("hive.server.read.socket.timeout", 10),
  867. SERVER_TCP_KEEP_ALIVE("hive.server.tcp.keepalive", true),
  868. // Whether to show the unquoted partition names in query results.
  869. HIVE_DECODE_PARTITION_NAME("hive.decode.partition.name", false),
  870. HIVE_EXECUTION_ENGINE("hive.execution.engine", "mr",
  871. new StringsValidator("mr", "tez")),
  872. HIVE_JAR_DIRECTORY("hive.jar.directory", null),
  873. HIVE_USER_INSTALL_DIR("hive.user.install.directory", "hdfs:///user/"),
  874. // Vectorization enabled
  875. HIVE_VECTORIZATION_ENABLED("hive.vectorized.execution.enabled", false),
  876. HIVE_VECTORIZATION_GROUPBY_CHECKINTERVAL("hive.vectorized.groupby.checkinterval", 100000),
  877. HIVE_VECTORIZATION_GROUPBY_MAXENTRIES("hive.vectorized.groupby.maxentries", 1000000),
  878. HIVE_VECTORIZATION_GROUPBY_FLUSH_PERCENT("hive.vectorized.groupby.flush.percent", (float) 0.1),
  879. HIVE_TYPE_CHECK_ON_INSERT("hive.typecheck.on.insert", true),
  880. // Whether to send the query plan via local resource or RPC
  881. HIVE_RPC_QUERY_PLAN("hive.rpc.query.plan", false),
  882. // Whether to generate the splits locally or in the AM (tez only)
  883. HIVE_AM_SPLIT_GENERATION("hive.compute.splits.in.am", true),
  884. HIVE_PREWARM_ENABLED("hive.prewarm.enabled", false),
  885. HIVE_PREWARM_NUM_CONTAINERS("hive.prewarm.numcontainers", 10),
  886. // none, idonly, traverse, execution
  887. HIVESTAGEIDREARRANGE("hive.stageid.rearrange", "none"),
  888. HIVEEXPLAINDEPENDENCYAPPENDTASKTYPES("hive.explain.dependency.append.tasktype", false),
  889. HIVECOUNTERGROUP("hive.counters.group.name", "HIVE"),
  890. HIVE_SERVER2_TEZ_DEFAULT_QUEUES("hive.server2.tez.default.queues", ""),
  891. HIVE_SERVER2_TEZ_SESSIONS_PER_DEFAULT_QUEUE("hive.server2.tez.sessions.per.default.queue", 1),
  892. HIVE_SERVER2_TEZ_INITIALIZE_DEFAULT_SESSIONS("hive.server2.tez.initialize.default.sessions",
  893. false),
  894. // none, column
  895. // none is the default (historical) behavior; it implies that only alphanumeric characters and underscores are valid in identifiers.
  896. // column: implies column names can contain any character.
  897. HIVE_QUOTEDID_SUPPORT("hive.support.quoted.identifiers", "column",
  898. new PatternValidator("none", "column")),
  899. USERS_IN_ADMIN_ROLE("hive.users.in.admin.role", ""),
  900. // Enable (configurable) deprecated behaviors by setting the desired level of backward compatibility.
  901. // Setting to 0.12:
  902. //   maintains division behavior: int / int => double
  903. // Setting to 0.13: keeps the 0.13 behavior (integer division returns a decimal)
  904. HIVE_COMPAT("hive.compat", HiveCompat.DEFAULT_COMPAT_LEVEL),
  905. HIVE_CONVERT_JOIN_BUCKET_MAPJOIN_TEZ("hive.convert.join.bucket.mapjoin.tez", false),
  906. // Check if a plan contains a Cross Product.
  907. // If there is one, output a warning to the Session's console.
  908. HIVE_CHECK_CROSS_PRODUCT("hive.exec.check.crossproducts", true),
  909. HIVE_LOCALIZE_RESOURCE_WAIT_INTERVAL("hive.localize.resource.wait.interval", 5000L), // in ms
  910. HIVE_LOCALIZE_RESOURCE_NUM_WAIT_ATTEMPTS("hive.localize.resource.num.wait.attempts", 5),
  911. TEZ_AUTO_REDUCER_PARALLELISM("hive.tez.auto.reducer.parallelism", false),
  912. TEZ_MAX_PARTITION_FACTOR("hive.tez.max.partition.factor", 2f),
  913. TEZ_MIN_PARTITION_FACTOR("hive.tez.min.partition.factor", 0.25f)
  914. ;
  915. public final String varname;
  916. public final String defaultVal;
  917. public final int defaultIntVal;
  918. public final long defaultLongVal;
  919. public final float defaultFloatVal;
  920. public final Class<?> valClass;
  921. public final boolean defaultBoolVal;
  922. private final VarType type;
  923. private final Validator validator;
  924. ConfVars(String varname, String defaultVal) {
  925. this(varname, defaultVal, null);
  926. }
  927. ConfVars(String varname, String defaultVal, Validator validator) {
  928. this.varname = varname;
  929. this.valClass = String.class;
  930. this.defaultVal = defaultVal;
  931. this.defaultIntVal = -1;
  932. this.defaultLongVal = -1;
  933. this.defaultFloatVal = -1;
  934. this.defaultBoolVal = false;
  935. this.type = VarType.STRING;
  936. this.validator = validator;
  937. }
  938. ConfVars(String varname, int defaultVal) {
  939. this(varname, defaultVal, null);
  940. }
  941. ConfVars(String varname, int defaultIntVal, Validator validator) {
  942. this.varname = varname;
  943. this.valClass = Integer.class;
  944. this.defaultVal = Integer.toString(defaultIntVal);
  945. this.defaultIntVal = defaultIntVal;
  946. this.defaultLongVal = -1;
  947. this.defaultFloatVal = -1;
  948. this.defaultBoolVal = false;
  949. this.type = VarType.INT;
  950. this.validator = validator;
  951. }
  952. ConfVars(String varname, long defaultVal) {
  953. this(varname, defaultVal, null);
  954. }
  955. ConfVars(String varname, long defaultLongVal, Validator validator) {
  956. this.varname = varname;
  957. this.valClass = Long.class;
  958. this.defaultVal = Long.toString(defaultLongVal);
  959. this.defaultIntVal = -1;
  960. this.defaultLongVal = defaultLongVal;
  961. this.defaultFloatVal = -1;
  962. this.defaultBoolVal = false;
  963. this.type = VarType.LONG;
  964. this.validator = validator;
  965. }
  966. ConfVars(String varname, float defaultVal) {
  967. this(varname, defaultVal, null);
  968. }
  969. ConfVars(String varname, float defaultFloatVal, Validator validator) {
  970. this.varname = varname;
  971. this.valClass = Float.class;
  972. this.defaultVal = Float.toString(defaultFloatVal);
  973. this.defaultIntVal = -1;
  974. this.defaultLongVal = -1;
  975. this.defaultFloatVal = defaultFloatVal;
  976. this.defaultBoolVal = false;
  977. this.type = VarType.FLOAT;
  978. this.validator = validator;
  979. }
  980. ConfVars(String varname, boolean defaultBoolVal) {
  981. this.varname = varname;
  982. this.valClass = Boolean.class;
  983. this.defaultVal = Boolean.toString(defaultBoolVal);
  984. this.defaultIntVal = -1;
  985. this.defaultLongVal = -1;
  986. this.defaultFloatVal = -1;
  987. this.defaultBoolVal = defaultBoolVal;
  988. this.type = VarType.BOOLEAN;
  989. this.validator = null;
  990. }
  991. public boolean isType(String value) {
  992. return type.isType(value);
  993. }
  994. public String validate(String value) {
  995. return validator == null ? null : validator.validate(value);
  996. }
  997. public String typeString() {
  998. return type.typeString();
  999. }
  1000. @Override
  1001. public String toString() {
  1002. return varname;
  1003. }
  1004. private static String findHadoopBinary() {
  1005. String val = System.getenv("HADOOP_HOME");
  1006. // In Hadoop 2.X, HADOOP_HOME is deprecated and replaced with HADOOP_PREFIX
  1007. if (val == null) {
  1008. val = System.getenv("HADOOP_PREFIX");
  1009. }
  1010. // and if all else fails we can at least try /usr/bin/hadoop
  1011. val = (val == null ? File.separator + "usr" : val)
  1012. + File.separator + "bin" + File.separator + "hadoop";
  1013. // Launch hadoop command file on windows.
  1014. return val + (Shell.WINDOWS ? ".cmd" : "");
  1015. }
  1016. enum VarType {
  1017. STRING { @Override
  1018. void checkType(String value) throws Exception { } },
  1019. INT { @Override
  1020. void checkType(String value) throws Exception { Integer.valueOf(value); } },
  1021. LONG { @Override
  1022. void checkType(String value) throws Exception { Long.valueOf(value); } },
  1023. FLOAT { @Override
  1024. void checkType(String value) throws Exception { Float.valueOf(value); } },
  1025. BOOLEAN { @Override
  1026. void checkType(String value) throws Exception { Boolean.valueOf(value); } };
  1027. boolean isType(String value) {
  1028. try { checkType(value); } catch (Exception e) { return false; }
  1029. return true;
  1030. }
  1031. String typeString() { return name().toUpperCase();}
  1032. abstract void checkType(String value) throws Exception;
  1033. }
  1034. }
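  // Example (illustrative, not in the original source): each ConfVars entry records its value
  // type, so callers use the matching typed accessor defined below. Assuming a HiveConf
  // instance named "conf":
  //   int maxThreads = conf.getIntVar(ConfVars.HIVE_SERVER2_THRIFT_MAX_WORKER_THREADS); // 500 by default
  //   conf.setBoolVar(ConfVars.HIVE_SERVER2_USE_SSL, true);
  // Using a mismatched accessor (e.g. getBoolVar on an Integer-typed variable) trips the
  // assertions in the static helpers below.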
  1035. /**
  1036. * Writes the default ConfVars out to a byte array and returns an input
  1037. * stream wrapping that byte array.
  1038. *
  1039. * We need this in order to initialize the ConfVar properties
  1040. * in the underlying Configuration object using the addResource(InputStream)
  1041. * method.
  1042. *
  1043. * It is important to use a LoopingByteArrayInputStream because it turns out
  1044. * addResource(InputStream) is broken since Configuration tries to read the
  1045. * entire contents of the same InputStream repeatedly without resetting it.
  1046. * LoopingByteArrayInputStream has special logic to handle this.
  1047. */
  1048. private static synchronized InputStream getConfVarInputStream() {
  1049. if (confVarByteArray == null) {
  1050. try {
  1051. // Create a Hadoop configuration without inheriting default settings.
  1052. Configuration conf = new Configuration(false);
  1053. applyDefaultNonNullConfVars(conf);
  1054. ByteArrayOutputStream confVarBaos = new ByteArrayOutputStream();
  1055. conf.writeXml(confVarBaos);
  1056. confVarByteArray = confVarBaos.toByteArray();
  1057. } catch (Exception e) {
  1058. // We're pretty screwed if we can't load the default conf vars
  1059. throw new RuntimeException("Failed to initialize default Hive configuration variables!", e);
  1060. }
  1061. }
  1062. return new LoopingByteArrayInputStream(confVarByteArray);
  1063. }
  1064. public void verifyAndSet(String name, String value) throws IllegalArgumentException {
  1065. if (isWhiteListRestrictionEnabled) {
  1066. if (!modWhiteList.contains(name)) {
  1067. throw new IllegalArgumentException("Cannot modify " + name + " at runtime. "
  1068. + "It is not in the list of parameters that are allowed to be modified at runtime");
  1069. }
  1070. }
  1071. if (restrictList.contains(name)) {
  1072. throw new IllegalArgumentException("Cannot modify " + name + " at runtime. It is in the list"
  1073. + " of parameters that can't be modified at runtime");
  1074. }
  1075. set(name, value);
  1076. }
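  // Example (illustrative): with the default hive.conf.restricted.list,
  //   conf.verifyAndSet("hive.security.authorization.manager", "...");
  // throws IllegalArgumentException, while a non-restricted parameter such as
  //   conf.verifyAndSet("hive.exec.parallel", "true");
  // falls through to set(name, value).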
  1077. public static int getIntVar(Configuration conf, ConfVars var) {
  1078. assert (var.valClass == Integer.class) : var.varname;
  1079. return conf.getInt(var.varname, var.defaultIntVal);
  1080. }
  1081. public static void setIntVar(Configuration conf, ConfVars var, int val) {
  1082. assert (var.valClass == Integer.class) : var.varname;
  1083. conf.setInt(var.varname, val);
  1084. }
  1085. public int getIntVar(ConfVars var) {
  1086. return getIntVar(this, var);
  1087. }
  1088. public void setIntVar(ConfVars var, int val) {
  1089. setIntVar(this, var, val);
  1090. }
  1091. public static long getLongVar(Configuration conf, ConfVars var) {
  1092. assert (var.valClass == Long.class) : var.varname;
  1093. return conf.getLong(var.varname, var.defaultLongVal);
  1094. }
  1095. public static long getLongVar(Configuration conf, ConfVars var, long defaultVal) {
  1096. return conf.getLong(var.varname, defaultVal);
  1097. }
  1098. public static void setLongVar(Configuration conf, ConfVars var, long val) {
  1099. assert (var.valClass == Long.class) : var.varname;
  1100. conf.setLong(var.varname, val);
  1101. }
  1102. public long getLongVar(ConfVars var) {
  1103. return getLongVar(this, var);
  1104. }
  1105. public void setLongVar(ConfVars var, long val) {
  1106. setLongVar(this, var, val);
  1107. }
  1108. public static float getFloatVar(Configuration conf, ConfVars var) {
  1109. assert (var.valClass == Float.class) : var.varname;
  1110. return conf.getFloat(var.varname, var.defaultFloatVal);
  1111. }
  1112. public static float getFloatVar(Configuration conf, ConfVars var, float defaultVal) {
  1113. return conf.getFloat(var.varname, defaultVal);
  1114. }
  1115. public static void setFloatVar(Configuration conf, ConfVars var, float val) {
  1116. assert (var.valClass == Float.class) : var.varname;
  1117. conf.setFloat(var.varname, val);
  1118. }
  1119. public float getFloatVar(ConfVars var) {
  1120. return getFloatVar(this, var);
  1121. }
  1122. public void setFloatVar(ConfVars var, float val) {
  1123. setFloatVar(this, var, val);
  1124. }
  1125. public static boolean getBoolVar(Configuration conf, ConfVars var) {
  1126. assert (var.valClass == Boolean.class) : var.varname;
  1127. return conf.getBoolean(var.varname, var.defaultBoolVal);
  1128. }
  1129. public static boolean getBoolVar(Configuration conf, ConfVars var, boolean defaultVal) {
  1130. return conf.getBoolean(var.varname, defaultVal);
  1131. }
  1132. public static void setBoolVar(Configuration conf, ConfVars var, boolean val) {
  1133. assert (var.valClass == Boolean.class) : var.varname;
  1134. conf.setBoolean(var.varname, val);
  1135. }
  1136. public boolean getBoolVar(ConfVars var) {
  1137. return getBoolVar(this, var);
  1138. }
  1139. public void setBoolVar(ConfVars var, boolean val) {
  1140. setBoolVar(this, var, val);
  1141. }
  1142. public static String getVar(Configuration conf, ConfVars var) {
  1143. assert (var.valClass == String.class) : var.varname;
  1144. return conf.get(var.varname, var.defaultVal);
  1145. }
  1146. public static String getVar(Configuration conf, ConfVars var, String defaultVal) {
  1147. return conf.get(var.varname, defaultVal);
  1148. }
  1149. public static void setVar(Configuration conf, ConfVars var, String val) {
  1150. assert (var.valClass == String.class) : var.varname;
  1151. conf.set(var.varname, val);
  1152. }
  1153. public static ConfVars getConfVars(String name) {
  1154. return vars.get(name);
  1155. }
  1156. public String getVar(ConfVars var) {
  1157. return getVar(this, var);
  1158. }
  1159. public void setVar(ConfVars var, String val) {
  1160. setVar(this, var, val);
  1161. }
  1162. public void logVars(PrintStream ps) {
  1163. for (ConfVars one : ConfVars.values()) {
  1164. ps.println(one.varname + "=" + ((get(one.varname) != null) ? get(one.varname) : ""));
  1165. }
  1166. }
  1167. public HiveConf() {
  1168. super();
  1169. initialize(this.getClass());
  1170. }
  1171. public HiveConf(Class<?> cls) {
  1172. super();
  1173. initialize(cls);
  1174. }
  1175. public HiveConf(Configuration other, Class<?> cls) {
  1176. super(other);
  1177. initialize(cls);
  1178. }
  1179. /**
  1180. * Copy constructor
  1181. */
  1182. public HiveConf(HiveConf other) {
  1183. super(other);
  1184. hiveJar = other.hiveJar;
  1185. auxJars = other.auxJars;
  1186. origProp = (Properties)other.origProp.clone();
  1187. restrictList.addAll(other.restrictList);
        // Preserve the modifiable-parameter whitelist state as well, so a copied
        // configuration keeps its restriction behavior.
        modWhiteList.addAll(other.modWhiteList);
        isWhiteListRestrictionEnabled = other.isWhiteListRestrictionEnabled;
  1188. }
  1189. public Properties getAllProperties() {
  1190. return getProperties(this);
  1191. }
  1192. private static Properties getProperties(Configuration conf) {
  1193. Iterator<Map.Entry<String, String>> iter = conf.iterator();
  1194. Properties p = new Properties();
  1195. while (iter.hasNext()) {
  1196. Map.Entry<String, String> e = iter.next();
  1197. p.setProperty(e.getKey(), e.getValue());
  1198. }
  1199. return p;
  1200. }
  1201. private void initialize(Class<?> cls) {
  1202. hiveJar = (new JobConf(cls)).getJar();
  1203. // preserve the original configuration
  1204. origProp = getAllProperties();
  1205. // Overlay the ConfVars. Note that this ignores ConfVars with null values
  1206. addResource(getConfVarInputStream());
  1207. // Overlay hive-site.xml if it exists
  1208. if (hiveSiteURL != null) {
  1209. addResource(hiveSiteURL);
  1210. }
  1211. // Overlay the values of any system properties whose names appear in the list of ConfVars
  1212. applySystemProperties();
  1213. if (this.get("hive.metastore.local", null) != null) {
  1214. l4j.warn("DEPRECATED: Configuration property hive.metastore.local no longer has any " +
  1215. "effect. Make sure to provide a valid value for hive.metastore.uris if you are " +
  1216. "connecting to a remote metastore.");
  1217. }
  1218. if ((this.get("hive.metastore.ds.retry.attempts") != null) ||
  1219. this.get("hive.metastore.ds.retry.interval") != null) {
  1220. l4j.warn("DEPRECATED: hive.metastore.ds.retry.* no longer has any effect. " +
  1221. "Use hive.hmshandler.retry.* instead");
  1222. }
  1223. // if the running class was loaded directly (e.g. through Eclipse) rather than from a
  1224. // jar, then this is needed
  1225. if (hiveJar == null) {
  1226. hiveJar = this.get(ConfVars.HIVEJAR.varname);
  1227. }
  1228. if (auxJars == null) {
  1229. auxJars = this.get(ConfVars.HIVEAUXJARS.varname);
  1230. }
  1231. if (getBoolVar(ConfVars.METASTORE_SCHEMA_VERIFICATION)) {
  1232. setBoolVar(ConfVars.METASTORE_AUTO_CREATE_SCHEMA, false);
  1233. setBoolVar(ConfVars.METASTORE_FIXED_DATASTORE, true);
  1234. }
  1235. if (getBoolVar(HiveConf.ConfVars.HIVECONFVALIDATION)) {
  1236. List<String> trimmed = new ArrayList<String>();
  1237. for (Map.Entry<String,String> entry : this) {
  1238. String key = entry.getKey();
  1239. if (key == null || !key.startsWith("hive.")) {
  1240. continue;
  1241. }
  1242. ConfVars var = HiveConf.getConfVars(key);
  1243. if (var == null) {
  1244. var = HiveConf.getConfVars(key.trim());
  1245. if (var != null) {
  1246. trimmed.add(key);
  1247. }
  1248. }
  1249. if (var == null) {
  1250. l4j.warn("HiveConf of name " + key + " does not exist");
  1251. } else if (!var.isType(entry.getValue())) {
  1252. l4j.warn("HiveConf " + var.varname + " expects " + var.typeString() + " type value");
  1253. }
  1254. }
  1255. for (String key : trimmed) {
  1256. set(key.trim(), getRaw(key));
  1257. unset(key);
  1258. }
  1259. }
  1260. // setup list of conf vars that are not allowed to change runtime
  1261. setupRestrictList();
  1262. }
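  // Example (illustrative): with hive.conf.validation enabled, a property registered under a
  // padded name such as "hive.exec.parallel " (note the trailing space) is re-set above under
  // its trimmed name and the padded key is unset, so later lookups by the canonical name succeed.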
  1263. /**
  1264. * Apply system properties to this object if the property name is defined in ConfVars
  1265. * and the value is non-null and not an empty string.
  1266. */
  1267. private void applySystemProperties() {
  1268. Map<String, String> systemProperties = getConfSystemProperties();
  1269. for (Entry<String, String> systemProperty : systemProperties.entrySet()) {
  1270. this.set(systemProperty.getKey(), systemProperty.getValue());
  1271. }
  1272. }
  1273. /**
  1274. * This method returns a mapping from config variable name to its value for all config variables
  1275. * which have been set using System properties
  1276. */
  1277. public static Map<String, String> getConfSystemProperties() {
  1278. Map<String, String> systemProperties = new HashMap<String, String>();
  1279. for (ConfVars oneVar : ConfVars.values()) {
  1280. String propertyValue = System.getProperty(oneVar.varname);
  1281. // Only pick up system properties with non-empty values.
  1282. if (propertyValue != null && propertyValue.length() > 0) {
  1283. systemProperties.put(oneVar.varname, propertyValue);
  1284. }
  1285. }
  1286. return systemProperties;
  1287. }
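  // Example (illustrative): launching the JVM with -Dhive.execution.engine=tez makes this
  // method return a map containing {hive.execution.engine=tez}, which applySystemProperties()
  // then overlays onto the configuration.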
  1288. /**
  1289. * Overlays ConfVar properties with non-null values
  1290. */
  1291. private static void applyDefaultNonNullConfVars(Configuration conf) {
  1292. for (ConfVars var : ConfVars.values()) {
  1293. if (var.defaultVal == null) {
  1294. // Don't override ConfVars with null values
  1295. continue;
  1296. }
  1297. conf.set(var.varname, var.defaultVal);
  1298. }
  1299. }
  1300. public Properties getChangedProperties() {
  1301. Properties ret = new Properties();
  1302. Properties newProp = getAllProperties();
  1303. for (Object one : newProp.keySet()) {
  1304. String oneProp = (String) one;
  1305. String oldValue = origProp.getProperty(oneProp);
  1306. if (!StringUtils.equals(oldValue, newProp.getProperty(oneProp))) {
  1307. ret.setProperty(oneProp, newProp.getProperty(oneProp));
  1308. }
  1309. }
  1310. return (ret);
  1311. }
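  // Example (illustrative): getChangedProperties() reports every property whose current value
  // differs from the snapshot captured in initialize(); e.g. after
  //   conf.setBoolVar(ConfVars.HIVE_VECTORIZATION_ENABLED, true);
  // the returned Properties would include hive.vectorized.execution.enabled=true, assuming it
  // differed from the original snapshot.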
  1312. public String getJar() {
  1313. return hiveJar;
  1314. }
  1315. /**
  1316. * @return the auxJars
  1317. */
  1318. public String getAuxJars() {
  1319. return auxJars;
  1320. }
  1321. /**
  1322. * @param auxJars the auxJars to set
  1323. */
  1324. public void setAuxJars(String auxJars) {
  1325. this.auxJars = auxJars;
  1326. setVar(this, ConfVars.HIVEAUXJARS, auxJars);
  1327. }
  1328. public URL getHiveDefaultLocation() {
  1329. return hiveDefaultURL;
  1330. }
  1331. public static void setHiveSiteLocation(URL location) {
  1332. hiveSiteURL = location;
  1333. }
  1334. public static URL getHiveSiteLocation() {
  1335. return hiveSiteURL;
  1336. }
  1337. /**
  1338. * @return the user name set in hadoop.job.ugi param or the current user from System
  1339. * @throws IOException
  1340. */
  1341. public String getUser() throws IOException {
  1342. try {
  1343. UserGroupInformation ugi = ShimLoader.getHadoopShims()
  1344. .getUGIForConf(this);
  1345. return ugi.getUserName();
  1346. } catch (LoginException le) {
  1347. throw new IOException(le);
  1348. }
  1349. }
  1350. public static String getColumnInternalName(int pos) {
  1351. return "_col" + pos;
  1352. }
  1353. public static int getPositionFromInternalName(String internalName) {
  1354. Pattern internalPattern = Pattern.compile("_col([0-9]+)");
  1355. Matcher m = internalPattern.matcher(internalName);
  1356. if (!m.matches()){
  1357. return -1;
  1358. } else {
  1359. return Integer.parseInt(m.group(1));
  1360. }
  1361. }
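  // Example: getColumnInternalName(3) returns "_col3" and getPositionFromInternalName("_col3")
  // returns 3; a name that does not match the "_col<n>" pattern, e.g. "name", yields -1.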
  1362. /**
  1363. * Validates a value for a ConfVar; returns a non-null message on failure, null if valid.
  1364. */
  1365. public static interface Validator {
  1366. String validate(String value);
  1367. }
  1368. public static class StringsValidator implements Validator {
  1369. private final Set<String> expected = new LinkedHashSet<String>();
  1370. private StringsValidator(String... values) {
  1371. for (String value : values) {
  1372. expected.add(value.toLowerCase());
  1373. }
  1374. }
  1375. @Override
  1376. public String validate(String value) {
  1377. if (value == null || !expected.contains(value.toLowerCase())) {
  1378. return "Invalid value.. expects one of " + expected;
  1379. }
  1380. return null;
  1381. }
  1382. }
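  // Example (illustrative): the validator attached to hive.execution.engine above was built as
  //   new StringsValidator("mr", "tez");
  // validate("MR") returns null because matching is case-insensitive, while an unexpected
  // value such as validate("spark") returns a failure message.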
  1383. public static class LongRangeValidator implements Validator {
  1384. private final long lower, upper;
  1385. public LongRangeValidator(long lower, long upper) {
  1386. this.lower = lower;
  1387. this.upper = upper;
  1388. }
  1389. @Override
  1390. public String validate(String value) {
  1391. try {
  1392. if(value == null) {
  1393. return "Value cannot be null";
  1394. }
  1395. value = value.trim();
  1396. long lvalue = Long.parseLong(value);
  1397. if (lvalue < lower || lvalue > upper) {
  1398. return "Invalid value " + value + ", which should be between " + lower + " and " + upper;
  1399. }
  1400. } catch (NumberFormatException e) {
  1401. return e.toString();
  1402. }
  1403. return null;
  1404. }
  1405. }
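  // Example (illustrative, assuming a validator constructed directly):
  //   new LongRangeValidator(1, 10).validate(" 5 ");  // returns null; the value is trimmed first
  //   new LongRangeValidator(1, 10).validate("0");    // returns a range failure message
  //   new LongRangeValidator(1, 10).validate("x");    // returns the NumberFormatException text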
  1406. public static class PatternValidator implements Validator {
  1407. private final List<Pattern> expected = new ArrayList<Pattern>();
  1408. private PatternValidator(String... values) {
  1409. for (String value : values) {
  1410. expected.add(Pattern.compile(value));
  1411. }
  1412. }
  1413. @Override
  1414. public String validate(String value) {
  1415. if (value == null) {
  1416. return "Invalid value.. expects one of patterns " + expected;
  1417. }
  1418. for (Pattern pattern : expected) {
  1419. if (pattern.matcher(value).matches()) {
  1420. return null;
  1421. }
  1422. }
  1423. return "Invalid value.. expects one of patterns " + expected;
  1424. }
  1425. }
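  // Example (illustrative): the validator for hive.support.quoted.identifiers above was built
  // as new PatternValidator("none", "column"); since Pattern.matcher(value).matches() requires
  // the whole value to match, only the exact strings "none" and "column" validate.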
  1426. public static class RatioValidator implements Validator {
  1427. @Override
  1428. public String validate(String value) {
  1429. try {
  1430. float fvalue = Float.valueOf(value);
  1431. if (fvalue <= 0 || fvalue >= 1) {
  1432. return "Invalid ratio " + value + ", which should be strictly between 0 and 1";
  1433. }
  1434. } catch (NumberFormatException e) {
  1435. return e.toString();
  1436. }
  1437. return null;
  1438. }
  1439. }
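  // Example (illustrative): validate("0.1") returns null, while the endpoints are excluded:
  // validate("0") and validate("1") both return failure messages, as does a non-numeric value
  // such as "high".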
  1440. /**
  1441. * Append a comma-separated list of config variable names to the restricted list.
  1442. * @param restrictListStr
  1443. */
  1444. public void addToRestrictList(String restrictListStr) {
  1445. if (restrictListStr == null) {
  1446. return;
  1447. }
  1448. String oldList = this.getVar(ConfVars.HIVE_CONF_RESTRICTED_LIST);
  1449. if (oldList == null || oldList.isEmpty()) {
  1450. this.setVar(ConfVars.HIVE_CONF_RESTRICTED_LIST, restrictListStr);
  1451. } else {
  1452. this.setVar(ConfVars.HIVE_CONF_RESTRICTED_LIST, oldList + "," + restrictListStr);
  1453. }
  1454. setupRestrictList();
  1455. }
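  // Example (illustrative): conf.addToRestrictList("hive.exec.pre.hooks") appends that name to
  // hive.conf.restricted.list and rebuilds restrictList, so a later
  // verifyAndSet("hive.exec.pre.hooks", ...) throws IllegalArgumentException.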
  1456. /**
  1457. * Enable or disable the whitelist check for parameter modification.
  1458. *
  1459. * @param isEnabled
  1460. */
  1461. @LimitedPrivate(value = { "Currently only for use by HiveAuthorizer" })
  1462. public void setIsModWhiteListEnabled(boolean isEnabled) {
  1463. this.isWhiteListRestrictionEnabled = isEnabled;
  1464. }
  1465. /**
  1466. * Add a config parameter name to the whitelist of parameters that can be modified.
  1467. *
  1468. * @param paramname
  1469. */
  1470. @LimitedPrivate(value = { "Currently only for use by HiveAuthorizer" })
  1471. public void addToModifiableWhiteList(String paramname) {
  1472. if (paramname == null) {
  1473. return;
  1474. }
  1475. modWhiteList.add(paramname);
  1476. }
  1477. /**
  1478. * Add the HIVE_CONF_RESTRICTED_LIST values to restrictList,
  1479. * including HIVE_CONF_RESTRICTED_LIST itself
  1480. */
  1481. private void setupRestrictList() {
  1482. String restrictListStr = this.getVar(ConfVars.HIVE_CONF_RESTRICTED_LIST);
  1483. restrictList.clear();
  1484. if (restrictListStr != null) {
  1485. for (String entry : restrictListStr.split(",")) {
  1486. restrictList.add(entry.trim());
  1487. }
  1488. }
  1489. restrictList.add(ConfVars.HIVE_IN_TEST.varname);
  1490. restrictList.add(ConfVars.HIVE_CONF_RESTRICTED_LIST.varname);
  1491. }
  1492. }