<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?><!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
--><configuration>
<!-- WARNING!!! This file is auto generated for documentation purposes ONLY! -->
<!-- WARNING!!! Any changes you make to this file will be ignored by Hive. -->
<!-- WARNING!!! You must make your changes in hive-site.xml instead. -->
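<!-- Illustrative only: per the warnings above, overrides belong in hive-site.xml.
     A minimal sketch of an override entry there (the property and value are just
     examples drawn from this file, not recommendations):
     <configuration>
       <property>
         <name>hive.exec.scratchdir</name>
         <value>/opt/data/hive/scratch</value>
       </property>
     </configuration>
-->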
<!-- Hive Execution Parameters -->
<property>
<name>hive.exec.script.wrapper</name>
<value/>
<description/>
</property>
<property>
<name>hive.exec.plan</name>
<value/>
<description/>
</property>
<property>
<name>hive.exec.stagingdir</name>
<value>.hive-staging</value>
<description>Directory name that will be created inside table locations in order to support HDFS encryption. This replaces ${hive.exec.scratchdir} for query results, with the exception of read-only tables. In all cases ${hive.exec.scratchdir} is still used for other temporary files, such as job plans.</description>
</property>
<property>
<name>hive.exec.scratchdir</name>
<value>/opt/data/hive/scratch</value>
<description>HDFS root scratch dir for Hive jobs, which gets created with write-all (733) permission. For each connecting user, an HDFS scratch dir ${hive.exec.scratchdir}/&lt;username&gt; is created with ${hive.scratch.dir.permission}.</description>
</property>
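<!-- Sketch (username is hypothetical): a user "alice" connecting to Hive would get
     the per-user scratch dir /opt/data/hive/scratch/alice, created with the
     permission configured in hive.scratch.dir.permission (700 below). -->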
<property>
<name>hive.repl.rootdir</name>
<value>/opt/data/hive/repl/</value>
<description>HDFS root dir for all replication dumps.</description>
</property>
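<!-- Usage sketch (database name is hypothetical): a replication dump such as
       REPL DUMP sales_db;
     writes its output under this root, and the dump location it returns is then
     fed to REPL LOAD on the replica warehouse. -->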
<property>
<name>hive.repl.cm.enabled</name>
<value>false</value>
<description>Turn on ChangeManager, so that deleted files will go to cmrootdir.</description>
</property>
<property>
<name>hive.repl.cmrootdir</name>
<value>/opt/data/hive/cmroot/</value>
<description>Root dir for ChangeManager, used for deleted files.</description>
</property>
<property>
<name>hive.repl.cm.retain</name>
<value>24h</value>
<description>
Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec); defaults to hours if no unit is specified.
Time to retain removed files in cmrootdir.
</description>
</property>
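<!-- Time-literal sketch for properties of this type: "90s", "30m", "24h" and "7d"
     are all valid; a bare number such as "24" is read in the property's default
     unit (hours here), so "24" and "24h" would be equivalent for this property. -->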
<property>
<name>hive.repl.cm.interval</name>
<value>3600s</value>
<description>
Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec); defaults to seconds if no unit is specified.
Interval for the cmroot cleanup thread.
</description>
</property>
<property>
<name>hive.repl.replica.functions.root.dir</name>
<value>/opt/data/hive/repl/functions/</value>
<description>Root directory on the replica warehouse where the repl sub-system will store JARs from the primary warehouse.</description>
</property>
<property>
<name>hive.repl.approx.max.load.tasks</name>
<value>10000</value>
<description>
Provides an approximation of the maximum number of tasks that should be executed before
dynamically generating the next set of tasks. The number is approximate because Hive
will stop at a slightly higher number: some events may lead to a task increment that
crosses the specified limit.
</description>
</property>
<property>
<name>hive.repl.partitions.dump.parallelism</name>
<value>100</value>
<description>Number of threads that will be used to dump partition data information during repl dump.</description>
</property>
<property>
<name>hive.repl.dumpdir.clean.freq</name>
<value>0s</value>
<description>
Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec); defaults to seconds if no unit is specified.
Frequency at which the timer task runs to purge expired dump dirs.
</description>
</property>
<property>
<name>hive.repl.dumpdir.ttl</name>
<value>7d</value>
<description>
Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec); defaults to days if no unit is specified.
TTL of dump dirs before cleanup.
</description>
</property>
<property>
<name>hive.repl.dump.metadata.only</name>
<value>false</value>
<description>Indicates whether the replication dump contains only metadata or both data and metadata.</description>
</property>
<property>
<name>hive.repl.dump.include.acid.tables</name>
<value>false</value>
<description>
Indicates whether repl dump should include information about ACID tables. It should be
used in conjunction with 'hive.repl.dump.metadata.only' to enable copying of
metadata for ACID tables that do not require the corresponding transaction
semantics to be applied on the target. This can be removed when ACID table
replication is supported.
</description>
</property>
<property>
<name>hive.repl.bootstrap.dump.open.txn.timeout</name>
<value>1h</value>
<description>
Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec); defaults to hours if no unit is specified.
Indicates the timeout for all transactions which are opened before triggering bootstrap REPL DUMP. If these open transactions are not closed within the timeout value, REPL DUMP will forcefully abort them and continue with the bootstrap dump.
</description>
</property>
<property>
<name>hive.repl.add.raw.reserved.namespace</name>
<value>false</value>
<description>
For TDE with the same encryption keys on source and target, allow the DistCp super user to access
the raw bytes from the filesystem without decrypting them on the source and then encrypting them on the target.
</description>
</property>
<property>
<name>hive.exec.local.scratchdir</name>
<value>/opt/data/hive/tmp/master</value>
<description>Local scratch space for Hive jobs</description>
</property>
<property>
<name>hive.downloaded.resources.dir</name>
<value>/opt/data/hive/tmp/${hive.session.id}_resources</value>
<description>Temporary local directory for added resources in the remote file system.</description>
</property>
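<!-- Expansion sketch (session id is illustrative): with a session id of 1234abcd,
     this resolves to /opt/data/hive/tmp/1234abcd_resources, a per-session holding
     area for resources pulled in via ADD JAR / ADD FILE. -->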
<property>
<name>hive.scratch.dir.permission</name>
<value>700</value>
<description>The permission for the user-specific scratch directories that get created.</description>
</property>
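<!-- Octal sketch: 700 grants rwx to the owner and nothing to group or others, so
     each user's scratch dir is private. The root scratch dir's 733, by contrast,
     adds write and execute (but not read) for group and others, letting users
     create entries under it without being able to list its contents. -->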
<property>
<name>hive.exec.submitviachild</name>
<value>false</value>
<description/>
</property>
<property>
<name>hive.exec.submit.local.task.via.child</name>
<value>true</value>
<description>
Determines whether local tasks (typically the mapjoin hashtable generation phase) run in
a separate JVM (true recommended) or not.
Setting this to false avoids the overhead of spawning a new JVM, but can lead to out-of-memory issues.
</description>
</property>
<property>
<name>hive.exec.script.maxerrsize</name>
<value>100000</value>
<description>
Maximum number of bytes a script is allowed to emit to standard error (per map-reduce task).
This prevents runaway scripts from filling log partitions to capacity.
</description>
</property>
<property>
<name>hive.exec.script.allow.partial.consumption</name>
<value>false</value>
<description>When enabled, this option allows a user script to exit successfully without consuming all the data from the standard input.</description>
</property>
</configuration>