1 <?xml version="1.0"?>
2 <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
3
4 <!--
5 Licensed to the Apache Software Foundation (ASF) under one or more
6 contributor license agreements. See the NOTICE file distributed with
7 this work for additional information regarding copyright ownership.
8 The ASF licenses this file to You under the Apache License, Version 2.0
9 (the "License"); you may not use this file except in compliance with
10 the License. You may obtain a copy of the License at
11
12 http://www.apache.org/licenses/LICENSE-2.0
13
14 Unless required by applicable law or agreed to in writing, software
15 distributed under the License is distributed on an "AS IS" BASIS,
16 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17 See the License for the specific language governing permissions and
18 limitations under the License.
19 -->
20
21 <!-- Do not modify this file directly. Instead, copy entries that you -->
22 <!-- wish to modify from this file into core-site.xml and change them -->
23 <!-- there. If core-site.xml does not already exist, create it. -->
24
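<!-- Illustrative sketch (not an active setting): to override a default, copy the
     property into core-site.xml and change its value there. Assuming a core-site.xml
     in the Hadoop configuration directory, an override might look like the following;
     the /var/hadoop/tmp path is a hypothetical placeholder.

     <configuration>
       <property>
         <name>hadoop.tmp.dir</name>
         <value>/var/hadoop/tmp</value>
       </property>
     </configuration>
-->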
25 <configuration>
26
27 <!--- global properties -->
28
29 <property>
30 <name>hadoop.common.configuration.version</name>
31 <value>0.23.0</value>
32 <description>version of this configuration file</description>
33 </property>
34
35 <property>
36 <name>hadoop.tmp.dir</name>
37 <value>/tmp/hadoop-${user.name}</value>
38 <description>A base for other temporary directories.</description>
39 </property>
40
41 <property>
42 <name>io.native.lib.available</name>
43 <value>true</value>
44 <description>Controls whether to use native libraries for bz2 and zlib
45 compression codecs or not. The property does not control any other native
46 libraries.
47 </description>
48 </property>
49
50 <property>
51 <name>hadoop.http.filter.initializers</name>
52 <value>org.apache.hadoop.http.lib.StaticUserWebFilter</value>
53 <description>A comma separated list of class names. Each class in the list
54 must extend org.apache.hadoop.http.FilterInitializer. The corresponding
55 Filter will be initialized. Then, the Filter will be applied to all user
56 facing jsp and servlet web pages. The ordering of the list defines the
57 ordering of the filters.</description>
58 </property>
59
60 <!--- security properties -->
61
62 <property>
63 <name>hadoop.security.authorization</name>
64 <value>false</value>
65 <description>Is service-level authorization enabled?</description>
66 </property>
67
68 <property>
69 <name>hadoop.security.instrumentation.requires.admin</name>
70 <value>false</value>
71 <description>
72 Indicates if administrator ACLs are required to access
73 instrumentation servlets (JMX, METRICS, CONF, STACKS).
74 </description>
75 </property>
76
77 <property>
78 <name>hadoop.security.authentication</name>
79 <value>simple</value>
80 <description>Possible values are simple (no authentication) and kerberos
81 </description>
82 </property>
83
84 <property>
85 <name>hadoop.security.group.mapping</name>
86 <value>org.apache.hadoop.security.JniBasedUnixGroupsMappingWithFallback</value>
87 <description>
88 Class for user to group mapping (get groups for a given user) for ACL.
89 The default implementation,
90 org.apache.hadoop.security.JniBasedUnixGroupsMappingWithFallback,
91 will determine if the Java Native Interface (JNI) is available. If JNI is
92 available the implementation will use the API within hadoop to resolve a
93 list of groups for a user. If JNI is not available then the shell
94 implementation, ShellBasedUnixGroupsMapping, is used. This implementation
95 shells out to the Linux/Unix environment with the
96 <code>bash -c groups</code> command to resolve a list of groups for a user.
97 </description>
98 </property>
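<!-- Illustrative sketch: to resolve groups via LDAP instead of the local Unix mapping,
     the provider class can be switched to LdapGroupsMapping and the LDAP properties
     defined further below supplied. The server URL and bind DN here are hypothetical
     placeholders.

     <property>
       <name>hadoop.security.group.mapping</name>
       <value>org.apache.hadoop.security.LdapGroupsMapping</value>
     </property>
     <property>
       <name>hadoop.security.group.mapping.ldap.url</name>
       <value>ldap://ldap.example.com:389</value>
     </property>
     <property>
       <name>hadoop.security.group.mapping.ldap.bind.user</name>
       <value>cn=hadoop,ou=services,dc=example,dc=com</value>
     </property>
-->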
99
100 <property>
101 <name>hadoop.security.dns.interface</name>
102 <description>
103 The name of the Network Interface from which the service should determine
104 its host name for Kerberos login. e.g. eth2. In a multi-homed environment,
105 the setting can be used to affect the _HOST substitution in the service
106 Kerberos principal. If this configuration value is not set, the service
107 will use its default hostname as returned by
108 InetAddress.getLocalHost().getCanonicalHostName().
109
110 Most clusters will not require this setting.
111 </description>
112 </property>
113
114 <property>
115 <name>hadoop.security.dns.nameserver</name>
116 <description>
117 The host name or IP address of the name server (DNS) which a service Node
118 should use to determine its own host name for Kerberos Login. Requires
119 hadoop.security.dns.interface.
120
121 Most clusters will not require this setting.
122 </description>
123 </property>
124
125 <property>
126 <name>hadoop.security.dns.log-slow-lookups.enabled</name>
127 <value>false</value>
128 <description>
129 Time name lookups (via SecurityUtil) and log them if they exceed the
130 configured threshold.
131 </description>
132 </property>
133
134 <property>
135 <name>hadoop.security.dns.log-slow-lookups.threshold.ms</name>
136 <value>1000</value>
137 <description>
138 If slow lookup logging is enabled, this threshold is used to decide if a
139 lookup is considered slow enough to be logged.
140 </description>
141 </property>
142
143 <property>
144 <name>hadoop.security.groups.cache.secs</name>
145 <value>300</value>
146 <description>
147 This is the config controlling the validity of the entries in the cache
148 containing the user->group mapping. When this duration has expired,
149 the implementation of the group mapping provider is invoked to get
150 the groups of the user, which are then cached again.
151 </description>
152 </property>
153
154 <property>
155 <name>hadoop.security.groups.negative-cache.secs</name>
156 <value>30</value>
157 <description>
158 Expiration time for entries in the negative user-to-group mapping
159 cache, in seconds. This is useful when invalid users are retrying
160 frequently. It is suggested to set a small value for this expiration, since
161 a transient error in group lookup could temporarily lock out a legitimate
162 user.
163
164 Set this to zero or a negative value to disable negative user-to-group caching.
165 </description>
166 </property>
167
168 <property>
169 <name>hadoop.security.groups.cache.warn.after.ms</name>
170 <value>5000</value>
171 <description>
172 If looking up a single user to group takes longer than this amount of
173 milliseconds, we will log a warning message.
174 </description>
175 </property>
176
177 <property>
178 <name>hadoop.security.groups.cache.background.reload</name>
179 <value>false</value>
180 <description>
181 Whether to reload expired user->group mappings using a background thread
182 pool. If set to true, a pool of
183 hadoop.security.groups.cache.background.reload.threads is created to
184 update the cache in the background.
185 </description>
186 </property>
187
188 <property>
189 <name>hadoop.security.groups.cache.background.reload.threads</name>
190 <value>3</value>
191 <description>
192 Only relevant if hadoop.security.groups.cache.background.reload is true.
193 Controls the number of concurrent background user->group cache entry
194 refreshes. Pending refresh requests beyond this value are queued and
195 processed when a thread is free.
196 </description>
197 </property>
198
199 <property>
200 <name>hadoop.security.group.mapping.ldap.connection.timeout.ms</name>
201 <value>60000</value>
202 <description>
203 This property is the connection timeout (in milliseconds) for LDAP
204 operations. If the LDAP provider doesn't establish a connection within the
205 specified period, it will abort the connect attempt. A non-positive value
206 means no LDAP connection timeout is specified, in which case it waits for the
207 connection to be established until the underlying network times out.
208 </description>
209 </property>
210
211 <property>
212 <name>hadoop.security.group.mapping.ldap.read.timeout.ms</name>
213 <value>60000</value>
214 <description>
215 This property is the read timeout (in milliseconds) for LDAP
216 operations. If the LDAP provider doesn't get an LDAP response within the
217 specified period, it will abort the read attempt. A non-positive value
218 means no read timeout is specified, in which case it waits for the response
219 indefinitely.
220 </description>
221 </property>
222
223 <property>
224 <name>hadoop.security.group.mapping.ldap.url</name>
225 <value></value>
226 <description>
227 The URL of the LDAP server to use for resolving user groups when using
228 the LdapGroupsMapping user to group mapping.
229 </description>
230 </property>
231
232 <property>
233 <name>hadoop.security.group.mapping.ldap.ssl</name>
234 <value>false</value>
235 <description>
236 Whether or not to use SSL when connecting to the LDAP server.
237 </description>
238 </property>
239
240 <property>
241 <name>hadoop.security.group.mapping.ldap.ssl.keystore</name>
242 <value></value>
243 <description>
244 File path to the SSL keystore that contains the SSL certificate required
245 by the LDAP server.
246 </description>
247 </property>
248
249 <property>
250 <name>hadoop.security.group.mapping.ldap.ssl.keystore.password.file</name>
251 <value></value>
252 <description>
253 The path to a file containing the password of the LDAP SSL keystore. If
254 the password is not configured in credential providers and the property
255 hadoop.security.group.mapping.ldap.ssl.keystore.password is not set,
256 LDAPGroupsMapping reads password from the file.
257
258 IMPORTANT: This file should be readable only by the Unix user running
259 the daemons and should be a local file.
260 </description>
261 </property>
262
263 <property>
264 <name>hadoop.security.group.mapping.ldap.ssl.keystore.password</name>
265 <value></value>
266 <description>
267 The password of the LDAP SSL keystore. This property name is used as an
268 alias to get the password from credential providers. If the password can
269 not be found and hadoop.security.credential.clear-text-fallback is true
270 LDAPGroupsMapping uses the value of this property for password.
271 </description>
272 </property>
273
274 <property>
275 <name>hadoop.security.credential.clear-text-fallback</name>
276 <value>true</value>
277 <description>
278 true or false to indicate whether or not to fall back to storing credential
279 password as clear text. The default value is true. This property only works
280 when the password can't be found from credential providers.
281 </description>
282 </property>
283
284 <property>
285 <name>hadoop.security.credential.provider.path</name>
286 <value></value>
287 <description>
288 A comma-separated list of URLs that indicates the type and
289 location of a list of providers that should be consulted.
290 </description>
291 </property>
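<!-- Illustrative sketch: a Java keystore credential provider is typically referenced
     with a jceks URI; the file location below is a hypothetical placeholder. Credentials
     can be added to such a store with the "hadoop credential create" command.

     <property>
       <name>hadoop.security.credential.provider.path</name>
       <value>jceks://file/etc/hadoop/conf/credentials.jceks</value>
     </property>
-->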
292
293 <property>
294 <name>hadoop.security.credstore.java-keystore-provider.password-file</name>
295 <value></value>
296 <description>
297 The path to a file containing the custom password for all keystores
298 that may be configured in the provider path.
299 </description>
300 </property>
301
302 <property>
303 <name>hadoop.security.group.mapping.ldap.bind.user</name>
304 <value></value>
305 <description>
306 The distinguished name of the user to bind as when connecting to the LDAP
307 server. This may be left blank if the LDAP server supports anonymous binds.
308 </description>
309 </property>
310
311 <property>
312 <name>hadoop.security.group.mapping.ldap.bind.password.file</name>
313 <value></value>
314 <description>
315 The path to a file containing the password of the bind user. If
316 the password is not configured in credential providers and the property
317 hadoop.security.group.mapping.ldap.bind.password is not set,
318 LDAPGroupsMapping reads password from the file.
319
320 IMPORTANT: This file should be readable only by the Unix user running
321 the daemons and should be a local file.
322 </description>
323 </property>
324
325 <property>
326 <name>hadoop.security.group.mapping.ldap.bind.password</name>
327 <value></value>
328 <description>
329 The password of the bind user. This property name is used as an
330 alias to get the password from credential providers. If the password can
331 not be found and hadoop.security.credential.clear-text-fallback is true
332 LDAPGroupsMapping uses the value of this property for password.
333 </description>
334 </property>
335
336 <property>
337 <name>hadoop.security.group.mapping.ldap.base</name>
338 <value></value>
339 <description>
340 The search base for the LDAP connection. This is a distinguished name,
341 and will typically be the root of the LDAP directory.
342 </description>
343 </property>
344
345 <property>
346 <name>hadoop.security.group.mapping.ldap.search.filter.user</name>
347 <value>(&amp;(objectClass=user)(sAMAccountName={0}))</value>
348 <description>
349 An additional filter to use when searching for LDAP users. The default will
350 usually be appropriate for Active Directory installations. If connecting to
351 an LDAP server with a non-AD schema, this should be replaced with
352 (&amp;(objectClass=inetOrgPerson)(uid={0})). {0} is a special string used to
353 denote where the username fits into the filter.
354
355 If the LDAP server supports posixGroups, Hadoop can enable the feature by
356 setting the value of this property to "posixAccount" and the value of
357 the hadoop.security.group.mapping.ldap.search.filter.group property to
358 "posixGroup".
359 </description>
360 </property>
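<!-- Illustrative sketch of the posixGroups configuration described above: both search
     filters are switched to the posix object classes. The exact filter strings are an
     assumption based on the descriptions in this file; no other settings are implied.

     <property>
       <name>hadoop.security.group.mapping.ldap.search.filter.user</name>
       <value>(&amp;(objectClass=posixAccount)(uid={0}))</value>
     </property>
     <property>
       <name>hadoop.security.group.mapping.ldap.search.filter.group</name>
       <value>(objectClass=posixGroup)</value>
     </property>
-->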
361
362 <property>
363 <name>hadoop.security.group.mapping.ldap.search.filter.group</name>
364 <value>(objectClass=group)</value>
365 <description>
366 An additional filter to use when searching for LDAP groups. This should be
367 changed when resolving groups against a non-Active Directory installation.
368
369 See the description of hadoop.security.group.mapping.ldap.search.filter.user
370 to enable posixGroups support.
371 </description>
372 </property>
373
374 <property>
375 <name>hadoop.security.group.mapping.ldap.search.attr.memberof</name>
376 <value></value>
377 <description>
378 The attribute of the user object that identifies its group objects. By
379 default, Hadoop makes two LDAP queries per user if this value is empty. If
380 set, Hadoop will attempt to resolve group names from this attribute,
381 instead of making the second LDAP query to get group objects. The value
382 should be 'memberOf' for an MS AD installation.
383 </description>
384 </property>
385
386 <property>
387 <name>hadoop.security.group.mapping.ldap.search.attr.member</name>
388 <value>member</value>
389 <description>
390 The attribute of the group object that identifies the users that are
391 members of the group. The default will usually be appropriate for
392 any LDAP installation.
393 </description>
394 </property>
395
396 <property>
397 <name>hadoop.security.group.mapping.ldap.search.attr.group.name</name>
398 <value>cn</value>
399 <description>
400 The attribute of the group object that identifies the group name. The
401 default will usually be appropriate for all LDAP systems.
402 </description>
403 </property>
404
405 <property>
406 <name>hadoop.security.group.mapping.ldap.search.group.hierarchy.levels</name>
407 <value>0</value>
408 <description>
409 The number of levels to go up the group hierarchy when determining
410 which groups a user is part of. A value of 0 checks only the
411 groups that the user belongs to directly. Each additional level will raise the
412 time it takes to execute a query by at most
413 hadoop.security.group.mapping.ldap.directory.search.timeout.
414 The default will usually be appropriate for all LDAP systems.
415 </description>
416 </property>
417
418 <property>
419 <name>hadoop.security.group.mapping.ldap.posix.attr.uid.name</name>
420 <value>uidNumber</value>
421 <description>
422 The attribute of posixAccount to use when looking up group membership.
423 Mostly useful for schemas wherein groups have memberUids that use an
424 attribute other than uidNumber.
425 </description>
426 </property>
427
428 <property>
429 <name>hadoop.security.group.mapping.ldap.posix.attr.gid.name</name>
430 <value>gidNumber</value>
431 <description>
432 The attribute of posixAccount indicating the group id.
433 </description>
434 </property>
435
436 <property>
437 <name>hadoop.security.group.mapping.ldap.directory.search.timeout</name>
438 <value>10000</value>
439 <description>
440 The attribute applied to the LDAP SearchControl properties to set a
441 maximum time limit when searching and awaiting a result.
442 Set to 0 if infinite wait period is desired.
443 Default is 10 seconds. Units in milliseconds.
444 </description>
445 </property>
446
447 <property>
448 <name>hadoop.security.group.mapping.providers</name>
449 <value></value>
450 <description>
451 Comma-separated list of names of other providers that supply user to group
452 mapping. Used by CompositeGroupsMapping.
453 </description>
454 </property>
455
456 <property>
457 <name>hadoop.security.group.mapping.providers.combined</name>
458 <value>true</value>
459 <description>
460 true or false to indicate whether groups from the providers are combined or
461 not. The default value is true. If true, then all the providers will be
462 tried to get groups and all the groups are combined to return as the final
463 results. Otherwise, providers are tried one by one in the configured list
464 order, and if any groups are retrieved from any provider, then the groups
465 will be returned without trying the remaining ones.
466 </description>
467 </property>
468
469 <property>
470 <name>hadoop.security.service.user.name.key</name>
471 <value></value>
472 <description>
473 For those cases where the same RPC protocol is implemented by multiple
474 servers, this configuration is required for specifying the principal
475 name to use for the service when the client wishes to make an RPC call.
476 </description>
477 </property>
478
479
480 <property>
481 <name>hadoop.security.uid.cache.secs</name>
482 <value>14400</value>
483 <description>
484 This is the config controlling the validity of the entries in the cache
485 containing the userId to userName and groupId to groupName used by
486 NativeIO getFstat().
487 </description>
488 </property>
489
490 <property>
491 <name>hadoop.rpc.protection</name>
492 <value>authentication</value>
493 <description>A comma-separated list of protection values for secured sasl
494 connections. Possible values are authentication, integrity and privacy.
495 authentication means authentication only and no integrity or privacy;
496 integrity implies authentication and integrity are enabled; and privacy
497 implies all of authentication, integrity and privacy are enabled.
498 hadoop.security.saslproperties.resolver.class can be used to override
499 the hadoop.rpc.protection for a connection at the server side.
500 </description>
501 </property>
502
503 <property>
504 <name>hadoop.security.saslproperties.resolver.class</name>
505 <value></value>
506 <description>SaslPropertiesResolver used to resolve the QOP used for a
507 connection. If not specified, the full set of values specified in
508 hadoop.rpc.protection is used while determining the QOP used for the
509 connection. If a class is specified, then the QOP values returned by
510 the class will be used while determining the QOP used for the connection.
511 </description>
512 </property>
513
514 <property>
515 <name>hadoop.security.sensitive-config-keys</name>
516 <value>password$,fs.s3.*[Ss]ecret.?[Kk]ey,fs.azure.account.key.*,dfs.webhdfs.oauth2.[a-z]+.token,hadoop.security.sensitive-config-keys</value>
517 <description>A comma-separated list of regular expressions to match against
518 configuration keys that should be redacted where appropriate, for
519 example, when logging modified properties during a reconfiguration,
520 private credentials should not be logged.
521 </description>
522 </property>
523
524 <property>
525 <name>hadoop.workaround.non.threadsafe.getpwuid</name>
526 <value>true</value>
527 <description>Some operating systems or authentication modules are known to
528 have broken implementations of getpwuid_r and getpwgid_r, such that these
529 calls are not thread-safe. Symptoms of this problem include JVM crashes
530 with a stack trace inside these functions. If your system exhibits this
531 issue, enable this configuration parameter to include a lock around the
532 calls as a workaround.
533
534 An incomplete list of some systems known to have this issue is available
535 at http://wiki.apache.org/hadoop/KnownBrokenPwuidImplementations
536 </description>
537 </property>
538
539 <property>
540 <name>hadoop.kerberos.kinit.command</name>
541 <value>kinit</value>
542 <description>Used to periodically renew Kerberos credentials when provided
543 to Hadoop. The default setting assumes that kinit is in the PATH of users
544 running the Hadoop client. Change this to the absolute path to kinit if this
545 is not the case.
546 </description>
547 </property>
548
549 <property>
550 <name>hadoop.security.auth_to_local</name>
551 <value></value>
552 <description>Maps kerberos principals to local user names</description>
553 </property>
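<!-- Illustrative sketch of an auth_to_local value, using the standard Kerberos rule
     syntax; the EXAMPLE.COM realm and the mapping shown are hypothetical. The rule strips
     the realm from two-component principals, and DEFAULT handles everything else.

     <property>
       <name>hadoop.security.auth_to_local</name>
       <value>
         RULE:[2:$1@$0](.*@EXAMPLE\.COM)s/@.*//
         DEFAULT
       </value>
     </property>
-->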
554
555 <property>
556 <name>hadoop.token.files</name>
557 <value></value>
558 <description>List of token cache files that have delegation tokens for hadoop service</description>
559 </property>
560
561 <!-- i/o properties -->
562 <property>
563 <name>io.file.buffer.size</name>
564 <value>4096</value>
565 <description>The size of buffer for use in sequence files.
566 The size of this buffer should probably be a multiple of hardware
567 page size (4096 on Intel x86), and it determines how much data is
568 buffered during read and write operations.</description>
569 </property>
570
571 <property>
572 <name>io.bytes.per.checksum</name>
573 <value>512</value>
574 <description>The number of bytes per checksum. Must not be larger than
575 io.file.buffer.size.</description>
576 </property>
577
578 <property>
579 <name>io.skip.checksum.errors</name>
580 <value>false</value>
581 <description>If true, when a checksum error is encountered while
582 reading a sequence file, entries are skipped, instead of throwing an
583 exception.</description>
584 </property>
585
586 <property>
587 <name>io.compression.codecs</name>
588 <value></value>
589 <description>A comma-separated list of the compression codec classes that can
590 be used for compression/decompression. In addition to any classes specified
591 with this property (which take precedence), codec classes on the classpath
592 are discovered using a Java ServiceLoader.</description>
593 </property>
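<!-- Illustrative sketch: codecs can also be listed explicitly; the classes below are
     standard ones shipped in org.apache.hadoop.io.compress. Leaving the property empty
     and relying on ServiceLoader discovery is usually sufficient.

     <property>
       <name>io.compression.codecs</name>
       <value>org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.BZip2Codec</value>
     </property>
-->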
594
595 <property>
596 <name>io.compression.codec.bzip2.library</name>
597 <value>system-native</value>
598 <description>The native-code library to be used for compression and
599 decompression by the bzip2 codec. This library could be specified
600 either by name or by full pathname. In the former case, the
601 library is located by the dynamic linker, usually searching the
602 directories specified in the environment variable LD_LIBRARY_PATH.
603
604 The value of "system-native" indicates that the default system
605 library should be used. To indicate that the algorithm should
606 operate entirely in Java, specify "java-builtin".</description>
607 </property>
608
609 <property>
610 <name>io.serializations</name>
611 <value>org.apache.hadoop.io.serializer.WritableSerialization, org.apache.hadoop.io.serializer.avro.AvroSpecificSerialization, org.apache.hadoop.io.serializer.avro.AvroReflectSerialization</value>
612 <description>A list of serialization classes that can be used for
613 obtaining serializers and deserializers.</description>
614 </property>
615
616 <property>
617 <name>io.seqfile.local.dir</name>
618 <value>${hadoop.tmp.dir}/io/local</value>
619 <description>The local directory where sequence file stores intermediate
620 data files during merge. May be a comma-separated list of
621 directories on different devices in order to spread disk i/o.
622 Directories that do not exist are ignored.
623 </description>
624 </property>
625
626 <property>
627 <name>io.map.index.skip</name>
628 <value>0</value>
629 <description>Number of index entries to skip between each entry.
630 Zero by default. Setting this to values larger than zero can
631 facilitate opening large MapFiles using less memory.</description>
632 </property>
633
634 <property>
635 <name>io.map.index.interval</name>
636 <value>128</value>
637 <description>
638 MapFile consist of two files - data file (tuples) and index file
639 (keys). For every io.map.index.interval records written in the
640 data file, an entry (record-key, data-file-position) is written
641 in the index file. This is to allow for doing binary search later
642 within the index file to look up records by their keys and get their
643 closest positions in the data file.
644 </description>
645 </property>
646
647 <!-- file system properties -->
648
649 <property>
650 <name>fs.defaultFS</name>
651 <value>file:///</value>
652 <description>The name of the default file system. A URI whose
653 scheme and authority determine the FileSystem implementation. The
654 uri's scheme determines the config property (fs.SCHEME.impl) naming
655 the FileSystem implementation class. The uri's authority is used to
656 determine the host, port, etc. for a filesystem.</description>
657 </property>
658
659 <property>
660 <name>fs.default.name</name>
661 <value>file:///</value>
662 <description>Deprecated. Use (fs.defaultFS) property
663 instead</description>
664 </property>
665
666 <property>
667 <name>fs.trash.interval</name>
668 <value>0</value>
669 <description>Number of minutes after which the checkpoint
670 gets deleted. If zero, the trash feature is disabled.
671 This option may be configured both on the server and the
672 client. If trash is disabled server side then the client
673 side configuration is checked. If trash is enabled on the
674 server side then the value configured on the server is
675 used and the client configuration value is ignored.
676 </description>
677 </property>
678
679 <property>
680 <name>fs.trash.checkpoint.interval</name>
681 <value>0</value>
682 <description>Number of minutes between trash checkpoints.
683 Should be smaller or equal to fs.trash.interval. If zero,
684 the value is set to the value of fs.trash.interval.
685 Every time the checkpointer runs it creates a new checkpoint
686 out of current and removes checkpoints created more than
687 fs.trash.interval minutes ago.
688 </description>
689 </property>
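<!-- Illustrative sketch: enabling trash with a one-day retention and hourly checkpoints
     combines the two properties above; the durations are example values only.

     <property>
       <name>fs.trash.interval</name>
       <value>1440</value>
     </property>
     <property>
       <name>fs.trash.checkpoint.interval</name>
       <value>60</value>
     </property>
-->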
690
691 <property>
692 <name>fs.protected.directories</name>
693 <value></value>
694 <description>A comma-separated list of directories which cannot
695 be deleted even by the superuser unless they are empty. This
696 setting can be used to guard important system directories
697 against accidental deletion due to administrator error.
698 </description>
699 </property>
700
701 <property>
702 <name>fs.AbstractFileSystem.file.impl</name>
703 <value>org.apache.hadoop.fs.local.LocalFs</value>
704 <description>The AbstractFileSystem for file: uris.</description>
705 </property>
706
707 <property>
708 <name>fs.AbstractFileSystem.har.impl</name>
709 <value>org.apache.hadoop.fs.HarFs</value>
710 <description>The AbstractFileSystem for har: uris.</description>
711 </property>
712
713 <property>
714 <name>fs.AbstractFileSystem.hdfs.impl</name>
715 <value>org.apache.hadoop.fs.Hdfs</value>
716 <description>The AbstractFileSystem for hdfs: uris.</description>
717 </property>
718
719 <property>
720 <name>fs.AbstractFileSystem.viewfs.impl</name>
721 <value>org.apache.hadoop.fs.viewfs.ViewFs</value>
722 <description>The AbstractFileSystem for view file system for viewfs: uris
723 (i.e. client-side mount table).</description>
724 </property>
725
726 <property>
727 <name>fs.AbstractFileSystem.ftp.impl</name>
728 <value>org.apache.hadoop.fs.ftp.FtpFs</value>
729 <description>The AbstractFileSystem for ftp: uris.</description>
730 </property>
731
732 <property>
733 <name>fs.AbstractFileSystem.webhdfs.impl</name>
734 <value>org.apache.hadoop.fs.WebHdfs</value>
735 <description>The AbstractFileSystem for webhdfs: uris.</description>
736 </property>
737
738 <property>
739 <name>fs.AbstractFileSystem.swebhdfs.impl</name>
740 <value>org.apache.hadoop.fs.SWebHdfs</value>
741 <description>The AbstractFileSystem for swebhdfs: uris.</description>
742 </property>
743
744 <property>
745 <name>fs.ftp.host</name>
746 <value>0.0.0.0</value>
747 <description>FTP filesystem connects to this server</description>
748 </property>
749
750 <property>
751 <name>fs.ftp.host.port</name>
752 <value>21</value>
753 <description>
754 FTP filesystem connects to fs.ftp.host on this port
755 </description>
756 </property>
757
758 <property>
759 <name>fs.df.interval</name>
760 <value>60000</value>
761 <description>Disk usage statistics refresh interval in msec.</description>
762 </property>
763
764 <property>
765 <name>fs.du.interval</name>
766 <value>600000</value>
767 <description>File space usage statistics refresh interval in msec.</description>
768 </property>
769
770 <property>
771 <name>fs.s3.awsAccessKeyId</name>
772 <description>AWS access key ID used by S3 block file system.</description>
773 </property>
774
775 <property>
776 <name>fs.s3.awsSecretAccessKey</name>
777 <description>AWS secret key used by S3 block file system.</description>
778 </property>
779
780 <property>
781 <name>fs.s3.block.size</name>
782 <value>67108864</value>
783 <description>Block size to use when writing files to S3.</description>
784 </property>
785
786 <property>
787 <name>fs.s3.buffer.dir</name>
788 <value>${hadoop.tmp.dir}/s3</value>
789 <description>Determines where on the local filesystem the s3:/s3n: filesystem
790 should store files before sending them to S3
791 (or after retrieving them from S3).
792 </description>
793 </property>
794
795 <property>
796 <name>fs.s3.maxRetries</name>
797 <value>4</value>
798 <description>The maximum number of retries for reading or writing files to S3,
799 before we signal failure to the application.
800 </description>
801 </property>
802
803 <property>
804 <name>fs.s3.sleepTimeSeconds</name>
805 <value>10</value>
806 <description>The number of seconds to sleep between each S3 retry.
807 </description>
808 </property>
809
810 <property>
811 <name>fs.automatic.close</name>
812 <value>true</value>
813 <description>By default, FileSystem instances are automatically closed at program
814 exit using a JVM shutdown hook. Setting this property to false disables this
815 behavior. This is an advanced option that should only be used by server applications
816 requiring a more carefully orchestrated shutdown sequence.
817 </description>
818 </property>
819
820 <property>
821 <name>fs.s3n.awsAccessKeyId</name>
822 <description>AWS access key ID used by S3 native file system.</description>
823 </property>
824
825 <property>
826 <name>fs.s3n.awsSecretAccessKey</name>
827 <description>AWS secret key used by S3 native file system.</description>
828 </property>
829
830 <property>
831 <name>fs.s3n.block.size</name>
832 <value>67108864</value>
833 <description>Block size to use when reading files using the native S3
834 filesystem (s3n: URIs).</description>
835 </property>
836
837 <property>
838 <name>fs.s3n.multipart.uploads.enabled</name>
839 <value>false</value>
840 <description>Setting this property to true enables multipart uploads to the
841 native S3 filesystem. When uploading a file, it is split into blocks
842 if the size is larger than fs.s3n.multipart.uploads.block.size.
843 </description>
844 </property>
845
846 <property>
847 <name>fs.s3n.multipart.uploads.block.size</name>
848 <value>67108864</value>
849 <description>The block size for multipart uploads to native S3 filesystem.
850 Default size is 64MB.
851 </description>
852 </property>
853
854 <property>
855 <name>fs.s3n.multipart.copy.block.size</name>
856 <value>5368709120</value>
857 <description>The block size for multipart copy in native S3 filesystem.
858 Default size is 5GB.
859 </description>
860 </property>
861
862 <property>
863 <name>fs.s3n.server-side-encryption-algorithm</name>
864 <value></value>
865 <description>Specify a server-side encryption algorithm for S3.
866 Unset by default, and the only other currently allowable value is AES256.
867 </description>
868 </property>
869
870 <property>
871 <name>fs.s3a.access.key</name>
872 <description>AWS access key ID used by S3A file system. Omit for IAM role-based or provider-based authentication.</description>
873 </property>
874
875 <property>
876 <name>fs.s3a.secret.key</name>
877 <description>AWS secret key used by S3A file system. Omit for IAM role-based or provider-based authentication.</description>
878 </property>
879
880 <property>
881 <name>fs.s3a.aws.credentials.provider</name>
882 <description>
883 Comma-separated class names of credential provider classes which implement
884 com.amazonaws.auth.AWSCredentialsProvider.
885
886 These are loaded and queried in sequence for a valid set of credentials.
887 Each listed class must provide either an accessible constructor accepting
888 java.net.URI and org.apache.hadoop.conf.Configuration, or an accessible
889 default constructor.
890
891 Specifying org.apache.hadoop.fs.s3a.AnonymousAWSCredentialsProvider allows
892 anonymous access to a publicly accessible S3 bucket without any credentials.
893 Please note that allowing anonymous access to an S3 bucket compromises
894 security and therefore is unsuitable for most use cases. It can be useful
895 for accessing public data sets without requiring AWS credentials.
896 </description>
897 </property>
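<!-- Illustrative sketch: a provider chain that tries session credentials first, then the
     simple access/secret key provider, and finally anonymous access. These provider
     classes ship with the S3A connector; the ordering shown is only an example.

     <property>
       <name>fs.s3a.aws.credentials.provider</name>
       <value>org.apache.hadoop.fs.s3a.TemporaryAWSCredentialsProvider,org.apache.hadoop.fs.s3a.SimpleAWSCredentialsProvider,org.apache.hadoop.fs.s3a.AnonymousAWSCredentialsProvider</value>
     </property>
-->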
898
899 <property>
900 <name>fs.s3a.session.token</name>
901 <description>Session token, when using org.apache.hadoop.fs.s3a.TemporaryAWSCredentialsProvider
902 as one of the providers.
903 </description>
904 </property>
905
906 <property>
907 <name>fs.s3a.connection.maximum</name>
908 <value>15</value>
909 <description>Controls the maximum number of simultaneous connections to S3.</description>
910 </property>
911
912 <property>
913 <name>fs.s3a.connection.ssl.enabled</name>
914 <value>true</value>
915 <description>Enables or disables SSL connections to S3.</description>
916 </property>
917
918 <property>
919 <name>fs.s3a.endpoint</name>
920 <description>AWS S3 endpoint to connect to. An up-to-date list is
921 provided in the AWS Documentation: regions and endpoints. Without this
922 property, the standard region (s3.amazonaws.com) is assumed.
923 </description>
924 </property>
925
926 <property>
927 <name>fs.s3a.path.style.access</name>
928 <value>false</value>
929 <description>Enable S3 path style access ie disabling the default virtual hosting behaviour.
930 Useful for S3A-compliant storage providers as it removes the need to set up DNS for virtual hosting.
931 </description>
932 </property>
933
934 <property>
935 <name>fs.s3a.proxy.host</name>
936 <description>Hostname of the (optional) proxy server for S3 connections.</description>
937 </property>
938
939 <property>
940 <name>fs.s3a.proxy.port</name>
941 <description>Proxy server port. If this property is not set
942 but fs.s3a.proxy.host is, port 80 or 443 is assumed (consistent with
943 the value of fs.s3a.connection.ssl.enabled).</description>
944 </property>
945
946 <property>
947 <name>fs.s3a.proxy.username</name>
948 <description>Username for authenticating with proxy server.</description>
949 </property>
950
951 <property>
952 <name>fs.s3a.proxy.password</name>
953 <description>Password for authenticating with proxy server.</description>
954 </property>
955
956 <property>
957 <name>fs.s3a.proxy.domain</name>
958 <description>Domain for authenticating with proxy server.</description>
959 </property>
960
961 <property>
962 <name>fs.s3a.proxy.workstation</name>
963 <description>Workstation for authenticating with proxy server.</description>
964 </property>
965
966 <property>
967 <name>fs.s3a.attempts.maximum</name>
968 <value>20</value>
969 <description>How many times we should retry commands on transient errors.</description>
970 </property>
971
972 <property>
973 <name>fs.s3a.connection.establish.timeout</name>
974 <value>5000</value>
975 <description>Socket connection setup timeout in milliseconds.</description>
976 </property>
977
978 <property>
979 <name>fs.s3a.connection.timeout</name>
980 <value>200000</value>
981 <description>Socket connection timeout in milliseconds.</description>
982 </property>
983
984 <property>
985 <name>fs.s3a.socket.send.buffer</name>
986 <value>8192</value>
987 <description>Socket send buffer hint to amazon connector. Represented in bytes.</description>
988 </property>
989
990 <property>
991 <name>fs.s3a.socket.recv.buffer</name>
992 <value>8192</value>
993 <description>Socket receive buffer hint to amazon connector. Represented in bytes.</description>
994 </property>
995
996 <property>
997 <name>fs.s3a.paging.maximum</name>
998 <value>5000</value>
999 <description>How many keys to request from S3 at a time when doing
1000 directory listings.</description>
1001 </property>
1002
1003 <property>
1004 <name>fs.s3a.threads.max</name>
1005 <value>10</value>
1006 <description>The total number of threads available in the filesystem for data
1007 uploads *or any other queued filesystem operation*.</description>
1008 </property>
1009
1010 <property>
1011 <name>fs.s3a.threads.keepalivetime</name>
1012 <value>60</value>
1013 <description>Number of seconds a thread can be idle before being
1014 terminated.</description>
1015 </property>
1016
1017 <property>
1018 <name>fs.s3a.max.total.tasks</name>
1019 <value>5</value>
1020 <description>The number of operations which can be queued for execution</description>
1021 </property>
1022
1023 <property>
1024 <name>fs.s3a.multipart.size</name>
1025 <value>104857600</value>
1026 <description>How big (in bytes) to split upload or copy operations up into.</description>
1027 </property>
1028
1029 <property>
1030 <name>fs.s3a.multipart.threshold</name>
1031 <value>2147483647</value>
1032 <description>The size (in bytes) above which upload or copy operations switch
1033 to the multipart mechanism. This also applies to renamed files, as rename()
1034 involves copying the source file(s).
1035 </description>
1036 </property>
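<!-- Illustrative arithmetic, assuming S3's documented limit of 10,000 parts per
     multipart upload: with the default fs.s3a.multipart.size of 104857600 bytes (100 MB),
     the largest single object that can be uploaded is roughly 10,000 * 100 MB, i.e. a
     little under 1 TB. Raising the part size raises that ceiling; lowering it reduces the
     memory or disk needed per in-flight block when the fast upload mechanism below is used.
-->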
1037
1038 <property>
1039 <name>fs.s3a.multiobjectdelete.enable</name>
1040 <value>true</value>
1041 <description>When enabled, multiple single-object delete requests are replaced by
1042 a single 'delete multiple objects' request, reducing the number of requests.
1043 Beware: legacy S3-compatible object stores might not support this request.
1044 </description>
1045 </property>
1046
1047 <property>
1048 <name>fs.s3a.acl.default</name>
1049 <description>Set a canned ACL for newly created and copied objects. Value may be Private,
1050 PublicRead, PublicReadWrite, AuthenticatedRead, LogDeliveryWrite, BucketOwnerRead,
1051 or BucketOwnerFullControl.</description>
1052 </property>
1053
1054 <property>
1055 <name>fs.s3a.multipart.purge</name>
1056 <value>false</value>
1057 <description>True if you want to purge existing multipart uploads that may not have been
1058 completed/aborted correctly. The corresponding purge age is defined in
1059 fs.s3a.multipart.purge.age.
1060 If set, when the filesystem is instantiated then all outstanding uploads
1061 older than the purge age will be terminated, across the entire bucket.
1062 This will impact multipart uploads by other applications and users, so it should
1063 be used sparingly, with an age value chosen to stop failed uploads, without
1064 breaking ongoing operations.
1065 </description>
1066 </property>
1067
1068 <property>
1069 <name>fs.s3a.multipart.purge.age</name>
1070 <value>86400</value>
1071 <description>Minimum age in seconds of multipart uploads to purge.
1072 </description>
1073 </property>
1074
1075 <property>
1076 <name>fs.s3a.server-side-encryption-algorithm</name>
1077 <description>Specify a server-side encryption algorithm for s3a: file system.
1078 Unset by default, and the only other currently allowable value is AES256.
1079 </description>
1080 </property>
1081
1082 <property>
1083 <name>fs.s3a.signing-algorithm</name>
1084 <description>Override the default signing algorithm so legacy
1085 implementations can still be used</description>
1086 </property>
1087
1088 <property>
1089 <name>fs.s3a.block.size</name>
1090 <value>33554432</value>
1091 <description>Block size to use when reading files using s3a: file system.
1092 </description>
1093 </property>
1094
1095 <property>
1096 <name>fs.s3a.buffer.dir</name>
1097 <value>${hadoop.tmp.dir}/s3a</value>
1098 <description>Comma separated list of directories that will be used to buffer file
1099 uploads to.</description>
1100 </property>
1101
1102 <property>
1103 <name>fs.s3a.fast.upload</name>
1104 <value>false</value>
1105 <description>
1106 Use the incremental block-based fast upload mechanism with
1107 the buffering mechanism set in fs.s3a.fast.upload.buffer.
1108 </description>
1109 </property>
1110
1111 <property>
1112 <name>fs.s3a.fast.upload.buffer</name>
1113 <value>disk</value>
1114 <description>
1115 The buffering mechanism to use when using S3A fast upload
1116 (fs.s3a.fast.upload=true). Values: disk, array, bytebuffer.
1117 This configuration option has no effect if fs.s3a.fast.upload is false.
1118
1119 "disk" will use the directories listed in fs.s3a.buffer.dir as
1120 the location(s) to save data prior to being uploaded.
1121
1122 "array" uses arrays in the JVM heap
1123
1124 "bytebuffer" uses off-heap memory within the JVM.
1125
1126 Both "array" and "bytebuffer" will consume memory in a single stream up to the number
1127 of blocks set by:
1128
1129 fs.s3a.multipart.size * fs.s3a.fast.upload.active.blocks.
1130
1131 If using either of these mechanisms, keep this value low.
1132
1133 The total number of threads performing work across all streams is set by
1134 fs.s3a.threads.max, with fs.s3a.max.total.tasks values setting the number of queued
1135 work items.
1136 </description>
1137 </property>
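<!-- Illustrative sketch: enabling the incremental block upload with off-heap buffering.
     With these settings a single stream may buffer up to
     fs.s3a.fast.upload.active.blocks (defined below) * fs.s3a.multipart.size bytes
     (4 * 100 MB with the defaults), so the part size is often reduced when memory
     buffering is chosen; the 33554432 (32 MB) value here is an example only.

     <property>
       <name>fs.s3a.fast.upload</name>
       <value>true</value>
     </property>
     <property>
       <name>fs.s3a.fast.upload.buffer</name>
       <value>bytebuffer</value>
     </property>
     <property>
       <name>fs.s3a.multipart.size</name>
       <value>33554432</value>
     </property>
-->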
1138
1139 <property>
1140 <name>fs.s3a.fast.upload.active.blocks</name>
1141 <value>4</value>
1142 <description>
1143 Maximum number of blocks a single output stream can have
1144 active (uploading, or queued to the central FileSystem
1145 instance's pool of queued operations).
1146
1147 This stops a single stream overloading the shared thread pool.
1148 </description>
1149 </property>
1150
1151 <property>
1152 <name>fs.s3a.readahead.range</name>
1153 <value>65536</value>
1154 <description>Bytes to read ahead during a seek() before closing and
1155 re-opening the S3 HTTP connection. This option will be overridden if
1156 any call to setReadahead() is made to an open stream.</description>
1157 </property>
1158
1159 <property>
1160 <name>fs.s3a.user.agent.prefix</name>
1161 <value></value>
1162 <description>
1163 Sets a custom value that will be prepended to the User-Agent header sent in
1164 HTTP requests to the S3 back-end by S3AFileSystem. The User-Agent header
1165 always includes the Hadoop version number followed by a string generated by
1166 the AWS SDK. An example is "User-Agent: Hadoop 2.8.0, aws-sdk-java/1.10.6".
1167 If this optional property is set, then its value is prepended to create a
1168 customized User-Agent. For example, if this configuration property was set
1169 to "MyApp", then an example of the resulting User-Agent would be
1170 "User-Agent: MyApp, Hadoop 2.8.0, aws-sdk-java/1.10.6".
1171 </description>
1172 </property>
1173
1174 <property>
1175 <name>fs.s3a.impl</name>
1176 <value>org.apache.hadoop.fs.s3a.S3AFileSystem</value>
1177 <description>The implementation class of the S3A Filesystem</description>
1178 </property>
1179
1180 <property>
1181 <name>fs.AbstractFileSystem.s3a.impl</name>
1182 <value>org.apache.hadoop.fs.s3a.S3A</value>
1183 <description>The implementation class of the S3A AbstractFileSystem.</description>
1184 </property>
1185
1186 <property>
1187 <name>io.seqfile.compress.blocksize</name>
1188 <value>1000000</value>
1189 <description>The minimum block size for compression in block compressed
1190 SequenceFiles.
1191 </description>
1192 </property>
1193
1194 <property>
1195 <name>io.seqfile.lazydecompress</name>
1196 <value>true</value>
1197 <description>Should values of block-compressed SequenceFiles be decompressed
1198 only when necessary.
1199 </description>
1200 </property>
1201
1202 <property>
1203 <name>io.seqfile.sorter.recordlimit</name>
1204 <value>1000000</value>
1205 <description>The limit on number of records to be kept in memory in a spill
1206 in SequenceFiles.Sorter
1207 </description>
1208 </property>
1209
1210 <property>
1211 <name>io.mapfile.bloom.size</name>
1212 <value>1048576</value>
1213 <description>The size of BloomFilter-s used in BloomMapFile. Each time this many
1214 keys is appended the next BloomFilter will be created (inside a DynamicBloomFilter).
1215 Larger values minimize the number of filters, which slightly increases the performance,
1216 but may waste too much space if the total number of keys is usually much smaller
1217 than this number.
1218 </description>
1219 </property>
1220
1221 <property>
1222 <name>io.mapfile.bloom.error.rate</name>
1223 <value>0.005</value>
1224 <description>The rate of false positives in BloomFilter-s used in BloomMapFile.
1225 As this value decreases, the size of BloomFilter-s increases exponentially. This
1226 value is the probability of encountering false positives (default is 0.5%).
1227 </description>
1228 </property>
1229
1230 <property>
1231 <name>hadoop.util.hash.type</name>
1232 <value>murmur</value>
1233 <description>The default implementation of Hash. Currently this can take one of the
1234 two values: 'murmur' to select MurmurHash and 'jenkins' to select JenkinsHash.
1235 </description>
1236 </property>
1237
1238
1239 <!-- ipc properties -->
1240
1241 <property>
1242 <name>ipc.client.idlethreshold</name>
1243 <value>4000</value>
1244 <description>Defines the threshold number of connections after which
1245 connections will be inspected for idleness.
1246 </description>
1247 </property>
1248
1249 <property>
1250 <name>ipc.client.kill.max</name>
1251 <value>10</value>
1252 <description>Defines the maximum number of clients to disconnect in one go.
1253 </description>
1254 </property>
1255
1256 <property>
1257 <name>ipc.client.connection.maxidletime</name>
1258 <value>10000</value>
1259 <description>The maximum time in msec after which a client will bring down the
1260 connection to the server.
1261 </description>
1262 </property>
1263
1264 <property>
1265 <name>ipc.client.connect.max.retries</name>
1266 <value>10</value>
1267 <description>Indicates the number of retries a client will make to establish
1268 a server connection.
1269 </description>
1270 </property>
1271
1272 <property>
1273 <name>ipc.client.connect.retry.interval</name>
1274 <value>1000</value>
1275 <description>Indicates the number of milliseconds a client will wait
1276 before retrying to establish a server connection.
1277 </description>
1278 </property>
1279
1280 <property>
1281 <name>ipc.client.connect.timeout</name>
1282 <value>20000</value>
1283 <description>Indicates the number of milliseconds a client will wait for the
1284 socket to establish a server connection.
1285 </description>
1286 </property>
1287
1288 <property>
1289 <name>ipc.client.connect.max.retries.on.timeouts</name>
1290 <value>45</value>
1291 <description>Indicates the number of retries a client will make on socket timeout
1292 to establish a server connection.
1293 </description>
1294 </property>
1295
1296 <property>
1297 <name>ipc.client.tcpnodelay</name>
1298 <value>true</value>
1299 <description>Use TCP_NODELAY flag to bypass Nagle's algorithm transmission delays.
1300 </description>
1301 </property>
1302
1303 <property>
1304 <name>ipc.client.low-latency</name>
1305 <value>false</value>
1306 <description>Use low-latency QoS markers for IPC connections.
1307 </description>
1308 </property>
1309
1310 <property>
1311 <name>ipc.client.ping</name>
1312 <value>true</value>
1313 <description>If set to true, send a ping to the server when a timeout occurs on
1314 reading the response. If no failure is detected, the client retries until at least
1315 a byte is read or the time given by ipc.client.rpc-timeout.ms is passed.
1316 </description>
1317 </property>
1318
1319 <property>
1320 <name>ipc.ping.interval</name>
1321 <value>60000</value>
1322 <description>Timeout on waiting response from server, in milliseconds.
1323 The client will send ping when the interval is passed without receiving bytes,
1324 if ipc.client.ping is set to true.
1325 </description>
1326 </property>
1327
1328 <property>
1329 <name>ipc.client.rpc-timeout.ms</name>
1330 <value>0</value>
1331 <description>Timeout on waiting response from server, in milliseconds.
1332 If ipc.client.ping is set to true and this rpc-timeout is greater than
1333 the value of ipc.ping.interval, the effective value of the rpc-timeout is
1334 rounded up to a multiple of ipc.ping.interval.
1335 </description>
1336 </property>
1337
1338 <property>
1339 <name>ipc.server.listen.queue.size</name>
1340 <value>128</value>
1341 <description>Indicates the length of the listen queue for servers accepting
1342 client connections.
1343 </description>
1344 </property>
1345
1346 <property>
1347 <name>ipc.server.log.slow.rpc</name>
1348 <value>false</value>
1349 <description>This setting is useful to troubleshoot performance issues for
1350 various services. If this value is set to true, requests that fall into
1351 the 99th percentile are logged and the RpcSlowCalls counter is incremented.
1352 </description>
1353 </property>
1354
1355 <property>
1356 <name>ipc.maximum.data.length</name>
1357 <value>67108864</value>
1358 <description>This indicates the maximum IPC message length (bytes) that can be
1359 accepted by the server. Messages larger than this value are rejected by the
1360 server immediately to avoid possible OOMs. This setting should rarely need to be
1361 changed.
1362 </description>
1363 </property>
1364
1365 <property>
1366 <name>ipc.maximum.response.length</name>
1367 <value>134217728</value>
1368 <description>This indicates the maximum IPC message length (bytes) that can be
1369 accepted by the client. Messages larger than this value are rejected
1370 immediately to avoid possible OOMs. This setting should rarely need to be
1371 changed. Set to 0 to disable.
1372 </description>
1373 </property>
1374
1375 <!-- Proxy Configuration -->
1376
1377 <property>
1378 <name>hadoop.security.impersonation.provider.class</name>
1379 <value></value>
1380 <description>A class which implements ImpersonationProvider interface, used to
1381 authorize whether one user can impersonate a specific user.
1382 If not specified, the DefaultImpersonationProvider will be used.
1383 If a class is specified, then that class will be used to determine
1384 the impersonation capability.
1385 </description>
1386 </property>
1387
1388 <property>
1389 <name>hadoop.rpc.socket.factory.class.default</name>
1390 <value>org.apache.hadoop.net.StandardSocketFactory</value>
1391 <description> Default SocketFactory to use. This parameter is expected to be
1392 formatted as "package.FactoryClassName".
1393 </description>
1394 </property>
1395
1396 <property>
1397 <name>hadoop.rpc.socket.factory.class.ClientProtocol</name>
1398 <value></value>
1399 <description> SocketFactory to use to connect to a DFS. If null or empty, use
1400 hadoop.rpc.socket.factory.class.default. This socket factory is also used by
1401 DFSClient to create sockets to DataNodes.
1402 </description>
1403 </property>
1404
1405
1406
1407 <property>
1408 <name>hadoop.socks.server</name>
1409 <value></value>
1410 <description> Address (host:port) of the SOCKS server to be used by the
1411 SocksSocketFactory.
1412 </description>
1413 </property>
1414
1415 <!-- Topology Configuration -->
1416 <property>
1417 <name>net.topology.node.switch.mapping.impl</name>
1418 <value>org.apache.hadoop.net.ScriptBasedMapping</value>
1419 <description> The default implementation of the DNSToSwitchMapping. It
1420 invokes a script specified in net.topology.script.file.name to resolve
1421 node names. If the value for net.topology.script.file.name is not set, the
1422 default value of DEFAULT_RACK is returned for all node names.
1423 </description>
1424 </property>
1425
1426 <property>
1427 <name>net.topology.impl</name>
1428 <value>org.apache.hadoop.net.NetworkTopology</value>
1429 <description> The default implementation of NetworkTopology, which is the classic three-layer one.
1430 </description>
1431 </property>
1432
1433 <property>
1434 <name>net.topology.script.file.name</name>
1435 <value></value>
1436 <description> The script name that should be invoked to resolve DNS names to
1437 NetworkTopology names. Example: the script would take host.foo.bar as an
1438 argument, and return /rack1 as the output.
1439 </description>
1440 </property>
1441
1442 <property>
1443 <name>net.topology.script.number.args</name>
1444 <value>100</value>
1445 <description> The max number of args that the script configured with
1446 net.topology.script.file.name should be run with. Each arg is an
1447 IP address.
1448 </description>
1449 </property>
1450
1451 <property>
1452 <name>net.topology.table.file.name</name>
1453 <value></value>
1454 <description> The file name for a topology file, which is used when the
1455 net.topology.node.switch.mapping.impl property is set to
1456 org.apache.hadoop.net.TableMapping. The file format is a two column text
1457 file, with columns separated by whitespace. The first column is a DNS or
1458 IP address and the second column specifies the rack where the address maps.
1459 If no entry corresponding to a host in the cluster is found, then
1460 /default-rack is assumed.
1461 </description>
1462 </property>
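<!-- Illustrative sketch of table-based rack mapping: switch the mapping implementation to
     TableMapping and point it at a two-column file. The file path and its contents are
     hypothetical; the format (host or IP, whitespace, rack) follows the description above.

     <property>
       <name>net.topology.node.switch.mapping.impl</name>
       <value>org.apache.hadoop.net.TableMapping</value>
     </property>
     <property>
       <name>net.topology.table.file.name</name>
       <value>/etc/hadoop/conf/topology.tbl</value>
     </property>

     A matching topology.tbl might contain lines such as:
       node1.example.com /rack1
       node2.example.com /rack2
-->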
1463
1464 <!-- Local file system -->
1465 <property>
1466 <name>file.stream-buffer-size</name>
1467 <value>4096</value>
1468 <description>The size of buffer to stream files.
1469 The size of this buffer should probably be a multiple of hardware
1470 page size (4096 on Intel x86), and it determines how much data is
1471 buffered during read and write operations.</description>
1472 </property>
1473
1474 <property>
1475 <name>file.bytes-per-checksum</name>
1476 <value>512</value>
1477 <description>The number of bytes per checksum. Must not be larger than
1478 file.stream-buffer-size</description>
1479 </property>
1480
1481 <property>
1482 <name>file.client-write-packet-size</name>
1483 <value>65536</value>
1484 <description>Packet size for clients to write</description>
1485 </property>
1486
1487 <property>
1488 <name>file.blocksize</name>
1489 <value>67108864</value>
1490 <description>Block size</description>
1491 </property>
1492
1493 <property>
1494 <name>file.replication</name>
1495 <value>1</value>
1496 <description>Replication factor</description>
1497 </property>
1498
1499 <!-- s3 File System -->
1500
1501 <property>
1502 <name>s3.stream-buffer-size</name>
1503 <value>4096</value>
1504 <description>The size of buffer to stream files.
1505 The size of this buffer should probably be a multiple of hardware
1506 page size (4096 on Intel x86), and it determines how much data is
1507 buffered during read and write operations.</description>
1508 </property>
1509
1510 <property>
1511 <name>s3.bytes-per-checksum</name>
1512 <value>512</value>
1513 <description>The number of bytes per checksum. Must not be larger than
1514 s3.stream-buffer-size</description>
1515 </property>
1516
1517 <property>
1518 <name>s3.client-write-packet-size</name>
1519 <value>65536</value>
1520 <description>Packet size for clients to write</description>
1521 </property>
1522
1523 <property>
1524 <name>s3.blocksize</name>
1525 <value>67108864</value>
1526 <description>Block size</description>
1527 </property>
1528
1529 <property>
1530 <name>s3.replication</name>
1531 <value>3</value>
1532 <description>Replication factor</description>
1533 </property>
1534
1535 <!-- s3native File System -->
1536
1537 <property>
1538 <name>s3native.stream-buffer-size</name>
1539 <value>4096</value>
1540 <description>The size of buffer to stream files.
1541 The size of this buffer should probably be a multiple of hardware
1542 page size (4096 on Intel x86), and it determines how much data is
1543 buffered during read and write operations.</description>
1544 </property>
1545
1546 <property>
1547 <name>s3native.bytes-per-checksum</name>
1548 <value>512</value>
1549 <description>The number of bytes per checksum. Must not be larger than
1550 s3native.stream-buffer-size</description>
1551 </property>
1552
1553 <property>
1554 <name>s3native.client-write-packet-size</name>
1555 <value>65536</value>
1556 <description>Packet size for clients to write</description>
1557 </property>
1558
1559 <property>
1560 <name>s3native.blocksize</name>
1561 <value>67108864</value>
1562 <description>Block size</description>
1563 </property>
1564
1565 <property>
1566 <name>s3native.replication</name>
1567 <value>3</value>
1568 <description>Replication factor</description>
1569 </property>
1570
1571 <!-- FTP file system -->
1572 <property>
1573 <name>ftp.stream-buffer-size</name>
1574 <value>4096</value>
1575 <description>The size of the buffer used to stream files.
1576 The size of this buffer should probably be a multiple of the hardware
1577 page size (4096 on Intel x86); it determines how much data is
1578 buffered during read and write operations.</description>
1579 </property>
1580
1581 <property>
1582 <name>ftp.bytes-per-checksum</name>
1583 <value>512</value>
1584 <description>The number of bytes per checksum. Must not be larger than
1585 ftp.stream-buffer-size</description>
1586 </property>
1587
1588 <property>
1589 <name>ftp.client-write-packet-size</name>
1590 <value>65536</value>
1591 <description>Packet size for clients to write</description>
1592 </property>
1593
1594 <property>
1595 <name>ftp.blocksize</name>
1596 <value>67108864</value>
1597 <description>Block size</description>
1598 </property>
1599
1600 <property>
1601 <name>ftp.replication</name>
1602 <value>3</value>
1603 <description>Replication factor</description>
1604 </property>
1605
1606 <!-- Tfile -->
1607
1608 <property>
1609 <name>tfile.io.chunk.size</name>
1610 <value>1048576</value>
1611 <description>
1612 Value chunk size in bytes. Defaults to
1613 1MB. Values shorter than the chunk size are
1614 guaranteed to have a known value length at read time (see also
1615 TFile.Reader.Scanner.Entry.isValueLengthKnown()).
1616 </description>
1617 </property>
1618
1619 <property>
1620 <name>tfile.fs.output.buffer.size</name>
1621 <value>262144</value>
1622 <description>
1623 Buffer size used for FSDataOutputStream in bytes.
1624 </description>
1625 </property>
1626
1627 <property>
1628 <name>tfile.fs.input.buffer.size</name>
1629 <value>262144</value>
1630 <description>
1631 Buffer size used for FSDataInputStream in bytes.
1632 </description>
1633 </property>
1634
1635 <!-- HTTP web-consoles Authentication -->
1636
1637 <property>
1638 <name>hadoop.http.authentication.type</name>
1639 <value>simple</value>
1640 <description>
1641 Defines authentication used for the Hadoop HTTP web-consoles.
1642 Supported values are: simple | kerberos | #AUTHENTICATION_HANDLER_CLASSNAME#
1643 </description>
1644 </property>
1645
1646 <property>
1647 <name>hadoop.http.authentication.token.validity</name>
1648 <value>36000</value>
1649 <description>
1650 Indicates how long (in seconds) an authentication token is valid before it has
1651 to be renewed.
1652 </description>
1653 </property>
1654
1655 <property>
1656 <name>hadoop.http.authentication.signature.secret.file</name>
1657 <value>${user.home}/hadoop-http-auth-signature-secret</value>
1658 <description>
1659 The signature secret for signing the authentication tokens.
1660 The same secret should be used for JT/NN/DN/TT configurations.
1661 </description>
1662 </property>
1663
1664 <property>
1665 <name>hadoop.http.authentication.cookie.domain</name>
1666 <value></value>
1667 <description>
1668 The domain to use for the HTTP cookie that stores the authentication token.
1669 For authentication to work correctly across the web-consoles of all Hadoop nodes,
1670 the domain must be set correctly.
1671 IMPORTANT: when using IP addresses, browsers ignore cookies with domain settings.
1672 For this setting to work properly, all nodes in the cluster must be configured
1673 to generate URLs with hostname.domain names in them.
1674 </description>
1675 </property>
1676
1677 <property>
1678 <name>hadoop.http.authentication.simple.anonymous.allowed</name>
1679 <value>true</value>
1680 <description>
1681 Indicates if anonymous requests are allowed when using 'simple' authentication.
1682 </description>
1683 </property>
1684
1685 <property>
1686 <name>hadoop.http.authentication.kerberos.principal</name>
1687 <value>HTTP/_HOST@LOCALHOST</value>
1688 <description>
1689 Indicates the Kerberos principal to be used for HTTP endpoint.
1690 The principal MUST start with 'HTTP/' as per Kerberos HTTP SPNEGO specification.
1691 </description>
1692 </property>
1693
1694 <property>
1695 <name>hadoop.http.authentication.kerberos.keytab</name>
1696 <value>${user.home}/hadoop.keytab</value>
1697 <description>
1698 Location of the keytab file with the credentials for the principal.
1699 It must contain the credentials for the SPNEGO principal configured above.
1700 </description>
1701 </property>
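<!--
  Illustrative core-site.xml snippet enabling Kerberos (SPNEGO) authentication
  for the Hadoop HTTP web-consoles. The realm, keytab path and secret file
  below are placeholders for a real deployment, not defaults.

    <property>
      <name>hadoop.http.authentication.type</name>
      <value>kerberos</value>
    </property>
    <property>
      <name>hadoop.http.authentication.kerberos.principal</name>
      <value>HTTP/_HOST@EXAMPLE.COM</value>
    </property>
    <property>
      <name>hadoop.http.authentication.kerberos.keytab</name>
      <value>/etc/security/keytabs/spnego.service.keytab</value>
    </property>
    <property>
      <name>hadoop.http.authentication.signature.secret.file</name>
      <value>/etc/security/http-auth-signature-secret</value>
    </property>
-->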
1702
1703 <!-- HTTP CORS support -->
1704 <property>
1705 <description>Enable/disable the cross-origin (CORS) filter.</description>
1706 <name>hadoop.http.cross-origin.enabled</name>
1707 <value>false</value>
1708 </property>
1709
1710 <property>
1711 <description>Comma separated list of origins that are allowed for web
1712 services needing cross-origin (CORS) support. Wildcards (*) and patterns
1713 are allowed.</description>
1714 <name>hadoop.http.cross-origin.allowed-origins</name>
1715 <value>*</value>
1716 </property>
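<!--
  Illustrative alternative to the wildcard default, restricting CORS to
  specific placeholder origins (exact origins, or wildcard patterns such as
  https://*.example.org, per the description above):

    <property>
      <name>hadoop.http.cross-origin.allowed-origins</name>
      <value>https://ui.example.com,https://*.example.org</value>
    </property>
-->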
1717
1718 <property>
1719 <description>Comma separated list of methods that are allowed for web
1720 services needing cross-origin (CORS) support.</description>
1721 <name>hadoop.http.cross-origin.allowed-methods</name>
1722 <value>GET,POST,HEAD</value>
1723 </property>
1724
1725 <property>
1726 <description>Comma separated list of headers that are allowed for web
1727 services needing cross-origin (CORS) support.</description>
1728 <name>hadoop.http.cross-origin.allowed-headers</name>
1729 <value>X-Requested-With,Content-Type,Accept,Origin</value>
1730 </property>
1731
1732 <property>
1733 <description>The number of seconds a pre-flighted request can be cached
1734 for web services needing cross-origin (CORS) support.</description>
1735 <name>hadoop.http.cross-origin.max-age</name>
1736 <value>1800</value>
1737 </property>
1738
1739 <property>
1740 <name>dfs.ha.fencing.methods</name>
1741 <value></value>
1742 <description>
1743 List of fencing methods to use for service fencing. May contain
1744 builtin methods (e.g. shell and sshfence) or user-defined methods.
1745 </description>
1746 </property>
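<!--
  Illustrative value combining the two builtin fencers; each method goes on
  its own line, and the script path passed to shell(...) is a placeholder.

    <property>
      <name>dfs.ha.fencing.methods</name>
      <value>sshfence
shell(/usr/local/bin/fence-namenode.sh)</value>
    </property>
-->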
1747
1748 <property>
1749 <name>dfs.ha.fencing.ssh.connect-timeout</name>
1750 <value>30000</value>
1751 <description>
1752 SSH connection timeout, in milliseconds, to use with the builtin
1753 sshfence fencer.
1754 </description>
1755 </property>
1756
1757 <property>
1758 <name>dfs.ha.fencing.ssh.private-key-files</name>
1759 <value></value>
1760 <description>
1761 The SSH private key files to use with the builtin sshfence fencer.
1762 </description>
1763 </property>
1764
1765
1766 <!-- Static Web User Filter properties. -->
1767 <property>
1768 <description>
1769 The user name to filter as, on static web filters
1770 while rendering content. An example use is the HDFS
1771 web UI (user to be used for browsing files).
1772 </description>
1773 <name>hadoop.http.staticuser.user</name>
1774 <value>dr.who</value>
1775 </property>
1776
1777 <property>
1778 <name>ha.zookeeper.quorum</name>
1779 <description>
1780 A list of ZooKeeper server addresses, separated by commas, that are
1781 to be used by the ZKFailoverController in automatic failover.
1782 </description>
1783 </property>
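<!--
  Illustrative quorum value (placeholder hostnames): the ZooKeeper servers
  used for automatic failover, listed as comma separated host:port pairs.

    <property>
      <name>ha.zookeeper.quorum</name>
      <value>zk1.example.com:2181,zk2.example.com:2181,zk3.example.com:2181</value>
    </property>
-->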
1784
1785 <property>
1786 <name>ha.zookeeper.session-timeout.ms</name>
1787 <value>5000</value>
1788 <description>
1789 The session timeout to use when the ZKFC connects to ZooKeeper.
1790 Setting this value to a lower value implies that server crashes
1791 will be detected more quickly, but risks triggering failover too
1792 aggressively in the case of a transient error or network blip.
1793 </description>
1794 </property>
1795
1796 <property>
1797 <name>ha.zookeeper.parent-znode</name>
1798 <value>/hadoop-ha</value>
1799 <description>
1800 The ZooKeeper znode under which the ZK failover controller stores
1801 its information. Note that the nameservice ID is automatically
1802 appended to this znode, so it is not normally necessary to
1803 configure this, even in a federated environment.
1804 </description>
1805 </property>
1806
1807 <property>
1808 <name>ha.zookeeper.acl</name>
1809 <value>world:anyone:rwcda</value>
1810 <description>
1811 A comma-separated list of ZooKeeper ACLs to apply to the znodes
1812 used by automatic failover. These ACLs are specified in the same
1813 format as used by the ZooKeeper CLI.
1814
1815 If the ACL itself contains secrets, you may instead specify a
1816 path to a file, prefixed with the '@' symbol, and the value of
1817 this configuration will be loaded from within.
1818 </description>
1819 </property>
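<!--
  Illustrative use of the '@' prefix to keep ACL secrets out of core-site.xml;
  the file path and digest below are placeholders:

    <property>
      <name>ha.zookeeper.acl</name>
      <value>@/etc/hadoop/conf/zk-acl.txt</value>
    </property>

  where /etc/hadoop/conf/zk-acl.txt contains a ZooKeeper CLI style ACL such as

    digest:hdfs-zkfcs:vlUvLnd8MlacsE80rDuu6ONESbM=:rwcda
-->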
1820
1821 <property>
1822 <name>ha.zookeeper.auth</name>
1823 <value></value>
1824 <description>
1825 A comma-separated list of ZooKeeper authentications to add when
1826 connecting to ZooKeeper. These are specified in the same format
1827 as used by the &quot;addauth&quot; command in the ZK CLI. It is
1828 important that the authentications specified here are sufficient
1829 to access znodes with the ACL specified in ha.zookeeper.acl.
1830
1831 If the auths contain secrets, you may instead specify a
1832 path to a file, prefixed with the '@' symbol, and the value of
1833 this configuration will be loaded from within.
1834 </description>
1835 </property>
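<!--
  Illustrative auth entry matching a digest ACL, again using the '@' file
  indirection so the secret stays out of core-site.xml (placeholder path and
  password):

    <property>
      <name>ha.zookeeper.auth</name>
      <value>@/etc/hadoop/conf/zk-auth.txt</value>
    </property>

  where /etc/hadoop/conf/zk-auth.txt contains an addauth style entry such as

    digest:hdfs-zkfcs:mypassword
-->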
1836
1837 <!-- SSLFactory configuration -->
1838
1839 <property>
1840 <name>hadoop.ssl.keystores.factory.class</name>
1841 <value>org.apache.hadoop.security.ssl.FileBasedKeyStoresFactory</value>
1842 <description>
1843 The keystores factory to use for retrieving certificates.
1844 </description>
1845 </property>
1846
1847 <property>
1848 <name>hadoop.ssl.require.client.cert</name>
1849 <value>false</value>
1850 <description>Whether client certificates are required</description>
1851 </property>
1852
1853 <property>
1854 <name>hadoop.ssl.hostname.verifier</name>
1855 <value>DEFAULT</value>
1856 <description>
1857 The hostname verifier to provide for HttpsURLConnections.
1858 Valid values are: DEFAULT, STRICT, STRICT_IE6, DEFAULT_AND_LOCALHOST and
1859 ALLOW_ALL
1860 </description>
1861 </property>
1862
1863 <property>
1864 <name>hadoop.ssl.server.conf</name>
1865 <value>ssl-server.xml</value>
1866 <description>
1867 Resource file from which ssl server keystore information will be extracted.
1868 This file is looked up in the classpath; typically it should be in the Hadoop
1869 conf/ directory.
1870 </description>
1871 </property>
1872
1873 <property>
1874 <name>hadoop.ssl.client.conf</name>
1875 <value>ssl-client.xml</value>
1876 <description>
1877 Resource file from which ssl client keystore information will be extracted.
1878 This file is looked up in the classpath; typically it should be in the Hadoop
1879 conf/ directory.
1880 </description>
1881 </property>
1882
1883 <property>
1884 <name>hadoop.ssl.enabled</name>
1885 <value>false</value>
1886 <description>
1887 Deprecated. Use dfs.http.policy and yarn.http.policy instead.
1888 </description>
1889 </property>
1890
1891 <property>
1892 <name>hadoop.ssl.enabled.protocols</name>
1893 <value>TLSv1,SSLv2Hello,TLSv1.1,TLSv1.2</value>
1894 <description>
1895 The supported SSL protocols.
1896 </description>
1897 </property>
1898
1899 <property>
1900 <name>hadoop.jetty.logs.serve.aliases</name>
1901 <value>true</value>
1902 <description>
1903 Enable/Disable aliases serving from jetty
1904 </description>
1905 </property>
1906
1907 <property>
1908 <name>fs.permissions.umask-mode</name>
1909 <value>022</value>
1910 <description>
1911 The umask used when creating files and directories.
1912 Can be in octal or in symbolic. Examples are:
1913 "022" (octal for u=rwx,g=r-x,o=r-x in symbolic),
1914 or "u=rwx,g=rwx,o=" (symbolic for 007 in octal).
1915 </description>
1916 </property>
1917
1918 <!-- ha properties -->
1919
1920 <property>
1921 <name>ha.health-monitor.connect-retry-interval.ms</name>
1922 <value>1000</value>
1923 <description>
1924 How often to retry connecting to the service.
1925 </description>
1926 </property>
1927
1928 <property>
1929 <name>ha.health-monitor.check-interval.ms</name>
1930 <value>1000</value>
1931 <description>
1932 How often to check the service.
1933 </description>
1934 </property>
1935
1936 <property>
1937 <name>ha.health-monitor.sleep-after-disconnect.ms</name>
1938 <value>1000</value>
1939 <description>
1940 How long to sleep after an unexpected RPC error.
1941 </description>
1942 </property>
1943
1944 <property>
1945 <name>ha.health-monitor.rpc-timeout.ms</name>
1946 <value>45000</value>
1947 <description>
1948 Timeout for the actual monitorHealth() calls.
1949 </description>
1950 </property>
1951
1952 <property>
1953 <name>ha.failover-controller.new-active.rpc-timeout.ms</name>
1954 <value>60000</value>
1955 <description>
1956 Timeout that the FC waits for the new active to become active
1957 </description>
1958 </property>
1959
1960 <property>
1961 <name>ha.failover-controller.graceful-fence.rpc-timeout.ms</name>
1962 <value>5000</value>
1963 <description>
1964 Timeout that the FC waits for the old active to go to standby
1965 </description>
1966 </property>
1967
1968 <property>
1969 <name>ha.failover-controller.graceful-fence.connection.retries</name>
1970 <value>1</value>
1971 <description>
1972 FC connection retries for graceful fencing
1973 </description>
1974 </property>
1975
1976 <property>
1977 <name>ha.failover-controller.cli-check.rpc-timeout.ms</name>
1978 <value>20000</value>
1979 <description>
1980 Timeout that the CLI (manual) FC waits for monitorHealth, getServiceState
1981 </description>
1982 </property>
1983
1984 <property>
1985 <name>ipc.client.fallback-to-simple-auth-allowed</name>
1986 <value>false</value>
1987 <description>
1988 When a client is configured to attempt a secure connection, but attempts to
1989 connect to an insecure server, that server may instruct the client to
1990 switch to SASL SIMPLE (unsecure) authentication. This setting controls
1991 whether or not the client will accept this instruction from the server.
1992 When false (the default), the client will not allow the fallback to SIMPLE
1993 authentication, and will abort the connection.
1994 </description>
1995 </property>
1996
1997 <property>
1998 <name>fs.client.resolve.remote.symlinks</name>
1999 <value>true</value>
2000 <description>
2001 Whether to resolve symlinks when accessing a remote Hadoop filesystem.
2002 Setting this to false causes an exception to be thrown upon encountering
2003 a symlink. This setting does not apply to local filesystems, which
2004 automatically resolve local symlinks.
2005 </description>
2006 </property>
2007
2008 <property>
2009 <name>nfs.exports.allowed.hosts</name>
2010 <value>* rw</value>
2011 <description>
2012 By default, the export can be mounted by any client. The value string
2013 contains machine name and access privilege, separated by whitespace
2014 characters. The machine name format can be a single host, a Java regular
2015 expression, or an IPv4 address. The access privilege uses rw or ro to
2016 specify read/write or read-only access of the machines to exports. If the
2017 access privilege is not provided, the default is read-only. Entries are separated by ";".
2018 For example: "192.168.0.0/22 rw ; host.*\.example\.com ; host1.test.org ro;".
2019 Only the NFS gateway needs to be restarted after this property is updated.
2020 </description>
2021 </property>
2022
2023 <property>
2024 <name>hadoop.user.group.static.mapping.overrides</name>
2025 <value>dr.who=;</value>
2026 <description>
2027 Static mapping of user to groups. This will override the groups if
2028 available in the system for the specified user. In other words, groups
2029 look-up will not happen for these users, instead groups mapped in this
2030 configuration will be used.
2031 Mappings should be in this format:
2032 user1=group1,group2;user2=;user3=group2;
2033 The default, "dr.who=;", treats "dr.who" as a user without groups.
2034 </description>
2035 </property>
2036
2037 <property>
2038 <name>rpc.metrics.quantile.enable</name>
2039 <value>false</value>
2040 <description>
2041 If this property is set to true and rpc.metrics.percentiles.intervals
2042 is set to a comma-separated list of granularities in seconds, then the
2043 50/75/90/95/99th percentile latencies for rpc queue/processing time, in
2044 milliseconds, are added to the rpc metrics.
2045 </description>
2046 </property>
2047
2048 <property>
2049 <name>rpc.metrics.percentiles.intervals</name>
2050 <value></value>
2051 <description>
2052 A comma-separated list of the granularity in seconds for the metrics which
2053 describe the 50/75/90/95/99th percentile latency for rpc queue/processing
2054 time. The metrics are emitted if rpc.metrics.quantile.enable is set to
2055 true.
2056 </description>
2057 </property>
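<!--
  Illustrative setting (the interval values are examples only): emit the
  percentile latencies over 1 minute, 5 minute and 1 hour windows.

    <property>
      <name>rpc.metrics.quantile.enable</name>
      <value>true</value>
    </property>
    <property>
      <name>rpc.metrics.percentiles.intervals</name>
      <value>60,300,3600</value>
    </property>
-->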
2058
2059 <property>
2060 <name>hadoop.security.crypto.codec.classes.EXAMPLECIPHERSUITE</name>
2061 <value></value>
2062 <description>
2063 The prefix for a given crypto codec; it contains a comma-separated
2064 list of implementation classes for that codec (e.g. EXAMPLECIPHERSUITE).
2065 The first implementation will be used if available; the others are fallbacks.
2066 </description>
2067 </property>
2068
2069 <property>
2070 <name>hadoop.security.crypto.codec.classes.aes.ctr.nopadding</name>
2071 <value>org.apache.hadoop.crypto.OpensslAesCtrCryptoCodec, org.apache.hadoop.crypto.JceAesCtrCryptoCodec</value>
2072 <description>
2073 Comma-separated list of crypto codec implementations for AES/CTR/NoPadding.
2074 The first implementation will be used if available, others are fallbacks.
2075 </description>
2076 </property>
2077
2078 <property>
2079 <name>hadoop.security.crypto.cipher.suite</name>
2080 <value>AES/CTR/NoPadding</value>
2081 <description>
2082 Cipher suite for crypto codec.
2083 </description>
2084 </property>
2085
2086 <property>
2087 <name>hadoop.security.crypto.jce.provider</name>
2088 <value></value>
2089 <description>
2090 The JCE provider name used in CryptoCodec.
2091 </description>
2092 </property>
2093
2094 <property>
2095 <name>hadoop.security.crypto.buffer.size</name>
2096 <value>8192</value>
2097 <description>
2098 The buffer size used by CryptoInputStream and CryptoOutputStream.
2099 </description>
2100 </property>
2101
2102 <property>
2103 <name>hadoop.security.java.secure.random.algorithm</name>
2104 <value>SHA1PRNG</value>
2105 <description>
2106 The java secure random algorithm.
2107 </description>
2108 </property>
2109
2110 <property>
2111 <name>hadoop.security.secure.random.impl</name>
2112 <value></value>
2113 <description>
2114 Implementation of secure random.
2115 </description>
2116 </property>
2117
2118 <property>
2119 <name>hadoop.security.random.device.file.path</name>
2120 <value>/dev/urandom</value>
2121 <description>
2122 OS security random device file path.
2123 </description>
2124 </property>
2125
2126 <property>
2127 <name>hadoop.security.key.provider.path</name>
2128 <description>
2129 The KeyProvider to use when managing zone keys, and interacting with
2130 encryption keys when reading and writing to an encryption zone.
2131 </description>
2132 </property>
2133
2134 <property>
2135 <name>fs.har.impl.disable.cache</name>
2136 <value>true</value>
2137 <description>Don't cache 'har' filesystem instances.</description>
2138 </property>
2139
2140 <!--- KMSClientProvider configurations -->
2141 <property>
2142 <name>hadoop.security.kms.client.authentication.retry-count</name>
2143 <value>1</value>
2144 <description>
2145 Number of times to retry connecting to the KMS on authentication failure
2146 </description>
2147 </property>
2148 <property>
2149 <name>hadoop.security.kms.client.encrypted.key.cache.size</name>
2150 <value>500</value>
2151 <description>
2152 Size of the EncryptedKeyVersion cache Queue for each key
2153 </description>
2154 </property>
2155 <property>
2156 <name>hadoop.security.kms.client.encrypted.key.cache.low-watermark</name>
2157 <value>0.3f</value>
2158 <description>
2159 If the size of the EncryptedKeyVersion cache Queue falls below the
2160 low watermark, the cache queue will be scheduled for a refill.
2161 </description>
2162 </property>
2163 <property>
2164 <name>hadoop.security.kms.client.encrypted.key.cache.num.refill.threads</name>
2165 <value>2</value>
2166 <description>
2167 Number of threads to use for refilling depleted EncryptedKeyVersion
2168 cache Queues
2169 </description>
2170 </property>
2171 <property>
2172 <name>hadoop.security.kms.client.encrypted.key.cache.expiry</name>
2173 <value>43200000</value>
2174 <description>
2175 Cache expiry time for a key, after which the cache Queue for this
2176 key will be dropped. Default = 12 hours (43200000 ms).
2177 </description>
2178 </property>
2179
2180 <property>
2181 <name>ipc.server.max.connections</name>
2182 <value>0</value>
2183 <description>The maximum number of concurrent connections a server is allowed
2184 to accept. If this limit is exceeded, incoming connections will first fill
2185 the listen queue and then may go to an OS-specific listen overflow queue.
2186 The client may fail or timeout, but the server can avoid running out of file
2187 descriptors using this feature. 0 means no limit.
2188 </description>
2189 </property>
2190
2191
2192 <!-- YARN registry -->
2193
2194 <property>
2195 <description>
2196 Is the registry enabled in the YARN Resource Manager?
2197
2198 If true, the YARN RM will, as needed,
2199 create the user and system paths, and purge
2200 service records when containers, application attempts
2201 and applications complete.
2202
2203 If false, the paths must be created by other means,
2204 and no automatic cleanup of service records will take place.
2205 </description>
2206 <name>hadoop.registry.rm.enabled</name>
2207 <value>false</value>
2208 </property>
2209
2210 <property>
2211 <description>
2212 The root zookeeper node for the registry
2213 </description>
2214 <name>hadoop.registry.zk.root</name>
2215 <value>/registry</value>
2216 </property>
2217
2218 <property>
2219 <description>
2220 Zookeeper session timeout in milliseconds
2221 </description>
2222 <name>hadoop.registry.zk.session.timeout.ms</name>
2223 <value>60000</value>
2224 </property>
2225
2226 <property>
2227 <description>
2228 Zookeeper connection timeout in milliseconds
2229 </description>
2230 <name>hadoop.registry.zk.connection.timeout.ms</name>
2231 <value>15000</value>
2232 </property>
2233
2234 <property>
2235 <description>
2236 Zookeeper connection retry count before failing
2237 </description>
2238 <name>hadoop.registry.zk.retry.times</name>
2239 <value>5</value>
2240 </property>
2241
2242 <property>
2243 <description>
2244 Zookeeper connection retry interval in milliseconds.</description>
2245 <name>hadoop.registry.zk.retry.interval.ms</name>
2246 <value>1000</value>
2247 </property>
2248
2249 <property>
2250 <description>
2251 Zookeeper retry limit in milliseconds, during
2252 exponential backoff.
2253
2254 This places a limit even
2255 if the retry count and interval, combined
2256 with the backoff policy, would result in a long retry
2257 period.
2258 </description>
2259 <name>hadoop.registry.zk.retry.ceiling.ms</name>
2260 <value>60000</value>
2261 </property>
2262
2263 <property>
2264 <description>
2265 List of hostname:port pairs defining the
2266 zookeeper quorum binding for the registry
2267 </description>
2268 <name>hadoop.registry.zk.quorum</name>
2269 <value>localhost:2181</value>
2270 </property>
2271
2272 <property>
2273 <description>
2274 Key to set if the registry is secure. Turning it on
2275 changes the permissions policy from "open access"
2276 to Kerberos-based restrictions, with the option of
2277 a user adding one or more auth key pairs down their
2278 own tree.
2279 </description>
2280 <name>hadoop.registry.secure</name>
2281 <value>false</value>
2282 </property>
2283
2284 <property>
2285 <description>
2286 A comma separated list of Zookeeper ACL identifiers with
2287 system access to the registry in a secure cluster.
2288
2289 These are given full access to all entries.
2290
2291 If there is an "@" at the end of a SASL entry it
2292 instructs the registry client to append the default kerberos domain.
2293 </description>
2294 <name>hadoop.registry.system.acls</name>
2295 <value>sasl:yarn@, sasl:mapred@, sasl:hdfs@</value>
2296 </property>
2297
2298 <property>
2299 <description>
2300 The kerberos realm: used to set the realm of
2301 system principals which do not declare their realm,
2302 and any other accounts that need the value.
2303
2304 If empty, the default realm of the running process
2305 is used.
2306
2307 If neither is known and the realm is needed, then the registry
2308 service/client will fail.
2309 </description>
2310 <name>hadoop.registry.kerberos.realm</name>
2311 <value></value>
2312 </property>
2313
2314 <property>
2315 <description>
2316 Key to define the JAAS context. Used in secure
2317 mode
2318 </description>
2319 <name>hadoop.registry.jaas.context</name>
2320 <value>Client</value>
2321 </property>
2322
2323 <property>
2324 <description>
2325 Enable hdfs shell commands to display warnings if the (fs.defaultFS) property
2326 is not set.
2327 </description>
2328 <name>hadoop.shell.missing.defaultFs.warning</name>
2329 <value>false</value>
2330 </property>
2331
2332 <property>
2333 <name>hadoop.shell.safely.delete.limit.num.files</name>
2334 <value>100</value>
2335 <description>Used by the -safely option of the hadoop fs shell -rm command to avoid
2336 accidental deletion of large directories. When enabled, the -rm command
2337 requires confirmation if the number of files to be deleted is greater than
2338 this limit. The default limit is 100 files. The warning is disabled if
2339 the limit is 0 or -safely is not specified in the -rm command.
2340 </description>
2341 </property>
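<!--
  Illustrative shell usage (the path is a placeholder): with the property
  above left at 100, this command prompts for confirmation before deleting
  a directory tree containing more than 100 files.

    hadoop fs -rm -r -safely /user/alice/large-dataset
-->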
2342
2343 <property>
2344 <name>fs.client.htrace.sampler.classes</name>
2345 <value></value>
2346 <description>The class names of the HTrace Samplers to use for Hadoop
2347 filesystem clients.
2348 </description>
2349 </property>
2350
2351 <property>
2352 <name>hadoop.htrace.span.receiver.classes</name>
2353 <value></value>
2354 <description>The class names of the Span Receivers to use for Hadoop.
2355 </description>
2356 </property>
2357
2358 <property>
2359 <description>
2360 Enable the "/logs" endpoint on all Hadoop daemons, which serves local
2361 logs, but may be considered a security risk due to it listing the contents
2362 of a directory.
2363 </description>
2364 <name>hadoop.http.logs.enabled</name>
2365 <value>true</value>
2366 </property>
2367
2368 <property>
2369 <name>fs.client.resolve.topology.enabled</name>
2370 <value>false</value>
2371 <description>Whether the client machine will use the class specified by
2372 property net.topology.node.switch.mapping.impl to compute the network
2373 distance between itself and remote machines of the FileSystem. Additional
2374 properties might need to be configured depending on the class specified
2375 in net.topology.node.switch.mapping.impl. For example, if
2376 org.apache.hadoop.net.ScriptBasedMapping is used, a valid script file
2377 needs to be specified in net.topology.script.file.name.
2378 </description>
2379 </property>
2380
2381 <property>
2382 <name>hadoop.caller.context.enabled</name>
2383 <value>false</value>
2384 <description>When the feature is enabled, additional fields are written into
2385 name-node audit log records for auditing coarse granularity operations.
2386 </description>
2387 </property>
2388 <property>
2389 <name>hadoop.caller.context.max.size</name>
2390 <value>128</value>
2391 <description>The maximum number of bytes a caller context string can have. If the
2392 passed caller context is longer than this maximum, the client will
2393 truncate it before sending it to the server. Note that the server may have a
2394 different maximum size, and will truncate the caller context to the
2395 maximum size it allows.
2396 </description>
2397 </property>
2398 <property>
2399 <name>hadoop.caller.context.signature.max.size</name>
2400 <value>40</value>
2401 <description>
2402 The caller's signature (optional) is for offline validation. If the
2403 signature exceeds the maximum allowed bytes on the server, the caller context
2404 will be abandoned and will not be recorded
2405 in the audit logs.
2406 </description>
2407 </property>
2408
2409 </configuration>