DC/OS Software Development Kit Status: Alpha

HDFS: Connecting Clients

Applications interface with HDFS as they would with any POSIX file system. However, applications that act as client nodes of the HDFS deployment require hdfs-site.xml and core-site.xml files, which provide the configuration information necessary to communicate with the cluster.
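
For example, a Java application can load these two files into a Hadoop Configuration and then use the standard FileSystem API. The sketch below is illustrative rather than part of the service: it assumes the two files (retrieved as shown in the next section) have been saved to the client's working directory, and it simply writes and checks a small test file.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HdfsClientExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Hypothetical local paths: point these at wherever you saved the
        // files retrieved from the service endpoints.
        conf.addResource(new Path("core-site.xml"));
        conf.addResource(new Path("hdfs-site.xml"));

        // fs.default.name in core-site.xml names the logical nameservice
        // (hdfs://hdfs), so no NameNode host needs to be hard-coded here.
        try (FileSystem fs = FileSystem.get(conf)) {
            Path file = new Path("/tmp/client-example.txt");
            try (FSDataOutputStream out = fs.create(file, true)) {
                out.writeUTF("hello from an HDFS client");
            }
            System.out.println("created " + file + ", exists=" + fs.exists(file));
        }
    }
}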

Discovering Endpoints

Connection Info Using the DC/OS CLI

Execute the following commands from the DC/OS CLI to retrieve the hdfs-site.xml and core-site.xml files that client applications can use to connect to the cluster. Each command returns the full contents of the corresponding file; save the output locally so that client applications can load it.

$ dcos beta-hdfs --name=<service-name> endpoints hdfs-site.xml
...
$ dcos beta-hdfs --name=<service-name> endpoints core-site.xml
...

Connection Info Response

The responses will resemble the following.

hdfs-site.xml

<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
    <property>
        <name>hadoop.hdfs.configuration.version</name>
        <value>1</value>
    </property>
    <property>
        <name>dfs.nameservice.id</name>
        <value>hdfs</value>
    </property>
    <property>
        <name>dfs.nameservices</name>
        <value>hdfs</value>
    </property>
    <property>
        <name>dfs.ha.namenodes.hdfs</name>
        <value>name-0-node,name-1-node</value>
    </property>
    <property>
        <name>dfs.cluster.administrators</name>
        <value>core,centos,azureuser</value>
    </property>
    <!-- namenode -->
    <property>
        <name>dfs.namenode.logging.level</name>
        <value>info</value>
    </property>
    <property>
        <name>dfs.namenode.shared.edits.dir</name>
        <value>qjournal://journal-0-node.hdfs.autoip.dcos.thisdcos.directory:8485;journal-1-node.hdfs.autoip.dcos.thisdcos.directory:8485;journal-2-node.hdfs.autoip.dcos.thisdcos.directory:8485/hdfs</value>
    </property>
    <property>
        <name>dfs.namenode.name.dir</name>
        <value>sandboxpath/name-data</value>
    </property>
    <property>
        <name>dfs.namenode.safemode.threshold-pct</name>
        <value>0.999f</value>
    </property>
    <property>
        <name>dfs.namenode.heartbeat.recheck-interval</name>
        <value>60000</value>
    </property>
    <property>
        <name>dfs.namenode.handler.count</name>
        <value>10</value>
    </property>
    <property>
        <name>dfs.namenode.invalidate.work.pct.per.iteration</name>
        <value>0.32f</value>
    </property>
    <property>
        <name>dfs.namenode.replication.min</name>
        <value>1</value>
    </property>
    <property>
        <name>dfs.namenode.replication.work.multiplier.per.iteration</name>
        <value>2</value>
    </property>
    <property>
        <name>dfs.namenode.name.dir.restore</name>
        <value>false</value>
    </property>
    <property>
        <name>dfs.namenode.replication.considerLoad</name>
        <value>true</value>
    </property>
    <property>
        <name>dfs.namenode.fs-limits.max-component-length</name>
        <value>255</value>
    </property>
    <property>
        <name>dfs.namenode.fs-limits.max-directory-items</name>
        <value>1048576</value>
    </property>
    <property>
        <name>dfs.namenode.fs-limits.min-block-size</name>
        <value>1048576</value>
    </property>
    <property>
        <name>dfs.namenode.fs-limits.max-blocks-per-file</name>
        <value>1048576</value>
    </property>
    <property>
        <name>dfs.namenode.edits.dir</name>
        <value>sandboxpath/name-data</value>
    </property>
    <property>
        <name>dfs.namenode.acls.enabled</name>
        <value>false</value>
    </property>
    <property>
        <name>dfs.namenode.lazypersist.file.scrub.interval.sec</name>
        <value>300</value>
    </property>
    <property>
        <name>dfs.namenode.safemode.min.datanodes</name>
        <value>0</value>
    </property>
    <property>
        <name>dfs.namenode.safemode.extension</name>
        <value>30000</value>
    </property>
    <property>
        <name>dfs.namenode.resource.check.interval</name>
        <value>5000</value>
    </property>
    <property>
        <name>dfs.namenode.resource.du.reserved</name>
        <value>104857600</value>
    </property>
    <property>
        <name>dfs.namenode.resource.checked.volumes</name>
        <value></value>
    </property>
    <property>
        <name>dfs.namenode.resource.checked.volumes.minimum</name>
        <value>1</value>
    </property>
    <property>
        <name>dfs.namenode.max.objects</name>
        <value>0</value>
    </property>
    <property>
        <name>dfs.namenode.decommission.interval</name>
        <value>30</value>
    </property>
    <property>
        <name>dfs.namenode.decommission.blocks.per.interval</name>
        <value>500000</value>
    </property>
    <property>
        <name>dfs.namenode.replication.interval</name>
        <value>3</value>
    </property>
    <property>
        <name>dfs.namenode.accesstime.precision</name>
        <value>3600000</value>
    </property>
    <property>
        <name>dfs.namenode.plugins</name>
        <value></value>
    </property>
    <property>
        <name>dfs.namenode.checkpoint.dir</name>
        <value>file://name-data/namesecondary</value>
    </property>
    <property>
        <name>dfs.namenode.checkpoint.edits.dir</name>
        <value>file://name-data/namesecondary</value>
    </property>
    <property>
        <name>dfs.namenode.checkpoint.period</name>
        <value>3600</value>
    </property>
    <property>
        <name>dfs.namenode.checkpoint.txns</name>
        <value>1000000</value>
    </property>
    <property>
        <name>dfs.namenode.checkpoint.check.period</name>
        <value>60</value>
    </property>
    <property>
        <name>dfs.namenode.checkpoint.max-retries</name>
        <value>3</value>
    </property>
    <property>
        <name>dfs.namenode.num.checkpoints.retained</name>
        <value>2</value>
    </property>
    <property>
        <name>dfs.namenode.num.extra.edits.retained</name>
        <value>1000000</value>
    </property>
    <property>
        <name>dfs.namenode.max.extra.edits.segments.retained</name>
        <value>10000</value>
    </property>
    <property>
        <name>dfs.namenode.delegation.key.update-interval</name>
        <value>86400000</value>
    </property>
    <property>
        <name>dfs.namenode.delegation.token.max-lifetime</name>
        <value>604800000</value>
    </property>
    <property>
        <name>dfs.namenode.support.allow.format</name>
        <value>true</value>
    </property>
    <property>
        <name>dfs.namenode.avoid.read.stale.datanode</name>
        <value>false</value>
    </property>
    <property>
        <name>dfs.namenode.avoid.write.stale.datanode</name>
        <value>false</value>
    </property>
    <property>
        <name>dfs.namenode.stale.datanode.interval</name>
        <value>30000</value>
    </property>
    <property>
        <name>dfs.namenode.write.stale.datanode.ratio</name>
        <value>0.5f</value>
    </property>
    <property>
        <name>dfs.namenode.audit.loggers</name>
        <value>default</value>
    </property>
    <property>
        <name>dfs.namenode.edits.noeditlogchannelflush</name>
        <value>false</value>
    </property>
    <property>
        <name>dfs.namenode.enable.retrycache</name>
        <value>true</value>
    </property>
    <property>
        <name>dfs.namenode.retrycache.expirytime.millis</name>
        <value>600000</value>
    </property>
    <property>
        <name>dfs.namenode.retrycache.heap.percent</name>
        <value>0.03f</value>
    </property>
    <property>
        <name>dfs.namenode.path.based.cache.block.map.allocation.percent</name>
        <value>0.25</value>
    </property>
    <property>
        <name>dfs.namenode.list.cache.directives.num.responses</name>
        <value>100</value>
    </property>
    <property>
        <name>dfs.namenode.list.cache.pools.num.responses</name>
        <value>100</value>
    </property>
    <property>
        <name>dfs.namenode.path.based.cache.refresh.interval.ms</name>
        <value>30000</value>
    </property>
    <property>
        <name>dfs.namenode.path.based.cache.retry.interval.ms</name>
        <value>30000</value>
    </property>
    <property>
        <name>dfs.namenode.edit.log.autoroll.multiplier.threshold</name>
        <value>2</value>
    </property>
    <property>
        <name>dfs.namenode.edit.log.autoroll.check.interval.ms</name>
        <value>300000</value>
    </property>
    <property>
        <name>dfs.namenode.reject-unresolved-dn-topology-mapping</name>
        <value>false</value>
    </property>
    <property>
        <name>dfs.namenode.xattrs.enabled</name>
        <value>true</value>
    </property>
    <property>
        <name>dfs.namenode.fs-limits.max-xattrs-per-inode</name>
        <value>32</value>
    </property>
    <property>
        <name>dfs.namenode.fs-limits.max-xattr-size</name>
        <value>16384</value>
    </property>
    <property>
        <name>dfs.namenode.startup.delay.block.deletion.sec</name>
        <value>0</value>
    </property>
    <property>
        <name>dfs.namenode.list.encryption.zones.num.responses</name>
        <value>100</value>
    </property>
    <property>
        <name>dfs.namenode.inotify.max.events.per.rpc</name>
        <value>1000</value>
    </property>
    <property>
        <name>dfs.namenode.legacy-oiv-image.dir</name>
        <value></value>
    </property>

    <!-- name-0-node -->
    <property>
        <name>dfs.namenode.rpc-address.hdfs.name-0-node</name>
        <value>name-0-node.hdfs.autoip.dcos.thisdcos.directory:9001</value>
    </property>
    <property>
        <name>dfs.namenode.rpc-bind-host.hdfs.name-0-node</name>
        <value>0.0.0.0</value>
    </property>
    <property>
        <name>dfs.namenode.http-address.hdfs.name-0-node</name>
        <value>name-0-node.hdfs.autoip.dcos.thisdcos.directory:9002</value>
    </property>
    <property>
        <name>dfs.namenode.http-bind-host.hdfs.name-0-node</name>
        <value>0.0.0.0</value>
    </property>


    <!-- name-1-node -->
    <property>
        <name>dfs.namenode.rpc-address.hdfs.name-1-node</name>
        <value>name-1-node.hdfs.autoip.dcos.thisdcos.directory:9001</value>
    </property>
    <property>
        <name>dfs.namenode.rpc-bind-host.hdfs.name-1-node</name>
        <value>0.0.0.0</value>
    </property>
    <property>
        <name>dfs.namenode.http-address.hdfs.name-1-node</name>
        <value>name-1-node.hdfs.autoip.dcos.thisdcos.directory:9002</value>
    </property>
    <property>
        <name>dfs.namenode.http-bind-host.hdfs.name-1-node</name>
        <value>0.0.0.0</value>
    </property>

    <property>
        <name>dfs.ha.zkfc.port</name>
        <value>8019</value>
    </property>

    <!-- journalnode -->
    <property>
        <name>dfs.journalnode.rpc-address</name>
        <value>0.0.0.0:8485</value>
    </property>
    <property>
        <name>dfs.journalnode.http-address</name>
        <value>0.0.0.0:8480</value>
    </property>
    <property>
        <name>dfs.journalnode.edits.dir</name>
        <value>sandboxpath/journal-data</value>
    </property>

    <!-- datanode -->
    <property>
        <name>dfs.datanode.address</name>
        <value>0.0.0.0:9003</value>
    </property>
    <property>
        <name>dfs.datanode.http.address</name>
        <value>0.0.0.0:9004</value>
    </property>
    <property>
        <name>dfs.datanode.ipc.address</name>
        <value>0.0.0.0:9005</value>
    </property>
    <property>
        <name>dfs.datanode.data.dir</name>
        <value>sandboxpath/data-data</value>
    </property>
    <property>
        <name>dfs.datanode.balance.bandwidthPerSec</name>
        <value>41943040</value>
    </property>
    <property>
        <name>dfs.datanode.handler.count</name>
        <value>10</value>
    </property>
    <property>
        <name>dfs.datanode.dns.nameserver</name>
        <value>default</value>
    </property>
    <property>
        <name>dfs.datanode.du.reserved</name>
        <value>0</value>
    </property>
    <property>
        <name>dfs.datanode.directoryscan.interval</name>
        <value>21600</value>
    </property>
    <property>
        <name>dfs.datanode.directoryscan.threads</name>
        <value>1</value>
    </property>
    <property>
        <name>dfs.datanode.balance.bandwidthPerSec</name>
        <value>1048576</value>
    </property>
    <property>
        <name>dfs.datanode.plugins</name>
        <value></value>
    </property>
    <property>
        <name>dfs.datanode.failed.volumes.tolerated</name>
        <value>0</value>
    </property>
    <property>
        <name>dfs.datanode.max.transfer.threads</name>
        <value>4096</value>
    </property>
    <property>
        <name>dfs.datanode.readahead.bytes</name>
        <value>4193404</value>
    </property>
    <property>
        <name>dfs.datanode.drop.cache.behind.reads</name>
        <value>false</value>
    </property>
    <property>
        <name>dfs.datanode.drop.cache.behind.writes</name>
        <value>false</value>
    </property>
    <property>
        <name>dfs.datanode.sync.behind.writes</name>
        <value>false</value>
    </property>
    <property>
        <name>dfs.datanode.use.datanode.hostname</name>
        <value>false</value>
    </property>
    <property>
        <name>dfs.datanode.shared.file.descriptor.paths</name>
        <value>/dev/shm,/tmp</value>
    </property>
    <property>
        <name>dfs.datanode.hdfs-blocks-metadata.enabled</name>
        <value>false</value>
    </property>
    <property>
        <name>dfs.datanode.available-space-volume-choosing-policy.balanced-space-threshold</name>
        <value>1.073741824E10</value>
    </property>
    <property>
        <name>dfs.datanode.available-space-volume-choosing-policy.balanced-space-preference-fraction</name>
        <value>0.75f</value>
    </property>
    <property>
        <name>dfs.datanode.max.locked.memory</name>
        <value>0</value>
    </property>
    <property>
        <name>dfs.datanode.fsdatasetcache.max.threads.per.volume</name>
        <value>4</value>
    </property>
    <property>
        <name>dfs.datanode.slow.io.warning.threshold.ms</name>
        <value>300</value>
    </property>
    <property>
        <name>dfs.datanode.cache.revocation.timeout.ms</name>
        <value>900000</value>
    </property>
    <property>
        <name>dfs.datanode.cache.revocation.polling.ms</name>
        <value>500</value>
    </property>
    <property>
        <name>dfs.datanode.block.id.layout.upgrade.threads</name>
        <value>12</value>
    </property>

    <!-- HA -->
    <property>
        <name>ha.zookeeper.quorum</name>
        <value>master.mesos:2181</value>
    </property>
    <property>
        <name>dfs.ha.fencing.methods</name>
        <value>shell(/bin/true)</value>
    </property>
    <property>
        <name>dfs.ha.automatic-failover.enabled</name>
        <value>true</value>
    </property>


    <property>
        <name>dfs.image.compress</name>
        <value>true</value>
    </property>
    <property>
        <name>dfs.image.compression.codec</name>
        <value>org.apache.hadoop.io.compress.SnappyCodec</value>
    </property>
    <property>
        <name>dfs.client.read.shortcircuit</name>
        <value>true</value>
    </property>
    <property>
        <name>dfs.domain.socket.path</name>
        <value>dn_socket</value>
    </property>
    <property>
        <name>dfs.client.read.shortcircuit.skip.checksum</name>
        <value>false</value>
    </property>
    <property>
        <name>dfs.client.read.shortcircuit.streams.cache.size</name>
        <value>256</value>
    </property>
    <property>
        <name>dfs.client.read.shortcircuit.streams.cache.expiry.ms</name>
        <value>300000</value>
    </property>
    <property>
        <name>dfs.client.failover.proxy.provider.hdfs</name>
        <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
    </property>
    <property>
        <name>dfs.permissions.enabled</name>
        <value>false</value>
    </property>

    <property>
        <name>dfs.permissions.superusergroup</name>
        <value>supergroup</value>
    </property>
    <property>
        <name>dfs.client.cached.conn.retry</name>
        <value>3</value>
    </property>
    <property>
        <name>dfs.https.server.keystore.resource</name>
        <value>ssl-server.xml</value>
    </property>
    <property>
        <name>dfs.client.https.keystore.resource</name>
        <value>ssl-client.xml</value>
    </property>
    <property>
        <name>dfs.default.chunk.view.size</name>
        <value>32768</value>
    </property>
    <property>
        <name>dfs.block.access.token.enable</name>
        <value>false</value>
    </property>
    <property>
        <name>dfs.block.access.key.update.interval</name>
        <value>600</value>
    </property>
    <property>
        <name>dfs.block.access.token.lifetime</name>
        <value>600</value>
    </property>
    <property>
        <name>dfs.replication</name>
        <value>3</value>
    </property>
    <property>
        <name>dfs.replication.max</name>
        <value>512</value>
    </property>
    <property>
        <name>dfs.blocksize</name>
        <value>134217728</value>
    </property>
    <property>
        <name>dfs.client.block.write.retries</name>
        <value>3</value>
    </property>
    <property>
        <name>dfs.client.block.write.replace-datanode-on-failure.enable</name>
        <value>true</value>
    </property>
    <property>
        <name>dfs.client.block.write.replace-datanode-on-failure.policy</name>
        <value>DEFAULT</value>
    </property>
    <property>
        <name>dfs.client.block.write.replace-datanode-on-failure.best-effort</name>
        <value>false</value>
    </property>
    <property>
        <name>dfs.blockreport.intervalMsec</name>
        <value>21600000</value>
    </property>
    <property>
        <name>dfs.blockreport.initialDelay</name>
        <value>0</value>
    </property>
    <property>
        <name>dfs.blockreport.split.threshold</name>
        <value>1000000</value>
    </property>
    <property>
        <name>dfs.heartbeat.interval</name>
        <value>3</value>
    </property>
    <property>
        <name>dfs.hosts</name>
        <value></value>
    </property>
    <property>
        <name>dfs.hosts.exclude</name>
        <value></value>
    </property>
    <property>
        <name>dfs.stream-buffer-size</name>
        <value>4096</value>
    </property>
    <property>
        <name>dfs.bytes-per-checksum</name>
        <value>512</value>
    </property>
    <property>
        <name>dfs.client-write-packet-size</name>
        <value>65536</value>
    </property>
    <property>
        <name>dfs.client.write.exclude.nodes.cache.expiry.interval.millis</name>
        <value>600000</value>
    </property>
    <property>
        <name>dfs.image.transfer.timeout</name>
        <value>60000</value>
    </property>
    <property>
        <name>dfs.image.transfer.bandwidthPerSec</name>
        <value>0</value>
    </property>
    <property>
        <name>dfs.image.transfer.chunksize</name>
        <value>65536</value>
    </property>
    <property>
        <name>dfs.client.failover.max.attempts</name>
        <value>15</value>
    </property>
    <property>
        <name>dfs.client.failover.sleep.base.millis</name>
        <value>500</value>
    </property>
    <property>
        <name>dfs.client.failover.sleep.max.millis</name>
        <value>15000</value>
    </property>
    <property>
        <name>dfs.client.failover.connection.retries</name>
        <value>0</value>
    </property>
    <property>
        <name>dfs.client.failover.connection.retries.on.timeouts</name>
        <value>0</value>
    </property>
    <property>
        <name>dfs.client.datanode-restart.timeout</name>
        <value>30</value>
    </property>
    <property>
        <name>dfs.ha.log-roll.period</name>
        <value>120</value>
    </property>

    <property>
        <name>dfs.ha.tail-edits.period</name>
        <value>60</value>
    </property>
    <property>
        <name>dfs.support.append</name>
        <value>true</value>
    </property>
    <property>
        <name>dfs.client.use.datanode.hostname</name>
        <value>false</value>
    </property>
    <property>
        <name>dfs.client.local.interfaces</name>
        <value></value>
    </property>
    <property>
        <name>dfs.short.circuit.shared.memory.watcher.interrupt.check.ms</name>
        <value>60000</value>
    </property>
    <property>
        <name>dfs.webhdfs.enabled</name>
        <value>true</value>
    </property>
    <property>
        <name>hadoop.fuse.connection.timeout</name>
        <value>300</value>
    </property>
    <property>
        <name>hadoop.fuse.timer.period</name>
        <value>5</value>
    </property>
    <property>
        <name>dfs.metrics.percentiles.intervals</name>
        <value></value>
    </property>
    <property>
        <name>dfs.encrypt.data.transfer</name>
        <value>false</value>
    </property>
    <property>
        <name>dfs.encrypt.data.transfer.algorithm</name>
        <value></value>
    </property>
    <property>
        <name>dfs.encrypt.data.transfer.cipher.suites</name>
        <value></value>
    </property>
    <property>
        <name>dfs.encrypt.data.transfer.cipher.key.bitlength</name>
        <value>128</value>
    </property>
    <property>
        <name>dfs.trustedchannel.resolver.class</name>
        <value></value>
    </property>
    <property>
        <name>dfs.data.transfer.protection</name>
        <value></value>
    </property>
    <property>
        <name>dfs.data.transfer.saslproperties.resolver.class</name>
        <value></value>
    </property>
    <property>
        <name>dfs.client.file-block-storage-locations.num-threads</name>
        <value>10</value>
    </property>
    <property>
        <name>dfs.client.file-block-storage-locations.timeout.millis</name>
        <value>1000</value>
    </property>
    <property>
        <name>dfs.client.cache.drop.behind.writes</name>
        <value></value>
    </property>
    <property>
        <name>dfs.client.cache.drop.behind.reads</name>
        <value></value>
    </property>
    <property>
        <name>dfs.client.cache.readahead</name>
        <value></value>
    </property>
    <property>
        <name>dfs.client.mmap.enabled</name>
        <value>true</value>
    </property>
    <property>
        <name>dfs.client.mmap.cache.size</name>
        <value>256</value>
    </property>
    <property>
        <name>dfs.client.mmap.cache.timeout.ms</name>
        <value>3600000</value>
    </property>
    <property>
        <name>dfs.client.mmap.retry.timeout.ms</name>
        <value>300000</value>
    </property>
    <property>
        <name>dfs.client.short.circuit.replica.stale.threshold.ms</name>
        <value>1800000</value>
    </property>
    <property>
        <name>dfs.cachereport.intervalMsec</name>
        <value>10000</value>
    </property>

    <property>
        <name>dfs.webhdfs.user.provider.user.pattern</name>
        <value>^[A-Za-z_][A-Za-z0-9._-]*[$]?$</value>
    </property>
    <property>
        <name>dfs.block.local-path-access.user</name>
        <value></value>
    </property>
    <property>
        <name>dfs.client.domain.socket.data.traffic</name>
        <value>false</value>
    </property>
    <property>
        <name>dfs.client.slow.io.warning.threshold.ms</name>
        <value>30000</value>
    </property>
    <property>
        <name>dfs.encryption.key.provider.uri</name>
        <value></value>
    </property>
    <property>
        <name>dfs.storage.policy.enabled</name>
        <value>true</value>
    </property>

</configuration>

core-site.xml

<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
    <property>
        <name>fs.default.name</name>
        <value>hdfs://hdfs</value>
    </property>
    <property>
        <name>hadoop.common.configuration.version</name>
        <value>0.23.0</value>
    </property>
    <property>
        <name>hadoop.proxyuser.hue.hosts</name>
        <value>*</value>
    </property>
    <property>
        <name>hadoop.proxyuser.hue.groups</name>
        <value>*</value>
    </property>
    <property>
        <name>hadoop.proxyuser.root.hosts</name>
        <value>*</value>
    </property>
    <property>
        <name>hadoop.proxyuser.root.groups</name>
        <value>*</value>
    </property>
    <property>
        <name>hadoop.proxyuser.httpfs.groups</name>
        <value>*</value>
    </property>
    <property>
        <name>hadoop.proxyuser.httpfs.hosts</name>
        <value>*</value>
    </property>
    <property>
        <name>ha.zookeeper.parent-znode</name>
        <value>/dcos-service-hdfs/hadoop-ha</value>
    </property>
    <property>
        <name>ipc.client.connect.max.retries</name>
        <value>300</value>
    </property>
    <property>
        <name>hadoop.tmp.dir</name>
        <value>/tmp/hadoop-${user.name}</value>
    </property>
    <property>
        <name>hadoop.http.filter.initializers</name>
        <value>org.apache.hadoop.http.lib.StaticUserWebFilter</value>
    </property>
    <property>
        <name>hadoop.security.authorization</name>
        <value>false</value>
    </property>
    <property>
        <name>hadoop.security.instrumentation.requires.admin</name>
        <value>false</value>
    </property>
    <property>
        <name>hadoop.security.authentication</name>
        <value>simple</value>
    </property>
    <property>
        <name>hadoop.security.group.mapping</name>
        <value>org.apache.hadoop.security.JniBasedUnixGroupsMappingWithFallback</value>
    </property>
    <property>
        <name>hadoop.security.groups.cache.secs</name>
        <value>300</value>
    </property>
    <property>
        <name>hadoop.security.groups.negative-cache.secs</name>
        <value>30</value>
    </property>
    <property>
        <name>hadoop.security.groups.cache.warn.after.ms</name>
        <value>5000</value>
    </property>
    <property>
        <name>hadoop.security.group.mapping.ldap.url</name>
        <value></value>
    </property>
    <property>
        <name>hadoop.security.group.mapping.ldap.ssl</name>
        <value>false</value>
    </property>
    <property>
        <name>hadoop.security.group.mapping.ldap.ssl.keystore</name>
        <value></value>
    </property>
    <property>
        <name>hadoop.security.group.mapping.ldap.ssl.keystore.password.file</name>
        <value></value>
    </property>
    <property>
        <name>hadoop.security.group.mapping.ldap.bind.user</name>
        <value></value>
    </property>
    <property>
        <name>hadoop.security.group.mapping.ldap.bind.password.file</name>
        <value></value>
    </property>
    <property>
        <name>hadoop.security.group.mapping.ldap.base</name>
        <value></value>
    </property>
    <property>
        <name>hadoop.security.group.mapping.ldap.search.filter.user</name>
        <value></value>
    </property>
    <property>
        <name>hadoop.security.group.mapping.ldap.search.filter.group</name>
        <value></value>
    </property>
    <property>
        <name>hadoop.security.group.mapping.ldap.search.attr.member</name>
        <value>member</value>
    </property>
    <property>
        <name>hadoop.security.group.mapping.ldap.search.attr.group.name</name>
        <value>cn</value>
    </property>
    <property>
        <name>hadoop.security.group.mapping.ldap.directory.search.timeout</name>
        <value>10000</value>
    </property>
    <property>
        <name>hadoop.security.service.user.name.key</name>
        <value></value>
    </property>
    <property>
        <name>hadoop.security.uid.cache.secs</name>
        <value>14400</value>
    </property>
    <property>
        <name>hadoop.rpc.protection</name>
        <value>authentication</value>
    </property>
    <property>
        <name>hadoop.security.saslproperties.resolver.class</name>
        <value></value>
    </property>
    <property>
        <name>hadoop.work.around.non.threadsafe.getpwuid</name>
        <value>false</value>
    </property>
    <property>
        <name>hadoop.kerberos.kinit.command</name>
        <value>kinit</value>
    </property>
    <property>
        <name>hadoop.security.auth_to_local</name>
        <value></value>
    </property>
    <property>
        <name>io.file.buffer.size</name>
        <value>4096</value>
    </property>
    <property>
        <name>io.bytes.per.checksum</name>
        <value>512</value>
    </property>
    <property>
        <name>io.skip.checksum.errors</name>
        <value>false</value>
    </property>
    <property>
        <name>io.compression.codecs</name>
        <value></value>
    </property>
    <property>
        <name>io.compression.codec.bzip2.library</name>
        <value>system-native</value>
    </property>
    <property>
        <name>io.serializations</name>
        <value>org.apache.hadoop.io.serializer.WritableSerialization,org.apache.hadoop.io.serializer.avro.AvroSpecificSerialization,org.apache.hadoop.io.serializer.avro.AvroReflectSerialization</value>
    </property>
    <property>
        <name>io.seqfile.local.dir</name>
        <value>${hadoop.tmp.dir}/io/local</value>
    </property>
    <property>
        <name>io.map.index.skip</name>
        <value>0</value>
    </property>
    <property>
        <name>io.map.index.interval</name>
        <value>128</value>
    </property>
    <property>
        <name>fs.trash.interval</name>
        <value>0</value>
    </property>
    <property>
        <name>fs.trash.checkpoint.interval</name>
        <value>0</value>
    </property>
    <property>
        <name>fs.AbstractFileSystem.file.impl</name>
        <value>org.apache.hadoop.fs.local.LocalFs</value>
    </property>
    <property>
        <name>fs.AbstractFileSystem.har.impl</name>
        <value>org.apache.hadoop.fs.HarFs</value>
    </property>
    <property>
        <name>fs.AbstractFileSystem.hdfs.impl</name>
        <value>org.apache.hadoop.fs.Hdfs</value>
    </property>
    <property>
        <name>fs.AbstractFileSystem.viewfs.impl</name>
        <value>org.apache.hadoop.fs.viewfs.ViewFs</value>
    </property>
    <property>
        <name>fs.ftp.host</name>
        <value>0.0.0.0</value>
    </property>
    <property>
        <name>fs.ftp.host.port</name>
        <value>21</value>
    </property>
    <property>
        <name>fs.df.interval</name>
        <value>60000</value>
    </property>
    <property>
        <name>fs.du.interval</name>
        <value>600000</value>
    </property>
    <property>
        <name>fs.s3.block.size</name>
        <value>67108864</value>
    </property>
    <property>
        <name>fs.s3.buffer.dir</name>
        <value>${hadoop.tmp.dir}/s3</value>
    </property>
    <property>
        <name>fs.s3.maxRetries</name>
        <value>4</value>
    </property>
    <property>
        <name>fs.s3.sleepTimeSeconds</name>
        <value>10</value>
    </property>
    <property>
        <name>fs.swift.impl</name>
        <value>org.apache.hadoop.fs.swift.snative.SwiftNativeFileSystem</value>
    </property>
    <property>
        <name>fs.automatic.close</name>
        <value>true</value>
    </property>
    <property>
        <name>fs.s3n.block.size</name>
        <value>67108864</value>
    </property>
    <property>
        <name>fs.s3n.multipart.uploads.enabled</name>
        <value>false</value>
    </property>
    <property>
        <name>fs.s3n.multipart.uploads.block.size</name>
        <value>67108864</value>
    </property>
    <property>
        <name>fs.s3n.multipart.copy.block.size</name>
        <value>5.36870912E9</value>
    </property>
    <property>
        <name>fs.s3n.server-side-encryption-algorithm</name>
        <value></value>
    </property>
    <property>
        <name>fs.s3a.access.key</name>
        <value></value>
    </property>
    <property>
        <name>fs.s3a.secret.key</name>
        <value></value>
    </property>
    <property>
        <name>fs.s3a.connection.maximum</name>
        <value>15</value>
    </property>
    <property>
        <name>fs.s3a.connection.ssl.enabled</name>
        <value>true</value>
    </property>
    <property>
        <name>fs.s3a.attempts.maximum</name>
        <value>10</value>
    </property>
    <property>
        <name>fs.s3a.connection.timeout</name>
        <value>5000</value>
    </property>
    <property>
        <name>fs.s3a.paging.maximum</name>
        <value>5000</value>
    </property>
    <property>
        <name>fs.s3a.multipart.size</name>
        <value>104857600</value>
    </property>
    <property>
        <name>fs.s3a.multipart.threshold</name>
        <value>2147483647</value>
    </property>
    <property>
        <name>fs.s3a.acl.default</name>
        <value></value>
    </property>
    <property>
        <name>fs.s3a.multipart.purge</name>
        <value>false</value>
    </property>
    <property>
        <name>fs.s3a.multipart.purge.age</name>
        <value>86400</value>
    </property>
    <property>
        <name>fs.s3a.buffer.dir</name>
        <value>${hadoop.tmp.dir}/s3a</value>
    </property>
    <property>
        <name>fs.s3a.impl</name>
        <value>org.apache.hadoop.fs.s3a.S3AFileSystem</value>
    </property>
    <property>
        <name>io.seqfile.compress.blocksize</name>
        <value>1000000</value>
    </property>
    <property>
        <name>io.seqfile.lazydecompress</name>
        <value>true</value>
    </property>
    <property>
        <name>io.seqfile.sorter.recordlimit</name>
        <value>1000000</value>
    </property>
    <property>
        <name>io.mapfile.bloom.size</name>
        <value>1048576</value>
    </property>
    <property>
        <name>io.mapfile.bloom.error.rate</name>
        <value>0.005</value>
    </property>
    <property>
        <name>hadoop.util.hash.type</name>
        <value>murmur</value>
    </property>
    <property>
        <name>ipc.client.idlethreshold</name>
        <value>4000</value>
    </property>
    <property>
        <name>ipc.client.kill.max</name>
        <value>10</value>
    </property>
    <property>
        <name>ipc.client.connection.maxidletime</name>
        <value>10000</value>
    </property>
    <property>
        <name>ipc.client.connect.retry.interval</name>
        <value>1000</value>
    </property>
    <property>
        <name>ipc.client.connect.timeout</name>
        <value>20000</value>
    </property>
    <property>
        <name>ipc.client.connect.max.retries.on.timeouts</name>
        <value>45</value>
    </property>
    <property>
        <name>ipc.server.listen.queue.size</name>
        <value>45</value>
    </property>
    <property>
        <name>hadoop.security.impersonation.provider.class</name>
        <value></value>
    </property>
    <property>
        <name>hadoop.rpc.socket.factory.class.default</name>
        <value>org.apache.hadoop.net.StandardSocketFactory</value>
    </property>
    <property>
        <name>hadoop.rpc.socket.factory.class.ClientProtocol</name>
        <value></value>
    </property>
    <property>
        <name>hadoop.socks.server</name>
        <value></value>
    </property>
    <property>
        <name>net.topology.node.switch.mapping.impl</name>
        <value>org.apache.hadoop.net.ScriptBasedMapping</value>
    </property>
    <property>
        <name>net.topology.impl</name>
        <value>org.apache.hadoop.net.NetworkTopology</value>
    </property>
    <property>
        <name>net.topology.script.file.name</name>
        <value></value>
    </property>
    <property>
        <name>net.topology.script.number.args</name>
        <value>100</value>
    </property>
    <property>
        <name>net.topology.table.file.name</name>
        <value></value>
    </property>
    <property>
        <name>file.stream-buffer-size</name>
        <value>4096</value>
    </property>
    <property>
        <name>file.bytes-per-checksum</name>
        <value>512</value>
    </property>
    <property>
        <name>file.client-write-packet-size</name>
        <value>65536</value>
    </property>
    <property>
        <name>file.blocksize</name>
        <value>67108864</value>
    </property>
    <property>
        <name>file.replication</name>
        <value>1</value>
    </property>
    <property>
        <name>s3.stream-buffer-size</name>
        <value>4096</value>
    </property>
    <property>
        <name>s3.bytes-per-checksum</name>
        <value>512</value>
    </property>
    <property>
        <name>s3.client-write-packet-size</name>
        <value>65536</value>
    </property>
    <property>
        <name>s3.blocksize</name>
        <value>67108864</value>
    </property>
    <property>
        <name>s3.replication</name>
        <value>3</value>
    </property>
    <property>
        <name>s3native.stream-buffer-size</name>
        <value>4096</value>
    </property>
    <property>
        <name>s3native.bytes-per-checksum</name>
        <value>512</value>
    </property>
    <property>
        <name>s3native.client-write-packet-size</name>
        <value>65536</value>
    </property>
    <property>
        <name>s3native.blocksize</name>
        <value>67108864</value>
    </property>
    <property>
        <name>s3native.replication</name>
        <value>3</value>
    </property>
    <property>
        <name>ftp.stream-buffer-size</name>
        <value>4096</value>
    </property>
    <property>
        <name>ftp.bytes-per-checksum</name>
        <value>512</value>
    </property>
    <property>
        <name>ftp.client-write-packet-size</name>
        <value>65536</value>
    </property>
    <property>
        <name>ftp.blocksize</name>
        <value>67108864</value>
    </property>
    <property>
        <name>ftp.replication</name>
        <value>3</value>
    </property>
    <property>
        <name>tfile.io.chunk.size</name>
        <value>1048576</value>
    </property>
    <property>
        <name>tfile.fs.output.buffer.size</name>
        <value>262144</value>
    </property>
    <property>
        <name>tfile.fs.input.buffer.size</name>
        <value>262144</value>
    </property>
    <property>
        <name>hadoop.http.authentication.type</name>
        <value>simple</value>
    </property>
    <property>
        <name>hadoop.http.authentication.token.validity</name>
        <value>36000</value>
    </property>
    <property>
        <name>hadoop.http.authentication.signature.secret.file</name>
        <value>${user.home}/hadoop-http-auth-signature-secret</value>
    </property>
    <property>
        <name>hadoop.http.authentication.cookie.domain</name>
        <value></value>
    </property>
    <property>
        <name>hadoop.http.authentication.simple.anonymous.allowed</name>
        <value>true</value>
    </property>
    <property>
        <name>hadoop.http.authentication.kerberos.principal</name>
        <value>HTTP/_HOST@LOCALHOST</value>
    </property>
    <property>
        <name>hadoop.http.authentication.kerberos.keytab</name>
        <value>${user.home}/hadoop.keytab</value>
    </property>
    <property>
        <name>dfs.ha.fencing.ssh.connect-timeout</name>
        <value>30000</value>
    </property>
    <property>
        <name>dfs.ha.fencing.ssh.private-key-files</name>
        <value></value>
    </property>
    <property>
        <name>hadoop.http.staticuser.user</name>
        <value>dr.who</value>
    </property>
    <property>
        <name>ha.zookeeper.session-timeout.ms</name>
        <value>5000</value>
    </property>
    <property>
        <name>hadoop.ssl.keystores.factory.class</name>
        <value>org.apache.hadoop.security.ssl.FileBasedKeyStoresFactory</value>
    </property>
    <property>
        <name>hadoop.ssl.require.client.cert</name>
        <value>false</value>
    </property>
    <property>
        <name>hadoop.jetty.logs.serve.aliases</name>
        <value>false</value>
    </property>
    <property>
        <name>fs.permissions.umask-mode</name>
        <value>022</value>
    </property>
    <property>
        <name>ha.health-monitor.connect-retry-interval.ms</name>
        <value>1000</value>
    </property>
    <property>
        <name>ha.health-monitor.check-interval.ms</name>
        <value>1000</value>
    </property>
    <property>
        <name>ha.health-monitor.sleep-after-disconnect.ms</name>
        <value>1000</value>
    </property>
    <property>
        <name>ha.health-monitor.rpc-timeout.ms</name>
        <value>45000</value>
    </property>
    <property>
        <name>ha.failover-controller.new-active.rpc-timeout.ms</name>
        <value>60000</value>
    </property>
    <property>
        <name>ha.failover-controller.graceful-fence.rpc-timeout.ms</name>
        <value>5000</value>
    </property>
    <property>
        <name>ha.failover-controller.graceful-fence.connection.retries</name>
        <value>1</value>
    </property>
    <property>
        <name>ha.failover-controller.cli-check.rpc-timeout.ms</name>
        <value>20000</value>
    </property>
    <property>
        <name>ipc.client.fallback-to-simple-auth-allowed</name>
        <value>false</value>
    </property>
    <property>
        <name>fs.client.resolve.remote.symlinks</name>
        <value>true</value>
    </property>
    <property>
        <name>nfs.exports.allowed.hosts</name>
        <value>* rw</value>
    </property>
    <property>
        <name>hadoop.user.group.static.mapping.overrides</name>
        <value></value>
    </property>
    <property>
        <name>rpc.metrics.quantile.enable</name>
        <value>false</value>
    </property>
    <property>
        <name>rpc.metrics.percentiles.intervals</name>
        <value></value>
    </property>

</configuration>
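
Note how the two files cooperate for high availability: fs.default.name in core-site.xml points clients at the logical nameservice hdfs://hdfs, while dfs.client.failover.proxy.provider.hdfs in hdfs-site.xml selects ConfiguredFailoverProxyProvider, which tries the NameNodes listed in dfs.ha.namenodes.hdfs until it reaches the active one. As a quick sanity check that a client is actually picking these settings up, you can print them after loading the files (a minimal sketch; the local file paths are assumptions):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;

public class PrintHaConfig {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Hypothetical paths: point these at the files saved from the endpoints above.
        conf.addResource(new Path("core-site.xml"));
        conf.addResource(new Path("hdfs-site.xml"));

        System.out.println(conf.get("fs.default.name"));           // hdfs://hdfs
        System.out.println(conf.get("dfs.ha.namenodes.hdfs"));     // name-0-node,name-1-node
        System.out.println(conf.get("dfs.client.failover.proxy.provider.hdfs"));
    }
}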

The DNS names used in these configuration files remain accurate even if nodes in the HDFS cluster are moved to different agent nodes.