<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-->
<!--
OVERVIEW
The important configs. are listed near the top. You should change
at least the setting for hbase.tmp.dir. Other settings will change
depending on whether you are running hbase in standalone mode or
distributed. See the hbase reference guide for requirements and
guidance on making configuration.
This file does not contain all possible configurations. The file would be
much larger if it carried everything. The absent configurations can only be
found by reading the source code. The idea is that such configurations are
exotic and only those who would go to the trouble of reading a particular
section of the code would be knowledgeable or invested enough to ever want
to alter them, so we do not list them here. Listing all possible
configurations would overwhelm and obscure the important ones.
-->
<configuration>
<!--Configs you will likely change are listed here at the top of the file.
-->
<property >
<name>hbase.tmp.dir</name>
<value>${java.io.tmpdir}/hbase-${user.name}</value>
<description>Temporary directory on the local filesystem.
Change this setting to point to a location more permanent
than '/tmp', the usual resolution of java.io.tmpdir, as the
'/tmp' directory is cleared on machine restart.</description>
</property>
<property >
<name>hbase.rootdir</name>
<value>${hbase.tmp.dir}/hbase</value>
<description>The directory shared by region servers and into
which HBase persists. The URL should be 'fully-qualified'
to include the filesystem scheme. For example, to specify the
HDFS directory '/hbase' where the HDFS instance's namenode is
running at namenode.example.org on port 9000, set this value to:
hdfs://namenode.example.org:9000/hbase. By default, we write
to whatever ${hbase.tmp.dir} is set to -- usually /tmp --
so change this configuration or else all data will be lost on
machine restart.</description>
</property>
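<!--
For illustration only -- a minimal hbase-site.xml override pointing hbase.rootdir
at the HDFS example from the description above (namenode.example.org:9000 is a
placeholder hostname and port, not a real default):
<property>
<name>hbase.rootdir</name>
<value>hdfs://namenode.example.org:9000/hbase</value>
</property>
-->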
<property >
<name>hbase.cluster.distributed</name>
<value>false</value>
<description>The mode the cluster will be in. Possible values are
false for standalone mode and true for distributed mode. If
false, startup will run all HBase and ZooKeeper daemons together
in the one JVM.</description>
</property>
<property>
<name>hbase.zookeeper.quorum</name>
<value>localhost</value>
<description>Comma separated list of servers in the ZooKeeper ensemble
(This config. should have been named hbase.zookeeper.ensemble).
For example, "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com".
By default this is set to localhost for local and pseudo-distributed modes
of operation. For a fully-distributed setup, this should be set to a full
list of ZooKeeper ensemble servers. If HBASE_MANAGES_ZK is set in hbase-env.sh
this is the list of servers which hbase will start/stop ZooKeeper on as
part of cluster start/stop. Client-side, we will take this list of
ensemble members and put it together with the hbase.zookeeper.property.clientPort
config. and pass it into zookeeper constructor as the connectString
parameter.</description>
</property>
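<!--
For illustration only -- a fully-distributed quorum sketch using the placeholder
hostnames from the description above. Client-side, this list is combined with
hbase.zookeeper.property.clientPort (default 2181) into a ZooKeeper connectString
such as host1.mydomain.com:2181,host2.mydomain.com:2181,host3.mydomain.com:2181.
<property>
<name>hbase.zookeeper.quorum</name>
<value>host1.mydomain.com,host2.mydomain.com,host3.mydomain.com</value>
</property>
-->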
<!--The above are the important configurations for getting hbase up
and running -->
<property>
<name>zookeeper.recovery.retry.maxsleeptime</name>
<value>60000</value>
<description>Maximum sleep time in milliseconds before retrying ZooKeeper operations.
A maximum is needed here so that the sleep time does not grow unboundedly.
</description>
</property>
<property>
<name>hbase.local.dir</name>
<value>${hbase.tmp.dir}/local/</value>
<description>Directory on the local filesystem to be used
as local storage.</description>
</property>
<!--Master configurations-->
<property >
<name>hbase.master.port</name>
<value>16000</value>
<description>The port the HBase Master should bind to.</description>
</property>
<property>
<name>hbase.master.info.port</name>
<value>16010</value>
<description>The port for the HBase Master web UI.
Set to -1 if you do not want a UI instance to run.</description>
</property>
<property>
<name>hbase.master.info.bindAddress</name>
<value>0.0.0.0</value>
<description>The bind address for the HBase Master web UI.
</description>
</property>
<property>
<name>hbase.master.logcleaner.plugins</name>
<value>org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner,org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner</value>
<description>A comma-separated list of BaseLogCleanerDelegate classes invoked by
the LogsCleaner service. These WAL cleaners are called in order,
so put the cleaner that prunes the most files in front. To
implement your own BaseLogCleanerDelegate, just put it in HBase's classpath
and add the fully qualified class name here. Always add the above
default log cleaners in the list.</description>
</property>
<property>
<name>hbase.master.logcleaner.ttl</name>
<value>600000</value>
<description>How long a WAL remains in the archive ({hbase.rootdir}/oldWALs) directory,
after which it will be cleaned by a Master thread. The value is in milliseconds.</description>
</property>
<property>
<name>hbase.master.procedurewalcleaner.ttl</name>
<value>604800000</value>
<description>How long a Procedure WAL will remain in the
archive directory, after which it will be cleaned
by a Master thread. The value is in milliseconds.</description>
</property>
<property>
<name>hbase.master.hfilecleaner.plugins</name>
<value>org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner</value>
<description>A comma-separated list of BaseHFileCleanerDelegate classes invoked by
the HFileCleaner service. These HFile cleaners are called in order,
so put the cleaner that prunes the most files in front. To
implement your own BaseHFileCleanerDelegate, just put it in HBase's classpath
and add the fully qualified class name here. Always include the above
default hfile cleaners in the list, as setting this property in
hbase-site.xml overrides the default.</description>
</property>
<property>
<name>hbase.master.infoserver.redirect</name>
<value>true</value>
<description>Whether or not the Master listens to the Master web
UI port (hbase.master.info.port) and redirects requests to the web
UI server shared by the Master and RegionServer. This configuration only makes
sense when the Master is serving Regions (not the default).</description>
</property>
<property>
<name>hbase.master.fileSplitTimeout</name>
<value>600000</value>
<description>When splitting a region, how long to wait on the file-splitting
step before aborting the attempt. Default: 600000. This setting used
to be known as hbase.regionserver.fileSplitTimeout in hbase-1.x.
Split is now run master-side, hence the rename. (If a legacy
'hbase.regionserver.fileSplitTimeout' setting is found, it will be used to
prime the current 'hbase.master.fileSplitTimeout'
Configuration.)</description>
</property>
<!--RegionServer configurations-->
<property>
<name>hbase.regionserver.port</name>
<value>16020</value>
<description>The port the HBase RegionServer binds to.</description>
</property>
<property>
<name>hbase.regionserver.info.port</name>
<value>16030</value>
<description>The port for the HBase RegionServer web UI.
Set to -1 if you do not want the RegionServer UI to run.</description>
</property>
<property>
<name>hbase.regionserver.info.bindAddress</name>
<value>0.0.0.0</value>
<description>The address for the HBase RegionServer web UI</description>
</property>
<property>
<name>hbase.regionserver.info.port.auto</name>
<value>false</value>
<description>Whether or not the Master or RegionServer
UI should search for a port to bind to. Enables automatic port
search if hbase.regionserver.info.port is already in use.
Useful for testing, turned off by default.</description>
</property>
<property>
<name>hbase.regionserver.handler.count</name>
<value>30</value>
<description>Count of RPC Listener instances spun up on RegionServers.
The same property is used by the Master for the count of master handlers.
Too many handlers can be counter-productive. Make it a multiple of
CPU count. If the workload is mostly read-only, a handler count close to the
CPU count does well. Start with twice the CPU count and tune from there.</description>
</property>
<property>
<name>hbase.ipc.server.callqueue.handler.factor</name>
<value>0.1</value>
<description>Factor to determine the number of call queues.
A value of 0 means a single queue shared between all the handlers.
A value of 1 means that each handler has its own queue.</description>
</property>
<property>
<name>hbase.ipc.server.callqueue.read.ratio</name>
<value>0</value>
<description>Split the call queues into read and write queues.
The specified interval (which should be between 0.0 and 1.0)
will be multiplied by the number of call queues.
A value of 0 indicates that the call queues are not split, meaning that both read and write
requests will be pushed to the same set of queues.
A value lower than 0.5 means that there will be fewer read queues than write queues.
A value of 0.5 means there will be the same number of read and write queues.
A value greater than 0.5 means that there will be more read queues than write queues.
A value of 1.0 means that all the queues except one are used to dispatch read requests.
Example: Given the total number of call queues being 10
a read.ratio of 0 means that: the 10 queues will contain both read/write requests.
a read.ratio of 0.3 means that: 3 queues will contain only read requests
and 7 queues will contain only write requests.
a read.ratio of 0.5 means that: 5 queues will contain only read requests
and 5 queues will contain only write requests.
a read.ratio of 0.8 means that: 8 queues will contain only read requests
and 2 queues will contain only write requests.
a read.ratio of 1 means that: 9 queues will contain only read requests
and 1 queue will contain only write requests.
</description>
</property>
<property>
<name>hbase.ipc.server.callqueue.scan.ratio</name>
<value>0</value>
<description>Given the number of read call queues, calculated from the total number
of call queues multiplied by the callqueue.read.ratio, the scan.ratio property
will split the read call queues into small-read and long-read queues.
A value lower than 0.5 means that there will be fewer long-read queues than short-read queues.
A value of 0.5 means that there will be the same number of short-read and long-read queues.
A value greater than 0.5 means that there will be more long-read queues than short-read queues.
A value of 0 or 1 indicates that the same set of queues is used for gets and scans.
Example: Given the total number of read call queues being 8
a scan.ratio of 0 or 1 means that: 8 queues will contain both long and short read requests.
a scan.ratio of 0.3 means that: 2 queues will contain only long-read requests
and 6 queues will contain only short-read requests.
a scan.ratio of 0.5 means that: 4 queues will contain only long-read requests
and 4 queues will contain only short-read requests.
a scan.ratio of 0.8 means that: 6 queues will contain only long-read requests
and 2 queues will contain only short-read requests.
</description>
</property>
<property>
<name>hbase.regionserver.msginterval</name>
<value>3000</value>
<description>Interval between messages from the RegionServer to Master
in milliseconds.</description>
</property>
<property>
<name>hbase.regionserver.logroll.period</name>
<value>3600000</value>
<description>Period at which we will roll the commit log regardless
of how many edits it has.</description>
</property>
<property>
<name>hbase.regionserver.logroll.errors.tolerated</name>
<value>2</value>
<description>The number of consecutive WAL close errors we will allow
before triggering a server abort. A setting of 0 will cause the
region server to abort if closing the current WAL writer fails during
log rolling. Even a small value (2 or 3) will allow a region server
to ride over transient HDFS errors.</description>
</property>
<property>
<name>hbase.regionserver.hlog.reader.impl</name>
<value>org.apache.hadoop.hbase.regionserver.wal.ProtobufLogReader</value>
<description>The WAL file reader implementation.</description>
</property>
<property>
<name>hbase.regionserver.hlog.writer.impl</name>
<value>org.apache.hadoop.hbase.regionserver.wal.ProtobufLogWriter</value>
<description>The WAL file writer implementation.</description>
</property>
<property>
<name>hbase.regionserver.global.memstore.size</name>
<value></value>
<description>Maximum size of all memstores in a region server before new
updates are blocked and flushes are forced. Defaults to 40% of heap (0.4).
Updates are blocked and flushes are forced until size of all memstores
in a region server hits hbase.regionserver.global.memstore.size.lower.limit.
The default value in this configuration has been intentionally left empty in order to
honor the old hbase.regionserver.global.memstore.upperLimit property if present.
</description>
</property>
<property>
<name>hbase.regionserver.global.memstore.size.lower.limit</name>
<value></value>
<description>Maximum size of all memstores in a region server before flushes
are forced. Defaults to 95% of hbase.regionserver.global.memstore.size
(0.95). A value of 100% causes the minimum possible flushing
to occur when updates are blocked due to memstore limiting. The default
value in this configuration has been intentionally left empty in order to
honor the old hbase.regionserver.global.memstore.lowerLimit property if
present.
</description>
</property>
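<!--
Worked example of the two global memstore limits above, assuming a 16 GB
region server heap and the documented defaults: the upper limit is
16 GB * 0.4 = 6.4 GB of total memstore, at which point updates are blocked;
the lower limit is 6.4 GB * 0.95 = ~6.08 GB, at which point flushes are forced.
-->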
<property>
<name>hbase.systemtables.compacting.memstore.type</name>
<value>NONE</value>
<description>Determines the type of memstore to be used for system tables like
META, namespace tables etc. By default NONE is the type and hence we use the
default memstore for all the system tables. If we need to use a compacting
memstore for system tables, then set this property to BASIC or EAGER.
</description>
</property>
<property>
<name>hbase.regionserver.optionalcacheflushinterval</name>
<value>3600000</value>
<description>
Maximum amount of time an edit lives in memory before being automatically flushed.
Default 1 hour. Set it to 0 to disable automatic flushing.
</description>
</property>
<property>
<name>hbase.regionserver.dns.interface</name>
<value>default</value>
<description>The name of the Network Interface from which a region server
should report its IP address.</description>
</property>
<property>
<name>hbase.regionserver.dns.nameserver</name>
<value>default</value>
<description>The host name or IP address of the name server (DNS)
which a region server should use to determine the host name used by the
master for communication and display purposes.</description>
</property>
<property>
<name>hbase.regionserver.region.split.policy</name>
<value>org.apache.hadoop.hbase.regionserver.SteppingSplitPolicy</value>
<description>
A split policy determines when a region should be split. The various
other split policies that are available currently are BusyRegionSplitPolicy,
ConstantSizeRegionSplitPolicy, DisabledRegionSplitPolicy,
DelimitedKeyPrefixRegionSplitPolicy, KeyPrefixRegionSplitPolicy, and
SteppingSplitPolicy. DisabledRegionSplitPolicy blocks manual region splitting.
</description>
</property>
<property>
<name>hbase.regionserver.regionSplitLimit</name>
<value>1000</value>
<description>
Limit for the number of regions after which no more region splitting
should take place. This is not a hard limit for the number of regions
but acts as a guideline for the regionserver to stop splitting after
a certain limit. Default is set to 1000.
</description>
</property>
<!--ZooKeeper configuration-->
<property>
<name>zookeeper.session.timeout</name>
<value>90000</value>
<description>ZooKeeper session timeout in milliseconds. It is used in two different ways.
First, this value is used in the ZK client that HBase uses to connect to the ensemble.
It is also used by HBase when it starts a ZK server and it is passed as the 'maxSessionTimeout'.
See http://hadoop.apache.org/zookeeper/docs/current/zookeeperProgrammers.html#ch_zkSessions.
For example, if an HBase region server connects to a ZK ensemble that's also managed
by HBase, then the session timeout will be the one specified by this configuration.
But, a region server that connects to an ensemble managed with a different configuration
will be subject to that ensemble's maxSessionTimeout. So, even though HBase might propose
using 90 seconds, the ensemble can have a max timeout lower than this and it will take
precedence. The current default that ZK ships with is 40 seconds, which is lower than
HBase's.
</description>
</property>
<property>
<name>zookeeper.znode.parent</name>
<value>/hbase</value>
<description>Root ZNode for HBase in ZooKeeper. All of HBase's ZooKeeper
files that are configured with a relative path will go under this node.
By default, all of HBase's ZooKeeper file paths are configured with a
relative path, so they will all go under this directory unless changed.
</description>
</property>
<property>
<name>zookeeper.znode.acl.parent</name>
<value>acl</value>
<description>Root ZNode for access control lists.</description>
</property>
<property>
<name>hbase.zookeeper.dns.interface</name>
<value>default</value>
<description>The name of the Network Interface from which a ZooKeeper server
should report its IP address.</description>
</property>
<property>
<name>hbase.zookeeper.dns.nameserver</name>
<value>default</value>
<description>The host name or IP address of the name server (DNS)
which a ZooKeeper server should use to determine the host name used by the
master for communication and display purposes.</description>
</property>
<!--
The following three properties are used together to create the list of
host:peer_port:leader_port quorum servers for ZooKeeper.
-->
<property>
<name>hbase.zookeeper.peerport</name>
<value>2888</value>
<description>Port used by ZooKeeper peers to talk to each other.
See http://hadoop.apache.org/zookeeper/docs/r3.1.1/zookeeperStarted.html#sc_RunningReplicatedZooKeeper
for more information.</description>
</property>
<property>
<name>hbase.zookeeper.leaderport</name>
<value>3888</value>
<description>Port used by ZooKeeper for leader election.
See http://hadoop.apache.org/zookeeper/docs/r3.1.1/zookeeperStarted.html#sc_RunningReplicatedZooKeeper
for more information.</description>
</property>
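<!--
For illustration, with the default ports above a quorum member named
zk1.example.com (a placeholder hostname) corresponds to the ZooKeeper server
entry zk1.example.com:2888:3888, i.e. host:peer_port:leader_port.
-->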
<!-- End of properties used to generate ZooKeeper host:port quorum list. -->
<!--
Beginning of properties that are directly mapped from ZooKeeper's zoo.cfg.
All properties with an "hbase.zookeeper.property." prefix are converted for
ZooKeeper's configuration. Hence, if you want to add an option from zoo.cfg,
e.g. "initLimit=10" you would append the following to your configuration:
<property>
<name>hbase.zookeeper.property.initLimit</name>
<value>10</value>
</property>
-->
<property>
<name>hbase.zookeeper.property.initLimit</name>
<value>10</value>
<description>Property from ZooKeeper's config zoo.cfg.
The number of ticks that the initial synchronization phase can take.</description>
</property>
<property>
<name>hbase.zookeeper.property.syncLimit</name>
<value>5</value>
<description>Property from ZooKeeper's config zoo.cfg.
The number of ticks that can pass between sending a request and getting an
acknowledgment.</description>
</property>
<property>
<name>hbase.zookeeper.property.dataDir</name>
<value>${hbase.tmp.dir}/zookeeper</value>
<description>Property from ZooKeeper's config zoo.cfg.
The directory where the snapshot is stored.</description>
</property>
<property>
<name>hbase.zookeeper.property.clientPort</name>
<value>2181</value>
<description>Property from ZooKeeper's config zoo.cfg.
The port at which the clients will connect.</description>
</property>
<property>
<name>hbase.zookeeper.property.maxClientCnxns</name>
<value>300</value>
<description>Property from ZooKeeper's config zoo.cfg.
Limit on number of concurrent connections (at the socket level) that a
single client, identified by IP address, may make to a single member of
the ZooKeeper ensemble. Set this high to avoid ZooKeeper connection issues when
running standalone and pseudo-distributed.</description>
</property>
<!-- End of properties that are directly mapped from ZooKeeper's zoo.cfg -->
<!--Client configurations-->
<property>
<name>hbase.client.write.buffer</name>
<value>2097152</value>
<description>Default size of the BufferedMutator write buffer in bytes.
A bigger buffer takes more memory -- on both the client and server
side since server instantiates the passed write buffer to process
it -- but a larger buffer size reduces the number of RPCs made.
For an estimate of server-side memory-used, evaluate
hbase.client.write.buffer * hbase.regionserver.handler.count</description>
</property>
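<!--
Worked example of the server-side memory estimate above, using the defaults:
hbase.client.write.buffer (2097152 bytes, 2 MB) * hbase.regionserver.handler.count (30)
= roughly 60 MB of potential write-buffer memory per region server.
-->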
<property>
<name>hbase.client.pause</name>
<value>100</value>
<description>General client pause value. Used mostly as value to wait
before running a retry of a failed get, region lookup, etc.
See hbase.client.retries.number for description of how we backoff from
this initial pause amount and how this pause works w/ retries.</description>
</property>
<property>
<name>hbase.client.pause.cqtbe</name>
<value></value>
<description>An optional special client pause used in place of hbase.client.pause for
CallQueueTooBigException (cqtbe). Set this property to a higher value
than hbase.client.pause if you observe frequent CQTBE from the same
RegionServer and the call queue there stays full.</description>
</property>
<property>
<name>hbase.client.retries.number</name>
<value>15</value>
<description>Maximum retries. Used as maximum for all retryable
operations such as the getting of a cell's value, starting a row update,
etc. Retry interval is a rough function based on hbase.client.pause. At
first we retry at this interval but then with backoff, we pretty quickly reach
retrying every ten seconds. See HConstants#RETRY_BACKOFF for how the backoff
ramps up. Change this setting and hbase.client.pause to suit your workload.</description>
</property>
<property>
<name>hbase.client.max.total.tasks</name>
<value>100</value>
<description>The maximum number of concurrent mutation tasks a single HTable instance will
send to the cluster.</description>
</property>
<property>
<name>hbase.client.max.perserver.tasks</name>
<value>2</value>
<description>The maximum number of concurrent mutation tasks a single HTable instance will
send to a single region server.</description>
</property>
<property>
<name>hbase.client.max.perregion.tasks</name>
<value>1</value>
<description>The maximum number of concurrent mutation tasks the client will
maintain to a single Region. That is, if there are already
hbase.client.max.perregion.tasks writes in progress for this region, new puts
won't be sent to this region until some writes finish.</description>
</property>
<property>
<name>hbase.client.perserver.requests.threshold</name>
<value>2147483647</value>
<description>The max number of concurrent pending requests for one server in all client threads
(process level). Requests exceeding this limit are rejected immediately with a
ServerTooBusyException, to prevent the user's threads from being occupied and blocked by a
single slow region server. If you use a fixed number of threads to access HBase in a
synchronous way, setting this to a suitable value related to that number of threads will
help you. See https://issues.apache.org/jira/browse/HBASE-16388 for details.</description>
</property>
<property>
<name>hbase.client.scanner.caching</name>
<value>2147483647</value>
<description>Number of rows that we try to fetch when calling next
on a scanner if it is not served from (local, client) memory. This configuration
works together with hbase.client.scanner.max.result.size to try and use the
network efficiently. The default value is Integer.MAX_VALUE so that
the network will fill the chunk size defined by hbase.client.scanner.max.result.size
rather than be limited by a particular number of rows since the size of rows varies
table to table. If you know ahead of time that you will not require more than a certain
number of rows from a scan, this configuration should be set to that row limit via
Scan#setCaching. Higher caching values will enable faster scanners but will eat up more
memory and some calls of next may take longer and longer times when the cache is empty.
Do not set this value such that the time between invocations is greater than the scanner
timeout; i.e. hbase.client.scanner.timeout.period</description>
</property>
<property>
<name>hbase.client.keyvalue.maxsize</name>
<value>10485760</value>
<description>Specifies the combined maximum allowed size of a KeyValue
instance. This sets an upper boundary for a single entry saved in a
storage file. Since such entries cannot be split, this helps avoid a situation
where a region cannot be split any further because the data is too large. It
seems wise to set this to a fraction of the maximum region size. Setting it to
zero or less disables the check.</description>
</property>
<property>
<name>hbase.server.keyvalue.maxsize</name>
<value>10485760</value>
<description>Maximum allowed size of an individual cell, inclusive of value and all key
components. A value of 0 or less disables the check.
The default value is 10MB.
This is a safety setting to protect the server from OOM situations.
</description>
</property>
<property>
<name>hbase.client.scanner.timeout.period</name>
<value>60000</value>
<description>Client scanner lease period in milliseconds.</description>
</property>
<property>
<name>hbase.client.localityCheck.threadPoolSize</name>
<value>2</value>
</property>
<!--Miscellaneous configuration-->
<property>
<name>hbase.bulkload.retries.number</name>
<value>10</value>
<description>Maximum retries. This is the maximum number of iterations
of atomic bulk loads that are attempted in the face of splitting operations.
0 means never give up.</description>
</property>
<property>
<name>hbase.master.balancer.maxRitPercent</name>
<value>1.0</value>
<description>The max percent of regions in transition when balancing.
The default value is 1.0, so there is no balancer throttling. If this config is set to 0.01,
it means that at most 1% of regions are in transition when balancing.
Then the cluster's availability is at least 99% when balancing.</description>
</property>
<property>
<name>hbase.balancer.period</name>
<value>300000</value>
<description>Period at which the region balancer runs in the Master.</description>
</property>
<property>
<name>hbase.normalizer.period</name>
<value>300000</value>
<description>Period at which the region normalizer runs in the Master.</description>
</property>
<property>
<name>hbase.normalizer.min.region.count</name>
<value>3</value>
<description>The minimum number of regions a table must have before the region
normalizer considers it for normalization.</description>
</property>
<property>
<name>hbase.regions.slop</name>
<value>0.001</value>
<description>Rebalance if any regionserver has average + (average * slop) regions.
The default value of this parameter is 0.001 in StochasticLoadBalancer (the default load balancer),
while the default is 0.2 in other load balancers (i.e., SimpleLoadBalancer).</description>
</property>
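<!--
Worked example of the slop formula above: with an average of 100 regions per
region server and the SimpleLoadBalancer default slop of 0.2, rebalancing is
triggered once any server holds more than 100 + (100 * 0.2) = 120 regions.
With the StochasticLoadBalancer default of 0.001 the threshold is effectively
the average itself.
-->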
<property>
<name>hbase.server.thread.wakefrequency</name>
<value>10000</value>
<description>Time to sleep in between searches for work (in milliseconds).
Used as sleep interval by service threads such as log roller.</description>
</property>
<property>
<name>hbase.server.versionfile.writeattempts</name>
<value>3</value>
<description>
How many times to retry attempting to write a version file
before just aborting. Each attempt is separated by
hbase.server.thread.wakefrequency milliseconds.</description>
</property>
<property>
<name>hbase.hregion.memstore.flush.size</name>
<value>134217728</value>
<description>
A memstore will be flushed to disk if the size of the memstore
exceeds this number of bytes. Value is checked by a thread that runs
every hbase.server.thread.wakefrequency.</description>
</property>
<property>
<name>hbase.hregion.percolumnfamilyflush.size.lower.bound.min</name>
<value>16777216</value>
<description>
If FlushLargeStoresPolicy is used and there are multiple column families,
then every time that we hit the total memstore limit, we find out all the
column families whose memstores exceed a "lower bound" and only flush them
while retaining the others in memory. The "lower bound" will be
"hbase.hregion.memstore.flush.size / column_family_number" by default
unless value of this property is larger than that. If none of the families
have their memstore size more than lower bound, all the memstores will be
flushed (just as usual).
</description>
</property>
<property>
<name>hbase.hregion.preclose.flush.size</name>
<value>5242880</value>
<description>
If the memstores in a region are this size or larger when we go
to close, run a "pre-flush" to clear out memstores before we put up
the region closed flag and take the region offline. On close,
a flush is run under the close flag to empty memory. During
this time the region is offline and we are not taking on any writes.
If the memstore content is large, this flush could take a long time to
complete. The preflush is meant to clean out the bulk of the memstore
before putting up the close flag and taking the region offline so the
flush that runs under the close flag has little to do.</description>
</property>
<property>
<name>hbase.hregion.memstore.block.multiplier</name>
<value>4</value>
<description>
Block updates if the memstore reaches hbase.hregion.memstore.block.multiplier
times hbase.hregion.memstore.flush.size bytes. Useful for preventing a
runaway memstore during spikes in update traffic. Without an
upper-bound, the memstore fills such that when it flushes, the
resultant flush files take a long time to compact or split, or
worse, we OOME.</description>
</property>
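<!--
Worked example of the blocking threshold above, using the defaults: updates to
a region are blocked once its memstore reaches
4 * 134217728 bytes (128 MB) = 536870912 bytes (512 MB).
-->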
<property>
<name>hbase.hregion.memstore.mslab.enabled</name>
<value>true</value>
<description>
Enables the MemStore-Local Allocation Buffer,
a feature which works to prevent heap fragmentation under
heavy write loads. This can reduce the frequency of stop-the-world
GC pauses on large heaps.</description>
</property>
<property>
<name>hbase.hregion.max.filesize</name>
<value>10737418240</value>
<description>
Maximum HFile size. If the sum of the sizes of a region's HFiles has grown to exceed this
value, the region is split in two.</description>
</property>
<property>
<name>hbase.hregion.majorcompaction</name>
<value>604800000</value>
<description>Time between major compactions, expressed in milliseconds. Set to 0 to disable
time-based automatic major compactions. User-requested and size-based major compactions will
still run. This value is multiplied by hbase.hregion.majorcompaction.jitter to cause
compaction to start at a somewhat-random time during a given window of time. The default value
is 7 days, expressed in milliseconds. If major compactions are causing disruption in your
environment, you can configure them to run at off-peak times for your deployment, or disable
time-based major compactions by setting this parameter to 0, and run major compactions in a
cron job or by another external mechanism.</description>
</property>
<property>
<name>hbase.hregion.majorcompaction.jitter</name>
<value>0.50</value>
<description>A multiplier applied to hbase.hregion.majorcompaction to cause compaction to occur
a given amount of time either side of hbase.hregion.majorcompaction. The smaller the number,
the closer the compactions will happen to the hbase.hregion.majorcompaction
interval.</description>
</property>
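<!--
Worked example, based on the two descriptions above: with the default period
of 604800000 ms (7 days) and a jitter of 0.50, a time-based major compaction
is scheduled somewhere between roughly 3.5 and 10.5 days after the previous one.
-->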
<property>
<name>hbase.hstore.compactionThreshold</name>
<value>3</value>
<description> If more than this number of StoreFiles exist in any one Store
(one StoreFile is written per flush of MemStore), a compaction is run to rewrite all
StoreFiles into a single StoreFile. Larger values delay compaction, but when compaction does
occur, it takes longer to complete.</description>
</property>
<property>
<name>hbase.hstore.flusher.count</name>
<value>2</value>
<description> The number of flush threads. With fewer threads, the MemStore flushes will be
queued. With more threads, the flushes will be executed in parallel, increasing the load on
HDFS, and potentially causing more compactions. </description>
</property>
<property>
<name>hbase.hstore.blockingStoreFiles</name>
<value>16</value>
<description> If more than this number of StoreFiles exist in any one Store (one StoreFile
is written per flush of MemStore), updates are blocked for this region until a compaction is
completed, or until hbase.hstore.blockingWaitTime has been exceeded.</description>
</property>
<property>
<name>hbase.hstore.blockingWaitTime</name>
<value>90000</value>
<description> The time for which a region will block updates after reaching the StoreFile limit
defined by hbase.hstore.blockingStoreFiles. After this time has elapsed, the region will stop
blocking updates even if a compaction has not been completed.</description>
</property>
<property>
<name>hbase.hstore.compaction.min</name>
<value>3</value>
<description>The minimum number of StoreFiles which must be eligible for compaction before
compaction can run. The goal of tuning hbase.hstore.compaction.min is to avoid ending up with
too many tiny StoreFiles to compact. Setting this value to 2 would cause a minor compaction
each time you have two StoreFiles in a Store, and this is probably not appropriate. If you
set this value too high, all the other values will need to be adjusted accordingly. For most
cases, the default value is appropriate. In previous versions of HBase, the parameter
hbase.hstore.compaction.min was named hbase.hstore.compactionThreshold.</description>
</property>
<property>
<name>hbase.hstore.compaction.max</name>
<value>10</value>
<description>The maximum number of StoreFiles which will be selected for a single minor
compaction, regardless of the number of eligible StoreFiles. Effectively, the value of
hbase.hstore.compaction.max controls the length of time it takes a single compaction to
complete. Setting it larger means that more StoreFiles are included in a compaction. For most
cases, the default value is appropriate.</description>
</property>
<property>
<name>hbase.hstore.compaction.min.size</name>
<value>134217728</value>
<description>A StoreFile (or a selection of StoreFiles, when using ExploringCompactionPolicy)
smaller than this size will always be eligible for minor compaction.
HFiles this size or larger are evaluated by hbase.hstore.compaction.ratio to determine if
they are eligible. Because this limit represents the "automatic include" limit for all
StoreFiles smaller than this value, this value may need to be reduced in write-heavy
environments where many StoreFiles in the 1-2 MB range are being flushed, because every
StoreFile will be targeted for compaction and the resulting StoreFiles may still be under the
minimum size and require further compaction. If this parameter is lowered, the ratio check is
triggered more quickly. This addressed some issues seen in earlier versions of HBase but
changing this parameter is no longer necessary in most situations. Default: 128 MB expressed
in bytes.</description>
</property>
<property>
<name>hbase.hstore.compaction.max.size</name>
<value>9223372036854775807</value>
<description>A StoreFile (or a selection of StoreFiles, when using ExploringCompactionPolicy)
larger than this size will be excluded from compaction. The effect of
raising hbase.hstore.compaction.max.size is fewer, larger StoreFiles that do not get
compacted often. If you feel that compaction is happening too often without much benefit, you
can try raising this value. Default: the value of LONG.MAX_VALUE, expressed in bytes.</description>
</property>
<property>
<name>hbase.hstore.compaction.ratio</name>
<value>1.2F</value>
<description>For minor compaction, this ratio is used to determine whether a given StoreFile
which is larger than hbase.hstore.compaction.min.size is eligible for compaction. Its
effect is to limit compaction of large StoreFiles. The value of hbase.hstore.compaction.ratio
is expressed as a floating-point decimal. A large ratio, such as 10, will produce a single
giant StoreFile. Conversely, a low value, such as .25, will produce behavior similar to the
BigTable compaction algorithm, producing four StoreFiles. A moderate value of between 1.0 and
1.4 is recommended. When tuning this value, you are balancing write costs with read costs.
Raising the value (to something like 1.4) will have more write costs, because you will
compact larger StoreFiles. However, during reads, HBase will need to seek through fewer
StoreFiles to accomplish the read. Consider this approach if you cannot take advantage of
Bloom filters. Otherwise, you can lower this value to something like 1.0 to reduce the
background cost of writes, and use Bloom filters to control the number of StoreFiles touched
during reads. For most cases, the default value is appropriate.</description>
</property>
<property>
<name>hbase.hstore.compaction.ratio.offpeak</name>
<value>5.0F</value>
<description>Allows you to set a different (by default, more aggressive) ratio for determining
whether larger StoreFiles are included in compactions during off-peak hours. Works in the
same way as hbase.hstore.compaction.ratio. Only applies if hbase.offpeak.start.hour and
hbase.offpeak.end.hour are also enabled.</description>
</property>
<property>
<name>hbase.hstore.time.to.purge.deletes</name>
<value>0</value>
<description>The amount of time to delay purging of delete markers with future timestamps. If
unset, or set to 0, all delete markers, including those with future timestamps, are purged
during the next major compaction. Otherwise, a delete marker is kept until the major compaction
which occurs after the marker's timestamp plus the value of this setting, in milliseconds.
</description>
</property>
<property>
<name>hbase.offpeak.start.hour</name>
<value>-1</value>
<description>The start of off-peak hours, expressed as an integer between 0 and 23, inclusive.
Set to -1 to disable off-peak.</description>
</property>
<property>
<name>hbase.offpeak.end.hour</name>
<value>-1</value>
<description>The end of off-peak hours, expressed as an integer between 0 and 23, inclusive. Set
to -1 to disable off-peak.</description>
</property>
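<!--
For illustration, setting hbase.offpeak.start.hour=20 and hbase.offpeak.end.hour=6
marks the hours from 20:00 through 06:00 (hour-of-day on the region server) as
off-peak, during which hbase.hstore.compaction.ratio.offpeak is used instead of
hbase.hstore.compaction.ratio.
-->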
<property>
<name>hbase.regionserver.thread.compaction.throttle</name>
<value>2684354560</value>
<description>There are two different thread pools for compactions, one for large compactions and
the other for small compactions. This helps to keep compaction of lean tables (such as
hbase:meta) fast. If a compaction is larger than this threshold, it
goes into the large compaction pool. In most cases, the default value is appropriate. Default:
2 x hbase.hstore.compaction.max x hbase.hregion.memstore.flush.size (which defaults to 128MB).
The value field assumes that the value of hbase.hregion.memstore.flush.size is unchanged from
the default.</description>
</property>
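<!--
Worked check of the default above: 2 * hbase.hstore.compaction.max (10) *
hbase.hregion.memstore.flush.size (134217728 bytes) = 2684354560 bytes, the
value listed for hbase.regionserver.thread.compaction.throttle; compactions
larger than this go to the large compaction pool.
-->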
<property>
<name>hbase.regionserver.majorcompaction.pagecache.drop</name>
<value>true</value>
<description>Specifies whether to drop pages read/written into the system page cache by
major compactions. Setting it to true helps prevent major compactions from
polluting the page cache, which is almost always required, especially for clusters
with low/moderate memory to storage ratio.</description>
</property>
<property>
<name>hbase.regionserver.minorcompaction.pagecache.drop</name>
<value>true</value>
<description>Specifies whether to drop pages read/written into the system page cache by
minor compactions. Setting it to true helps prevent minor compactions from
polluting the page cache, which is most beneficial on clusters with low
memory to storage ratio or very write heavy clusters. You may want to set it to
false under moderate to low write workload when bulk of the reads are
on the most recently written data.</description>
</property>
<property>
<name>hbase.hstore.compaction.kv.max</name>
<value>10</value>
<description>The maximum number of KeyValues to read and then write in a batch when flushing or
compacting. Set this lower if you have big KeyValues and problems with Out Of Memory
Exceptions. Set this higher if you have wide, small rows.</description>
</property>
<property>
<name>hbase.storescanner.parallel.seek.enable</name>
<value>false</value>
<description>
Enables StoreFileScanner parallel-seeking in StoreScanner,
a feature which can reduce response latency under special conditions.</description>
</property>
<property>
<name>hbase.storescanner.parallel.seek.threads</name>
<value>10</value>
<description>
The default thread pool size if the parallel-seeking feature is enabled.</description>
</property>
<property>
<name>hfile.block.cache.size</name>
<value>0.4</value>
<description>Percentage of maximum heap (-Xmx setting) to allocate to block cache
used by a StoreFile. Default of 0.4 means allocate 40%.
Set to 0 to disable but it's not recommended; you need at least
enough cache to hold the storefile indices.</description>
</property>
<property>
<name>hfile.block.index.cacheonwrite</name>
<value>false</value>
<description>This allows putting non-root multi-level index blocks into the block
cache at the time the index is being written.</description>
</property>
<property>
<name>hfile.index.block.max.size</name>
<value>131072</value>
<description>When the size of a leaf-level, intermediate-level, or root-level
index block in a multi-level block index grows to this size, the
block is written out and a new block is started.</description>
</property>
<property>
<name>hbase.bucketcache.ioengine</name>
<value></value>
<description>Where to store the contents of the bucketcache. One of: offheap,
file, files or mmap. If a file or files, set it to file(s):PATH_TO_FILE.
mmap means the content will be in an mmaped file. Use mmap:PATH_TO_FILE.
See http://hbase.apache.org/book.html#offheap.blockcache for more information.
</description>
</property>
<property>
<name>hbase.bucketcache.size</name>
<value></value>
<description>A float that EITHER represents a percentage of total heap memory
size to give to the cache (if &lt; 1.0) OR, it is the total capacity in
megabytes of BucketCache. Default: 0.0</description>
</property>
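<!--
For illustration only -- a sketch of an hbase-site.xml fragment enabling an
off-heap BucketCache (the 4096 MB size is an arbitrary example; an off-heap
engine also assumes sufficient direct/off-heap memory has been reserved for
the JVM, see the reference guide link above):
<property>
<name>hbase.bucketcache.ioengine</name>
<value>offheap</value>
</property>
<property>
<name>hbase.bucketcache.size</name>
<value>4096</value>
</property>
-->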
<property>
<name>hbase.bucketcache.bucket.sizes</name>
<value></value>
<description>A comma-separated list of sizes for buckets for the bucketcache.
Can be multiple sizes. List block sizes in order from smallest to largest.
The sizes you use will depend on your data access patterns.
Must be a multiple of 256 else you will run into
'java.io.IOException: Invalid HFile block magic' when you go to read from cache.
If you specify no values here, then you pick up the default bucketsizes set
in code (See BucketAllocator#DEFAULT_BUCKET_SIZES).
</description>
</property>
<property>
<name>hfile.format.version</name>
<value>3</value>
<description>The HFile format version to use for new files.
Version 3 adds support for tags in hfiles (See http://hbase.apache.org/book.html#hbase.tags).
Also see the configuration 'hbase.replication.rpc.codec'.
</description>
</property>
<property>
<name>hfile.block.bloom.cacheonwrite</name>
<value>false</value>
<description>Enables cache-on-write for inline blocks of a compound Bloom filter.</description>
</property>
<property>
<name>io.storefile.bloom.block.size</name>
<value>131072</value>
<description>The size in bytes of a single block ("chunk") of a compound Bloom
filter. This size is approximate, because Bloom blocks can only be
inserted at data block boundaries, and the number of keys per data
block varies.</description>
</property>
<property>
<name>hbase.rs.cacheblocksonwrite</name>
<value>false</value>
<description>Whether an HFile block should be added to the block cache when the
block is finished.</description>
</property>
<property>
<name>hbase.rpc.timeout</name>
<value>60000</value>
<description>This is for the RPC layer to define how long (in milliseconds) HBase client applications
wait for a remote call to time out. It uses pings to check connections
but will eventually throw a TimeoutException.</description>
</property>
<property>
<name>hbase.client.operation.timeout</name>
<value>1200000</value>
<description>Operation timeout is a top-level restriction (millisecond) that makes sure a
blocking operation in Table will not be blocked longer than this. In each operation, if an rpc
request fails because of a timeout or another reason, it will retry until success or throw a
RetriesExhaustedException. But if the total blocking time reaches the operation timeout
before the retries are exhausted, it will break early and throw SocketTimeoutException.</description>
</property>
<property>
<name>hbase.cells.scanned.per.heartbeat.check</name>
<value>10000</value>
<description>The number of cells scanned in between heartbeat checks. Heartbeat
checks occur during the processing of scans to determine whether or not the
server should stop scanning in order to send back a heartbeat message to the
client. Heartbeat messages are used to keep the client-server connection alive
during long running scans. Small values mean that the heartbeat checks will
occur more often and thus will provide a tighter bound on the execution time of
the scan. Larger values mean that the heartbeat checks occur less frequently.
</description>
</property>
<property>