|
| 1 | +<?xml version="1.0" encoding="UTF-8"?> |
| 2 | +<?xml-stylesheet type="text/xsl" href="configuration.xsl"?> |
| 3 | +<!-- |
| 4 | +/** |
| 5 | + * Copyright 2010 The Apache Software Foundation |
| 6 | + * |
| 7 | + * Licensed to the Apache Software Foundation (ASF) under one |
| 8 | + * or more contributor license agreements. See the NOTICE file |
| 9 | + * distributed with this work for additional information |
| 10 | + * regarding copyright ownership. The ASF licenses this file |
| 11 | + * to you under the Apache License, Version 2.0 (the |
| 12 | + * "License"); you may not use this file except in compliance |
| 13 | + * with the License. You may obtain a copy of the License at |
| 14 | + * |
| 15 | + * http://www.apache.org/licenses/LICENSE-2.0 |
| 16 | + * |
| 17 | + * Unless required by applicable law or agreed to in writing, software |
| 18 | + * distributed under the License is distributed on an "AS IS" BASIS, |
| 19 | + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 20 | + * See the License for the specific language governing permissions and |
| 21 | + * limitations under the License. |
| 22 | + */ |
| 23 | +--> |
| 24 | +<configuration> |
| 25 | +<property> |
| 26 | + <name>hbase.cluster.distributed</name> |
| 27 | + <value>true</value> |
| 28 | +</property> |
| 29 | + |
| 30 | +<property> |
| 31 | + <name>hbase.rootdir</name> |
| 32 | + <value>hdfs://dwztgame/hbase</value> |
| 33 | +</property> |
| 34 | + |
| 35 | +<property> |
| 36 | + <name>dfs.datanode.max.xcievers</name> |
| 37 | + <value>40960</value> |
| 38 | +</property> |
| 39 | + |
| 40 | +<property> |
| 41 | + <name>hbase.zookeeper.quorum</name> |
| 42 | + <value>node01.dw.ztgame.com:2181,node02.dw.ztgame.com:2181,node03.dw.ztgame.com:2181</value> |
| 43 | +</property> |
| 44 | + |
| 45 | +<property> |
| 46 | + <name>hbase.regionserver.handler.count</name> |
| 47 | + <value>200</value> |
| 48 | + <description>Count of RPC Server instances spun up on RegionServers |
| 49 | + Same property is used by the Master for count of master handlers. |
| 50 | + Default is 10.</description> |
| 51 | +</property> |
| 52 | + |
| 53 | +<property> |
| 54 | + <name>hbase.regionserver.flushlogentries</name> |
| 55 | + <value>500</value> |
| 56 | + <description>Sync the HLog to HDFS when it has accumulated this many |
| 57 | + entries. Default 1. Value is checked on every HLog.hflush</description> |
| 58 | +</property> |
| 59 | + |
| 60 | +<property> |
| 61 | + <name>hbase.regionserver.optionallogflushinterval</name> |
| 62 | + <value>2000</value> |
| 63 | + <description>Sync the HLog to the HDFS after this interval if it has not |
| 64 | + accumulated enough entries to trigger a sync. Default 1 second. Units: |
| 65 | + milliseconds. </description> |
| 66 | +</property> |
| 67 | + |
| 68 | +<property> |
| 69 | + <name>hbase.regionserver.thread.splitcompactcheckfrequency</name> |
| 70 | + <value>600000</value> |
| 71 | + <description>How often a region server runs the split/compaction check. </description> |
| 72 | +</property> |
| 73 | + |
| 74 | +<property> |
| 75 | + <name>hbase.regions.slop</name> |
| 76 | + <value>0</value> |
| 77 | + <description>Rebalance if any regionserver has average + (average * slop) regions. |
| 78 | + Default is 0% slop. </description> |
| 79 | +</property> |
| 80 | + |
| 81 | +<property> |
| 82 | + <name>hbase.server.thread.wakefrequency</name> |
| 83 | + <value>5000</value> |
| 84 | + <description>Time to sleep in between searches for work (in milliseconds). |
| 85 | + Used as sleep interval by service threads such as log roller. </description> |
| 86 | +</property> |
| 87 | + |
| 88 | +<property> |
| 89 | + <name>hbase.hregion.memstore.flush.size</name> |
| 90 | + <value>134217728</value> |
| 91 | + <description>Memstore will be flushed to disk if size of the memstore |
| 92 | + exceeds this number of bytes. Value is checked by a thread that runs |
| 93 | + every hbase.server.thread.wakefrequency.</description> |
| 94 | +</property> |
| 95 | + |
| 96 | +<property> |
| 97 | + <name>hbase.hregion.memstore.block.multiplier</name> |
| 98 | + <value>6</value> |
| 99 | + <description> |
| 100 | + Block updates if memstore has hbase.hregion.block.memstore |
| 101 | + times hbase.hregion.flush.size bytes. Useful for preventing |
| 102 | + runaway memstore during spikes in update traffic. Without an |
| 103 | + upper-bound, memstore fills such that when it flushes the |
| 104 | + resultant flush files take a long time to compact or split, or |
| 105 | + worse, we OOME. </description> |
| 106 | +</property> |
| 107 | + |
| 108 | +<property> |
| 109 | + <name>hbase.hregion.memstore.mslab.enabled</name> |
| 110 | + <value>true</value> |
| 111 | + <description> Experimental: Enables the MemStore-Local Allocation Buffer, |
| 112 | + a feature which works to prevent heap fragmentation under |
| 113 | + heavy write loads. This can reduce the frequency of stop-the-world |
| 114 | + GC pauses on large heaps.</description> |
| 115 | +</property> |
| 116 | + |
| 117 | +<property> |
| 118 | + <name>hfile.block.cache.size</name> |
| 119 | + <value>0.2</value> |
| 120 | + <description> Percentage of maximum heap (-Xmx setting) to allocate to block cache |
| 121 | + used by HFile/StoreFile. Default of 0.2 means allocate 20%. |
| 122 | + Set to 0 to disable. </description> |
| 123 | +</property> |
| 124 | + |
| 125 | +<property> |
| 126 | + <name>hbase.regionserver.nbreservationblocks</name> |
| 127 | + <value>10</value> |
| 128 | + <description>The number of reservoir blocks of memory released on |
| 129 | + OOME so we can cleanup properly before server shutdown.</description> |
| 130 | +</property> |
| 131 | + |
| 132 | +<property> |
| 133 | + <name>hbase.regionserver.global.memstore.upperLimit</name> |
| 134 | + <value>0.5</value> |
| 135 | + <description>Maximum size of all memstores in a region server before new |
| 136 | + updates are blocked and flushes are forced. Defaults to 40% of heap</description> |
| 137 | +</property> |
| 138 | + |
| 139 | +<property> |
| 140 | + <name>hbase.regionserver.global.memstore.lowerLimit</name> |
| 141 | + <value>0.4</value> |
| 142 | + <description>When memstores are being forced to flush to make room in |
| 143 | + memory, keep flushing until we hit this mark. Defaults to 35% of heap. |
| 144 | + Setting this value equal to hbase.regionserver.global.memstore.upperLimit causes |
| 145 | + the minimum possible flushing to occur when updates are blocked due to |
| 146 | + memstore limiting.</description> |
| 147 | +</property> |
| 148 | + |
| 149 | +<property> |
| 150 | + <name>hbase.hregion.max.filesize</name> |
| 151 | + <value>2684354560</value> |
| 152 | + <description> |
| 153 | + Maximum HStoreFile size. If any one of a column families' HStoreFiles has |
| 154 | + grown to exceed this value, the hosting HRegion is split in two. |
| 155 | + Default: 256M.</description> |
| 156 | +</property> |
| 157 | + |
| 158 | +<property> |
| 159 | + <name>hbase.snapshot.enabled</name> |
| 160 | + <value>true</value> |
| 161 | +</property> |
| 162 | + |
| 163 | +<property> |
| 164 | + <name>hbase.regionserver.regionSplitLimit</name> |
| 165 | + <value>200</value> |
| 166 | + <description>Limit for the number of regions after which no more region |
| 167 | + splitting should take place. This is not a hard limit for the number of |
| 168 | + regions but acts as a guideline for the regionserver to stop splitting after |
| 169 | + a certain limit. Default is set to MAX_INT; i.e. do not block splitting.</description> |
| 170 | +</property> |
| 171 | + |
| 172 | +<property> |
| 173 | + <name>hbase.hstore.compactionThreshold</name> |
| 174 | + <value>4</value> |
| 175 | + <description>If more than this number of HStoreFiles in any one HStore |
| 176 | + (one HStoreFile is written per flush of memstore) then a compaction |
| 177 | + is run to rewrite all HStoreFiles files as one. Larger numbers |
| 178 | + put off compaction but when it runs, it takes longer to complete. </description> |
| 179 | +</property> |
| 180 | + |
| 181 | +<property> |
| 182 | + <name>hbase.hstore.blockingStoreFiles</name> |
| 183 | + <value>12</value> |
| 184 | + <description>If more than this number of StoreFiles in any one Store |
| 185 | + (one StoreFile is written per flush of MemStore) then updates are |
| 186 | + blocked for this HRegion until a compaction is completed, or |
| 187 | + until hbase.hstore.blockingWaitTime has been exceeded. </description> |
| 188 | +</property> |
| 189 | + |
| 190 | +<property> |
| 191 | + <name>hbase.hstore.compaction.max</name> |
| 192 | + <value>6</value> |
| 193 | + <description>Max number of HStoreFiles to compact per 'minor' compaction.</description> |
| 194 | +</property> |
| 195 | + |
| 196 | +<property> |
| 197 | + <name>hbase.hregion.majorcompaction</name> |
| 198 | + <value>172800000</value> |
| 199 | + <description>The time (in milliseconds) between 'major' compactions of all |
| 200 | + HStoreFiles in a region. Default: 1 day. |
| 201 | + Set to 0 to disable automated major compactions. </description> |
| 202 | +</property> |
| 203 | + |
| 204 | +<property> |
| 205 | + <name>io.storefile.bloom.enabled</name> |
| 206 | + <value>true</value> |
| 207 | +</property> |
| 208 | + |
| 209 | +<property> |
| 210 | + <name>hbase.replication</name> |
| 211 | + <value>true</value> |
| 212 | +</property> |
| 213 | + |
| 214 | +</configuration> |