@@ -57,12 +57,134 @@ enabled = true
5757# Optional internal queue between receiver and cache
5858buffer-size = 0
5959
60+ # You can define unlimited count of additional receivers
61+ # Common definition scheme:
62+ # [receiver.<any receiver name>]
63+ # protocol = "<any supported protocol>"
64+ # <protocol specific options>
65+ #
66+ # All available protocols:
67+ #
68+ # [receiver.udp2]
69+ # protocol = "udp"
70+ # listen = ":2003"
71+ # # Enable optional logging of incomplete messages (chunked by max UDP packet size)
72+ # log-incomplete = false
73+ #
74+ # [receiver.tcp2]
75+ # protocol = "tcp"
76+ # listen = ":2003"
77+ #
78+ # [receiver.pickle2]
79+ # protocol = "pickle"
80+ # listen = ":2004"
81+ # # Limit message size to prevent memory overflow
82+ # max-message-size = 67108864
83+ #
84+ # [receiver.protobuf]
85+ # protocol = "protobuf"
86+ # # Same framing protocol as pickle, but message encoded in protobuf format
87+ # # See https://github.com/lomik/go-carbon/blob/master/helper/carbonpb/carbon.proto
88+ # listen = ":2005"
90+ # # Limit message size to prevent memory overflow
90+ # max-message-size = 67108864
91+ #
92+ # [receiver.http]
93+ # protocol = "http"
94+ # # This receiver receives data from POST requests body.
95+ # # Data can be encoded in plain text format (default),
96+ # # protobuf (with Content-Type: application/protobuf header) or
97+ # # pickle (with Content-Type: application/python-pickle header).
98+ # listen = ":2007"
99+ # max-message-size = 67108864
100+ #
101+ # [receiver.kafka]
102+ # protocol = "kafka"
103+ # # This receiver receives data from kafka
104+ # # You can use Partitions and Topics to do sharding
105+ # # State is saved in local file to avoid problems with multiple consumers
106+ #
107+ # # Encoding of messages
108+ # # Available options: "plain" (default), "protobuf", "pickle"
109+ # # Please note that for "plain" you must pass metrics with leading "\n".
110+ # # e.g.
111+ # # echo "test.metric $(date +%s) $(date +%s)" | kafkacat -D $'\0' -z snappy -T -b localhost:9092 -t graphite
112+ # parse-protocol = "protobuf"
113+ # # Kafka connection parameters
114+ # brokers = [ "host1:9092", "host2:9092" ]
115+ # topic = "graphite"
116+ # partition = 0
117+ #
118+ # # Specify how often receiver will try to connect to kafka in case of network problems
119+ # reconnect-interval = "5m"
120+ # # How often receiver will ask Kafka for new data (in case there was no messages available to read)
121+ # fetch-interval = "200ms"
122+ #
123+ # # Path to saved kafka state. Used for restarts
124+ # state-file = "/var/lib/graphite/kafka.state"
125+ # # Initial offset, if there is no saved state. Can be relative time or "newest" or "oldest".
126+ # # In case offset is unavailable (in future, etc) fallback is "oldest"
127+ # initial-offset = "-30m"
128+ #
129+ # # Specify kafka feature level (default: 0.11.0.0).
130+ # # Please note that some features (consuming lz4 compressed streams) require kafka >0.11
131+ # # You must specify the version in full. E.g. '0.11.0.0' - ok, but '0.11' is not.
132+ # # Supported version (as of 22 Jan 2018):
133+ # # 0.8.2.0
134+ # # 0.8.2.1
135+ # # 0.8.2.2
136+ # # 0.9.0.0
137+ # # 0.9.0.1
138+ # # 0.10.0.0
139+ # # 0.10.0.1
140+ # # 0.10.1.0
141+ # # 0.10.2.0
142+ # # 0.11.0.0
143+ # # 1.0.0
144+ # kafka-version = "0.11.0.0"
145+ #
146+ # [receiver.pubsub]
147+ # # This receiver receives data from Google PubSub
148+ # # - Authentication is managed through APPLICATION_DEFAULT_CREDENTIALS:
149+ # # - https://cloud.google.com/docs/authentication/production#providing_credentials_to_your_application
150+ # # - Currently the subscription must exist before running go-carbon.
151+ # # - The "receiver_*" settings are optional and directly map to the google pubsub
152+ # # libraries ReceiveSettings (https://godoc.org/cloud.google.com/go/pubsub#ReceiveSettings)
153+ # # - How to think about the "receiver_*" settings: In an attempt to maximize throughput the
154+ # # pubsub library will spawn 'receiver_go_routines' to fetch messages from the server.
155+ # # These goroutines simply buffer them into memory until 'receiver_max_messages' or 'receiver_max_bytes'
156+ # # have been read. This does not affect the actual handling of these messages which are processed by other goroutines.
157+ # protocol = "pubsub"
158+ # project = "project-name"
159+ # subscription = "subscription-name"
160+ # receiver_go_routines = 4
161+ # receiver_max_messages = 1000
162+ # receiver_max_bytes = 500000000 # default 500MB
163+
60164[carbonlink]
61165listen = "127.0.0.1:7002"
62166enabled = false
63167# Close inactive connections after "read-timeout"
64168read-timeout = "30s"
65169
170+ # grpc api
171+ # protocol: https://github.com/lomik/go-carbon/blob/master/helper/carbonpb/carbon.proto
172+ # samples: https://github.com/lomik/go-carbon/tree/master/api/sample
173+ [grpc]
174+ listen = "127.0.0.1:7003"
175+ enabled = true
176+
177+ # http://graphite.readthedocs.io/en/latest/tags.html
178+ [tags]
179+ enabled = false
180+ # TagDB url. It should support /tags/tagMultiSeries endpoint
181+ tagdb-url = "http://127.0.0.1:8000"
182+ tagdb-chunk-size = 32
183+ # Directory for send queue (based on leveldb)
184+ local-dir = "/var/lib/graphite/tagging/"
185+ # POST timeout
186+ tagdb-timeout = "1s"
187+
66188[carbonserver]
67189# Please NOTE: carbonserver is not intended to fully replace graphite-web
68190# It acts as a "REMOTE_STORAGE" for graphite-web or carbonzipper/carbonapi
0 commit comments