Commit: Proper upgrade

deniszh committed Mar 6, 2018
1 parent cfa4124 commit 91fed94
Showing 4 changed files with 206 additions and 13 deletions.
4 changes: 1 addition & 3 deletions Dockerfile
@@ -26,14 +26,12 @@ RUN wget https://github.com/lomik/go-carbon/releases/download/v0.12.0-rc1/go-car

# install grafana
ADD conf/etc/grafana/grafana.ini /etc/grafana/grafana.ini
ADD conf/etc/grafana/provisioning/datasources/carbonapi.yaml /etc/grafana/provisioning/datasources/carbonapi.yaml
RUN wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_5.0.0_amd64.deb \
&& dpkg -i grafana_5.0.0_amd64.deb \
&& rm /grafana_5.0.0_amd64.deb \
&& service grafana-server restart \
&& sleep 5 \
&& curl -X POST -H 'Content-Type: application/json' -u 'admin:admin' \
-d '{ "name": "carbonapi", "type": "graphite", "url": "http://127.0.0.1:8081", "access": "proxy", "basicAuth": false }' \
"http://127.0.0.1:3000/api/datasources" \
&& service grafana-server stop \
&& mkdir -p /usr/share/grafana/data \
&& mv -fv /var/lib/grafana/* /usr/share/grafana/data
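
With the datasource now provisioned from a YAML file instead of a one-off API call at build time, you can confirm that it registered by querying the Grafana API once the container is up (a sketch; it assumes Grafana is reachable on port 3000 with the default admin:admin credentials):

curl -s -u admin:admin http://127.0.0.1:3000/api/datasources/name/carbonapi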
51 changes: 41 additions & 10 deletions conf/etc/carbonapi/carbonapi.yaml
@@ -1,13 +1,3 @@
# Needs to be a URL, http or https
# This url specifies the backend or a loadbalancer
#
# If you are using carbonzipper you should set it to
# zipper's url
#
# If you are using plain go-carbon or graphite-clickhouse
# you should set it to URL of go-carbon's carbonserver module
# or graphite-clickhouse's http url.
zipper: "http://localhost:8080"
# Listen address, should always include hostname or ip address and a port.
listen: "localhost:8081"
# Max concurrent requests to CarbonZipper
@@ -54,6 +44,47 @@ graphite:
# Maximum idle connections to carbonzipper
idleConnections: 10
pidFile: ""
upstreams:
# Number of 100ms buckets to track request distribution in. Used to build
# 'carbon.zipper.hostname.requests_in_0ms_to_100ms' metric and friends.
# Requests beyond the last bucket are logged as slow
# (default of 10 implies "slow" is >1 second).
buckets: 10
timeouts:
# Maximum total backend request timeout.
# ( How long we may spend making requests. )
global: "15s"
# Timeout once the final backend has been contacted.
# ( [Effectively] How long we'll wait for the slowest response. )
afterStarted: "10s"
# Timeout to connect to the server
connect: "200ms"
# Number of concurrent requests to any given backend - default is no limit.
# If set, you likely want >= MaxIdleConnsPerHost
concurrencyLimit: 0
# Configures how often keep alive packets will be sent out
keepAliveInterval: "30s"
# Control http.MaxIdleConnsPerHost. Large values can lead to more idle
# connections on the backend servers which may bump into limits; tune with care.
maxIdleConnsPerHost: 100
# "http://host:port" array of instances of carbonserver stores
# This is the *ONLY* config element in this section that MUST be specified.
# localhost:8080 - go-carbon
backends:
- "http://localhost:8080"
carbonsearch:
# Instance of carbonsearch backend
backend: "http://127.0.0.1:8070"
# carbonsearch prefix to reserve/register
prefix: "virt.v1.*"
# Enable compatibility with graphite-web 0.9
# This will affect graphite-web 1.0+ with multiple cluster_servers
# Default: disabled
graphite09compat: false
# If not zero, enables caching of find requests
# This parameter controls when it will expire (in seconds)
# Default: 600 (10 minutes)
expireDelaySec: 0
# See https://github.com/go-graphite/carbonzipper/blob/master/example.conf#L70-L108 for format explanation
logger:
- logger: ""
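
To smoke-test this carbonapi configuration, hit its render endpoint directly (a sketch; it assumes carbonapi is listening on localhost:8081 as configured above, and some.stored.metric is a placeholder for any metric your go-carbon instance already holds):

curl "http://localhost:8081/render?target=some.stored.metric&from=-5m&format=json"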
122 changes: 122 additions & 0 deletions conf/etc/go-carbon/go-carbon.conf
@@ -57,12 +57,134 @@ enabled = true
# Optional internal queue between receiver and cache
buffer-size = 0

# You can define an unlimited number of additional receivers
# Common definition scheme:
# [receiver.<any receiver name>]
# protocol = "<any supported protocol>"
# <protocol specific options>
#
# All available protocols:
#
# [receiver.udp2]
# protocol = "udp"
# listen = ":2003"
# # Enable optional logging of incomplete messages (chunked by max UDP packet size)
# log-incomplete = false
#
# [receiver.tcp2]
# protocol = "tcp"
# listen = ":2003"
#
# [receiver.pickle2]
# protocol = "pickle"
# listen = ":2004"
# # Limit message size to prevent memory overflow
# max-message-size = 67108864
#
# [receiver.protobuf]
# protocol = "protobuf"
# # Same framing protocol as pickle, but messages are encoded in protobuf format
# # See https://github.com/lomik/go-carbon/blob/master/helper/carbonpb/carbon.proto
# listen = ":2005"
# # Limit message size to prevent memory overflow
# max-message-size = 67108864
#
# [receiver.http]
# protocol = "http"
# # This receiver receives data from the body of POST requests.
# # Data can be encoded in plain text format (default),
# # protobuf (with Content-Type: application/protobuf header) or
# # pickle (with Content-Type: application/python-pickle header).
# listen = ":2007"
# max-message-size = 67108864
#
# [receiver.kafka]
# protocol = "kafka
# # This receiver receives data from kafka
# # You can use Partitions and Topics to do sharding
# # State is saved in local file to avoid problems with multiple consumers
#
# # Encoding of messages
# # Available options: "plain" (default), "protobuf", "pickle"
# # Please note that for "plain" you must pass metrics with a trailing "\n".
# # e.g.
# # echo "test.metric $(date +%s) $(date +%s)" | kafkacat -D $'\0' -z snappy -T -b localhost:9092 -t graphite
# parse-protocol = "protobuf"
# # Kafka connection parameters
# brokers = [ "host1:9092", "host2:9092" ]
# topic = "graphite"
# partition = 0
#
# # Specify how often the receiver will try to reconnect to kafka in case of network problems
# reconnect-interval = "5m"
# # How often the receiver will ask Kafka for new data (in case there were no messages available to read)
# fetch-interval = "200ms"
#
# # Path to saved kafka state. Used for restarts
# state-file = "/var/lib/graphite/kafka.state"
# # Initial offset, if there is no saved state. Can be a relative time, "newest" or "oldest".
# # In case the offset is unavailable (in the future, etc.) the fallback is "oldest"
# initial-offset = "-30m"
#
# # Specify kafka feature level (default: 0.11.0.0).
# # Please note that some features (e.g. consuming lz4-compressed streams) require kafka >0.11
# # You must specify the version in full: '0.11.0.0' is ok, but '0.11' is not.
# # Supported versions (as of 22 Jan 2018):
# # 0.8.2.0
# # 0.8.2.1
# # 0.8.2.2
# # 0.9.0.0
# # 0.9.0.1
# # 0.10.0.0
# # 0.10.0.1
# # 0.10.1.0
# # 0.10.2.0
# # 0.11.0.0
# # 1.0.0
# kafka-version = "0.11.0.0"
#
# [receiver.pubsub]
# # This receiver receives data from Google PubSub
# # - Authentication is managed through APPLICATION_DEFAULT_CREDENTIALS:
# # - https://cloud.google.com/docs/authentication/production#providing_credentials_to_your_application
# # - Currently the subscription must exist before running go-carbon.
# # - The "receiver_*" settings are optional and directly map to the google pubsub
# # libraries ReceiveSettings (https://godoc.org/cloud.google.com/go/pubsub#ReceiveSettings)
# # - How to think about the "receiver_*" settings: In an attempt to maximize throughput the
# # pubsub library will spawn 'receiver_go_routines' to fetch messages from the server.
# # These goroutines simply buffer them into memory until 'receiver_max_messages' or 'receiver_max_bytes'
# # have been read. This does not affect the actual handling of these messages which are processed by other goroutines.
# protocol = "pubsub"
# project = "project-name"
# subscription = "subscription-name"
# receiver_go_routines = 4
# receiver_max_messages = 1000
# receiver_max_bytes = 500000000 # default 500MB

[carbonlink]
listen = "127.0.0.1:7002"
enabled = false
# Close inactive connections after "read-timeout"
read-timeout = "30s"

# grpc api
# protocol: https://github.com/lomik/go-carbon/blob/master/helper/carbonpb/carbon.proto
# samples: https://github.com/lomik/go-carbon/tree/master/api/sample
[grpc]
listen = "127.0.0.1:7003"
enabled = true

# http://graphite.readthedocs.io/en/latest/tags.html
[tags]
enabled = false
# TagDB url. It should support the /tags/tagMultiSeries endpoint
tagdb-url = "http://127.0.0.1:8000"
tagdb-chunk-size = 32
# Directory for send queue (based on leveldb)
local-dir = "/var/lib/graphite/tagging/"
# POST timeout
tagdb-timeout = "1s"

[carbonserver]
# Please NOTE: carbonserver is not intended to fully replace graphite-web
# It acts as a "REMOTE_STORAGE" for graphite-web or carbonzipper/carbonapi
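
A minimal end-to-end check of the receiver configuration (a sketch: the plaintext receiver on port 2003 and the whisper directory /var/lib/graphite/whisper are assumptions based on common go-carbon defaults, and the -q0 flag is specific to traditional/Debian netcat):

echo "test.metric 42 $(date +%s)" | nc -q0 localhost 2003
sleep 60   # give the cache time to flush the point to disk; depends on persister settings
find /var/lib/graphite/whisper -name 'test*'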
42 changes: 42 additions & 0 deletions conf/etc/grafana/provisioning/datasources/carbonapi.yaml
@@ -0,0 +1,42 @@
# config file version
apiVersion: 1

## list of datasources that should be deleted from the database
#deleteDatasources:
# - name: Graphite
# orgId: 1

# list of datasources to insert/update depending
# on what's available in the database
datasources:
# <string, required> name of the datasource. Required
- name: carbonapi
# <string, required> datasource type. Required
type: graphite
# <string, required> access mode. direct or proxy. Required
access: proxy
# <int> org id. will default to orgId 1 if not specified
orgId: 1
# <string> url
url: http://127.0.0.1:8081
# <string> database password, if used
password:
# <string> database user, if used
user:
# <string> database name, if used
database:
# <bool> enable/disable basic auth
basicAuth:
# <string> basic auth username
basicAuthUser:
# <string> basic auth password
basicAuthPassword:
# <bool> enable/disable with credentials headers
withCredentials:
# <bool> mark as default datasource. Max one per org
isDefault: true
# <map> fields that will be converted to json and stored in json_data
jsonData:
graphiteVersion: "1.0"
# <bool> allow users to edit datasources from the UI.
editable: true
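
Because access is set to proxy, Grafana relays queries to carbonapi itself, so the whole chain can be exercised through Grafana's datasource proxy (a sketch; the datasource id of 1, the default admin:admin credentials, and some.stored.metric are assumptions):

curl -s -u admin:admin "http://127.0.0.1:3000/api/datasources/proxy/1/render?target=some.stored.metric&from=-5m&format=json"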
