Skip to content
This repository was archived by the owner on May 4, 2021. It is now read-only.

Commit 5ca127d

Browse files
committed
Add a docker file for 0.10.3 that is literally copied from 0.10.
1 parent a14bb6b commit 5ca127d

File tree

4 files changed

+683
-0
lines changed

4 files changed

+683
-0
lines changed

0.10.3/Dockerfile

Lines changed: 34 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,34 @@
1+
FROM tutum/curl:trusty

# MAINTAINER is deprecated (hadolint DL4000); LABEL is the supported replacement.
LABEL maintainer="François-Guillaume Ribreau <[email protected]>"

# Install InfluxDB (pinned Debian package version for reproducible builds).
# Legacy space-separated `ENV key value` is deprecated; use key=value form.
ENV INFLUXDB_VERSION=0.10.3-1

# -f makes curl fail on an HTTP error instead of saving the S3 error page as a
# bogus .deb (which would only surface later as a confusing dpkg failure);
# -S still prints the error despite -s. Download, install, and clean up in one
# layer so the .deb and apt lists never persist in the image.
RUN curl -fsS -o /tmp/influxdb_latest_amd64.deb https://s3.amazonaws.com/influxdb/influxdb_${INFLUXDB_VERSION}_amd64.deb && \
    dpkg -i /tmp/influxdb_latest_amd64.deb && \
    rm /tmp/influxdb_latest_amd64.deb && \
    rm -rf /var/lib/apt/lists/*

# COPY is preferred over ADD for plain local files (hadolint DL3020).
COPY types.db /usr/share/collectd/types.db
COPY config.toml /config/config.toml
COPY run.sh /run.sh
RUN chmod +x /*.sh

# Runtime configuration read by run.sh; the **None**/**False** sentinels mean
# "not set" to the entrypoint script.
ENV PRE_CREATE_DB=**None**
ENV SSL_SUPPORT=**False**
ENV SSL_CERT=**None**

# Admin server WebUI
EXPOSE 8083

# HTTP API
EXPOSE 8086

# Raft port (for clustering, don't expose publicly!)
#EXPOSE 8090

# Protobuf port (for clustering, don't expose publicly!)
#EXPOSE 8099

# Declared after all build-time writes; holds meta, db, wal, and hh data.
VOLUME ["/data"]

CMD ["/run.sh"]

0.10.3/config.toml

Lines changed: 265 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,265 @@
1+
### Welcome to the InfluxDB configuration file.

# Once every 24 hours InfluxDB will report anonymous data to m.influxdb.com
# The data includes raft id (random 8 bytes), os, arch, version, and metadata.
# We don't track ip addresses of servers reporting. This is only used
# to track the number of instances running and the versions, which
# is very helpful for us.
# Change this option to true to disable reporting.
reporting-disabled = false

###
### [meta]
###
### Controls the parameters for the Raft consensus group that stores metadata
### about the InfluxDB cluster.
###

[meta]
# Controls if this node should run the metaservice and participate in the Raft group
enabled = true

dir = "/data/meta"
hostname = "localhost"
bind-address = ":8088"
retention-autocreate = true
election-timeout = "1s"
heartbeat-timeout = "1s"
leader-lease-timeout = "500ms"
commit-timeout = "50ms"

###
### [data]
###
### Controls where the actual shard data for InfluxDB lives and how it is
### flushed from the WAL. "dir" may need to be changed to a suitable place
### for your system, but the WAL settings are an advanced configuration. The
### defaults should work for most systems.
###

[data]
# Controls if this node holds time series data shards in the cluster
enabled = true
dir = "/data/db"

# The following WAL settings are for the b1 storage engine used in 0.9.2. They won't
# apply to any new shards created after upgrading to a version > 0.9.3.
max-wal-size = 104857600 # Maximum size the WAL can reach before a flush. Defaults to 100MB.
wal-flush-interval = "10m0s" # Maximum time data can sit in WAL before a flush.
wal-partition-flush-delay = "2s" # The delay time between each WAL partition being flushed.

# These are the WAL settings for the storage engine >= 0.9.3
wal-dir = "/data/wal"
wal-enable-logging = true
data-logging-enabled = true

# When a series in the WAL in-memory cache reaches this size in bytes it is marked as ready to
# flush to the index
# wal-ready-series-size = 25600

# Flush and compact a partition once this ratio of series are over the ready size
# wal-compaction-threshold = 0.6

# Force a flush and compaction if any series in a partition gets above this size in bytes
# wal-max-series-size = 2097152

# Force a flush of all series and full compaction if there have been no writes in this
# amount of time. This is useful for ensuring that shards that are cold for writes don't
# keep a bunch of data cached in memory and in the WAL.
# wal-flush-cold-interval = "10m"

# Force a partition to flush its largest series if it reaches this approximate size in
# bytes. Remember there are 5 partitions so you'll need at least 5x this amount of memory.
# The more memory you have, the bigger this can be.
# wal-partition-size-threshold = 20971520

###
### [cluster]
###
### Controls non-Raft cluster behavior, which generally includes how data is
### shared across shards.
###
[cluster]
write-timeout = "5s" # The time within which a write operation must complete on the cluster.
shard-writer-timeout = "5s" # The time within which a shard must respond to write.

###
### [retention]
###
### Controls the enforcement of retention policies for evicting old data.
###
[retention]
enabled = true
check-interval = "10m0s"

###
### [shard-precreation]
###
### Controls the precreation of shards, so they are available before data arrives.
### Only shards that, after creation, will have both a start- and end-time in the
### future, will ever be created. Shards are never precreated that would be wholly
### or partially in the past.

[shard-precreation]
enabled = true
check-interval = "10m"
advance-period = "30m"

###
### [admin]
###
### Controls the availability of the built-in, web-based admin interface. If HTTPS is
### enabled for the admin interface, HTTPS must also be enabled on the [http] service.
###
[admin]
enabled = true
bind-address = ":8083"
https-enabled = false
https-certificate = "/etc/ssl/influxdb.pem"

###
### [http]
###
### Controls how the HTTP endpoints are configured. These are the primary
### mechanism for getting data into and out of InfluxDB.
###
[http]
enabled = true
bind-address = ":8086"
auth-enabled = false
log-enabled = true
write-tracing = false
pprof-enabled = false
https-enabled = false
https-certificate = "/etc/ssl/influxdb.pem"

###
### [[graphite]]
###
### Controls one or many listeners for Graphite data.
###
[[graphite]]
enabled = false
bind-address = ":2003"
protocol = "tcp"
consistency-level = "one"
separator = "."
database = "graphitedb"
# These next lines control how batching works. You should have this enabled
# otherwise you could get dropped metrics or poor performance. Batching
# will buffer points in memory if you have many coming in.
# batch-size = 1000 # will flush if this many points get buffered
# batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit
batch-size = 1000
batch-timeout = "1s"
templates = [
# filter + template
#"*.app env.service.resource.measurement",
# filter + template + extra tag
#"stats.* .host.measurement* region=us-west,agent=sensu",
# default template. Ignore the first graphite component "servers"
"instance.profile.measurement*"
]

###
### [collectd]
###
### Controls the listener for collectd data.
###
[collectd]
enabled = false
# bind-address = ":25826"
# database = "collectd"
# retention-policy = ""
# typesdb = "/usr/share/collectd/types.db"

# These next lines control how batching works. You should have this enabled
# otherwise you could get dropped metrics or poor performance. Batching
# will buffer points in memory if you have many coming in.

# batch-size = 1000 # will flush if this many points get buffered
# batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit

###
### [opentsdb]
###
### Controls the listener for OpenTSDB data.
###
[opentsdb]
enabled = false
# bind-address = ":4242"
# database = "opentsdb"
# retention-policy = ""
# consistency-level = "one"

###
### [[udp]]
###
### Controls the listeners for InfluxDB line protocol data via UDP.
###

[[udp]]
enabled = false
bind-address = ":4444"
database = "udpdb"

# These next lines control how batching works. You should have this enabled
# otherwise you could get dropped metrics or poor performance. Batching
# will buffer points in memory if you have many coming in.

# batch-size = 1000 # will flush if this many points get buffered
# batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit

###
### [monitoring]
###
### Send anonymous usage statistics to m.influxdb.com?
###
[monitoring]
enabled = false
write-interval = "24h"

###
### Controls the system self-monitoring, statistics and diagnostics.
###
### The internal database for monitoring data is created automatically
### if it does not already exist. The target retention within this database
### is called 'monitor' and is also created with a retention period of 7 days
### and a replication factor of 1, if it does not exist. In all cases
### this retention policy is configured as the default for the database.

[monitor]
store-enabled = true # Whether to record statistics internally.
store-database = "_internal" # The destination database for recorded statistics
store-interval = "10s" # The interval at which to record statistics


###
### [continuous_queries]
###
### Controls how continuous queries are run within InfluxDB.
###

[continuous_queries]
log-enabled = true
enabled = true
recompute-previous-n = 2
recompute-no-older-than = "10m0s"
compute-runs-per-interval = 10
compute-no-more-than = "2m0s"

###
### [hinted-handoff]
###
### Controls the hinted handoff feature, which allows nodes to temporarily
### store queued data when one node of a cluster is down for a short period
### of time.
###

[hinted-handoff]
enabled = true
dir = "/data/hh"
max-size = 1073741824
max-age = "168h"
retry-rate-limit = 0
retry-interval = "1s"

0 commit comments

Comments
 (0)