Commit 79d38588 authored by Aleksey Shirokih's avatar Aleksey Shirokih

cleanup

parent 2fd9edcd
[flake8]
exclude=.hg,var,share
# E501 line too long
# Not a real bug
ignore=E501
[branchname]
master = default
develop = develop
feature = feature/
release = release/
hotfix = hotfix/
support = support/
##
## Do not forget to sync with main/management/commands/build-manifest.py
##
syntax: regexp
^custom/
syntax: glob
*.pyc
*.pyo
*.swp
*.orig
dist/
build/
noc.egg-info/
bin/*
etc/**
include/*
lib/python*
lib64
local/*
static/doc/*
contrib/lib/*
contrib/bin/*
contrib/share/*
.*
ext_*
etc/requirements/local.txt
static/pkg/
share/
static/test/
pip-selfcheck.json
var/
scripts/custom/*
django/*
ui/pkg/
ansible/vars/local.yml
etc/etl.yml
speedup/*.c
speedup/*.so
ui/bi/**
ui/bi2/**
collections/
ee473f8f46cf1f65a3b3ca8a159bb4d4b9504285 0.1RC2
2258e819c610211d5590ddadb3c7c287554adb49 0.1
29296ebf54285653f4781d366522b2b1a8a1c987 0.1.1
d118759463a69c226cd1a491161762eb5f4e9834 0.1.2
fa98d6055dbb983a1579b284fa9351881fcea46a 0.1.3
c5766aef49177db6bdfad8d34af5c3c8d57b8794 0.1.4
3b077b52923b24c11d7682a9ac5373b2f73e4b2b 0.1.5
3c7a17369381a0bf3dde3547635812f463b5a7c9 0.1.6
6b71553c1b5ea3534ba465fd6733656a80889091 0.1.7
6b71553c1b5ea3534ba465fd6733656a80889091 0.1.7
0000000000000000000000000000000000000000 0.1.7
0000000000000000000000000000000000000000 0.1.7
bdf0614baa06ed6565d60c55ea5606d33f1b7e49 0.1.7
22f85c9ff7f8bc2786b9f4cb70acc819a1e5e874 0.1.8
6a8675160773e3afe54b00a772a5cb56e4e0f5b3 0.1.9
e79a1380cb38abd55b6ebeaf9d016222d2ec88a3 0.2
5e5aa3afc9b499f624e76b48651dcae2ffcfdcd0 0.2.2
64488e3ef8f99c5eddb4a8739f3eb6488c3392fe 0.2.3
196b518f039e9f9c6c2759fefc2be74459d0c687 0.2.4
87f4d2c8c92be742cb28c518bf8db86244fab2a9 0.2.5
6a573aed34eb6a47c3e3fa5a92c30968e762133f 0.2.6
60c7b0c44c59bdb79d477c2f5f48bab46e0871eb 0.2.7
f6e6084b6e2a9f77a1e82e7af9976f5759ebfd75 0.3
1c5c252bc795bf56b89eaf419128282defd94388 0.3.1
73a1fbcc2ab7b96d0a13b346c591fad5c6c210ad 0.3.2
b9fe85cf2f0af7cb6601fad779d50579894adda1 0.4
29129c079fa0e06b065d1c64ad528e500b35ad51 0.5
3596ea5a65edd765246a5efe3c679e32d6c73efd 0.5.1
bf8752bb66381394bf7d917f14f47c7ac944440d 0.6
f29ace0e609a70c8118c7c7a7a0e0e5d07e9575d 0.6.1
0fd366d49ce3c44dd141ff6664c44443d2bea88a 0.6.2
79cd076e4f6d0241a43cb09554a5f1a0d1a83f23 0.6.3
9134be6113f5ae48af7e2c328e4e2c58b80472f5 0.6.4
0f09cff51d5b85ea1964517090886f05403b5d93 0.7(1)
eaa0b4f4ed8347348464593d4e68868bf602bc29 0.7(2)
bb27bfde582a6aab451ecefd605ba2e1cbc918da 0.7(3)
8b472b71153b24bcf9762368ec98c15cb7a39e94 0.7(4)
7a59a46584707e9dc8550e06ffd4c50115d78c8a 0.8
8aba93363a51451c8f298c575b2791b3d2159db8 pip6
1d4301d472df285cd31408c96ecb89b538700c78 0.8-requirements
0281dceed3dc04ee0b7200a9b1987810053e52bd 15.05
60b79787e1d3719f81422153ef0f7bc0f3672a0f 15.05.1
{
"service": {
"name": "clickhouse",
"tags": [ ],
"port": {{clickhouse_http_port}},
"enableTagOverride": false,
"check": {
"http": "http://localhost:{{clickhouse_http_port}}/",
"interval": "10s"
}
}
}
<?xml version="1.0"?>
<yandex>
<logger>
<level>Information</level>
<log>{{ ch_log_dir }}/clickhouse-server.log</log>
<errorlog>{{ ch_log_dir }}/clickhouse-server.err.log</errorlog>
<size>10M</size>
<count>10</count>
</logger>
<http_port>8123</http_port>
<tcp_port>9000</tcp_port>
<!-- Port for communication between replicas. Used for data exchange. -->
<interserver_http_port>9009</interserver_http_port>
<listen_host>0.0.0.0</listen_host>
<include_from>{{ ch_etc_path }}/noc.xml</include_from>
<max_connections>4096</max_connections>
<keep_alive_timeout>3</keep_alive_timeout>
<!-- Maximum number of concurrent queries. -->
<max_concurrent_queries>100</max_concurrent_queries>
<max_query_size>{{ clickhouse_max_query_size }}</max_query_size>
<!-- Set the limit on the number of open files (default: maximum). This setting matters on Mac OS X, where getrlimit() fails to retrieve
the correct maximum value. -->
<max_open_files>262144</max_open_files>
<!-- Size of the cache of uncompressed data blocks used by MergeTree-family tables, in bytes.
There is a single cache per server; memory is allocated only on demand.
The cache is used only when the 'use_uncompressed_cache' user setting is turned on (off by default).
The uncompressed cache is advantageous only for very short queries, and only in rare cases.
-->
<uncompressed_cache_size>8589934592</uncompressed_cache_size>
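<!-- Illustrative sketch, assuming a clickhouse-client session: the cache above is consulted only
when a session opts in, e.g. run "SET use_uncompressed_cache = 1" and then a short query such as
"SELECT count() FROM system.tables". -->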
<!-- Approximate size of the mark cache used by MergeTree-family tables, in bytes.
There is a single cache per server; memory is allocated only on demand.
You should not lower this value.
-->
<mark_cache_size>5368709120</mark_cache_size>
<!-- Path to data directory, with trailing slash. -->
<path>{{ ch_data_dir }}/</path>
<!-- Path to temporary data for processing hard queries. -->
<tmp_path>{{ch_big_query_dir}}/</tmp_path>
<!-- Path to configuration file with users, access rights, profiles of settings, quotas. -->
<users_config>users.xml</users_config>
<!-- Default settings profile. -->
<default_profile>default</default_profile>
<!-- Default database. -->
<default_database>default</default_database>
<timezone>Europe/Moscow</timezone>
<!-- Configuration of clusters that can be used in Distributed tables.
https://clickhouse.yandex/reference_en.html#Distributed
-->
<remote_servers incl="clickhouse_remote_servers" />
<zookeeper incl="zookeeper-servers" optional="true" />
<!-- Substitutions for parameters of replicated tables.
Optional. If you don't use replicated tables, you can omit this.
See https://clickhouse.yandex/reference_en.html#Creating%20replicated%20tables
-->
<macros incl="macros" optional="true" />
<!-- Reloading interval for embedded dictionaries, in seconds. Default: 3600. -->
<builtin_dictionaries_reload_interval>3600</builtin_dictionaries_reload_interval>
<!-- Query log. Used only for queries with setting log_queries = 1. -->
<query_log>
<!-- Which table to insert the data into. If the table does not exist, it is created.
When the query log structure changes after a system update,
the old table is renamed and a new table is created automatically.
-->
<database>system</database>
<table>query_log</table>
<!-- Interval of flushing data. -->
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
</query_log>
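<!-- Illustrative sketch, assuming a clickhouse-client session: run "SET log_queries = 1", execute a
query, then inspect it with
"SELECT event_time, query_duration_ms, query FROM system.query_log ORDER BY event_time DESC LIMIT 10". -->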
<!-- Uncomment to enable part_log
<part_log>
<database>system</database>
<table>part_log</table>
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
</part_log>
-->
<!-- Parameters for embedded dictionaries, used in Yandex.Metrica.
See https://clickhouse.yandex/reference_en.html#Internal%20dictionaries
-->
<!-- Path to file with region hierarchy. -->
<!-- <path_to_regions_hierarchy_file>/opt/geo/regions_hierarchy.txt</path_to_regions_hierarchy_file> -->
<!-- Path to directory with files containing names of regions -->
<!-- <path_to_regions_names_files>/opt/geo/</path_to_regions_names_files> -->
<!-- Configuration of external dictionaries. See:
https://clickhouse.yandex/reference_en.html#External%20Dictionaries
-->
<dictionaries_config>{{ ch_dictionaries_path }}/*.xml</dictionaries_config>
<!-- Uncomment if you want data to be compressed 30-100% better.
Don't do that if you just started using ClickHouse.
-->
<compression incl="clickhouse_compression">
<case>
<min_part_size>10000000000</min_part_size>
<min_part_size_ratio>0.01</min_part_size_ratio>
<method>zstd</method>
</case>
</compression>
<resharding>
<task_queue_path>/clickhouse/task_queue</task_queue_path>
</resharding>
<!-- Settings to fine tune MergeTree tables. See documentation in source code, in MergeTreeSettings.h -->
<!--
<merge_tree>
<max_suspicious_broken_parts>5</max_suspicious_broken_parts>
</merge_tree>
-->
<!-- Protection from accidental DROP.
If the size of a MergeTree table exceeds max_table_size_to_drop (in bytes), the table cannot be dropped with any DROP query.
If you want to delete such a table without restarting clickhouse-server, create the special file <clickhouse-path>/flags/force_drop_table and run the DROP once.
By default max_table_size_to_drop is 50 GB; max_table_size_to_drop=0 allows any table to be dropped.
Uncomment the line below to disable the protection.
-->
<!-- <max_table_size_to_drop>0</max_table_size_to_drop> -->
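<!-- Illustrative sketch (hypothetical table name): to drop one oversized table without restarting
the server or disabling the protection, create the flag file once and run the DROP, e.g.
"touch {{ ch_data_dir }}/flags/force_drop_table" followed by "clickhouse-client -q 'DROP TABLE db.big_table'". -->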
</yandex>
<?xml version="1.0"?>
<yandex>
<!-- Profiles of settings. -->
<profiles>
<!-- Default settings. -->
<default>
<!-- Maximum memory usage for processing a single query, in bytes. -->
<max_memory_usage>10000000000</max_memory_usage>
<!-- Use the cache of uncompressed data blocks. Meaningful only when processing a large number of very short queries. -->
<use_uncompressed_cache>0</use_uncompressed_cache>
<load_balancing>random</load_balancing>
<max_query_size>{{ clickhouse_max_query_size }}</max_query_size>
</default>
<!-- Profile that allows only read queries. -->
<readonly>
<readonly>1</readonly>
<max_query_size>{{ clickhouse_max_query_size }}</max_query_size>
</readonly>
</profiles>
<!-- Users and ACL. -->
<users>
<!-- If no user name is specified, the 'default' user is used. -->
<default>
<password></password>
<networks incl="networks" replace="replace">
<ip>127.0.0.1</ip>
{% for n in groups['svc-clickhouse'] %}
<ip>{{hostvars[n].ansible_host}}</ip>
{% endfor %}
</networks>
<!-- Settings profile for user. -->
<profile>default</profile>
<!-- Quota for user. -->
<quota>default</quota>
</default>
<{{ clickhouse_user }}>
<password_sha256_hex>{{ clickhouse_password | hash('sha256')}}</password_sha256_hex>
<!-- <password></password> -->
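<!-- Illustrative sketch, assuming a POSIX shell: the hex digest expected here can be produced by
hand for testing with "echo -n 'secret' | sha256sum" (take the first field of the output and
substitute the real password for 'secret'). -->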
<networks incl="networks" replace="replace">
<ip>127.0.0.1</ip>
{% for n in groups['svc-clickhouse'] %}
<ip>{{hostvars[n].ansible_host}}</ip>
{% endfor %}
{% for n in groups['svc-scheduler'] %}
<ip>{{hostvars[n].ansible_host}}</ip>
{% endfor %}
</networks>
<!-- Settings profile for user. -->
<profile>default</profile>
<!-- Quota for user. -->
<quota>default</quota>
</{{ clickhouse_user }}>
<!-- Example of a user with read-only access. -->
<readonly>
<password_sha256_hex>{{ clickhouse_ro_password | hash('sha256')}}</password_sha256_hex>
<!-- <password></password> -->
<networks incl="networks" replace="replace">
<ip>127.0.0.1</ip>
{% for n in groups['svc-grafana'] %}
<ip>{{hostvars[n].ansible_host}}</ip>
{% endfor %}
{% for n in groups['svc-bi'] %}
<ip>{{hostvars[n].ansible_host}}</ip>
{% endfor %}
{% for n in groups['svc-web'] %}
<ip>{{hostvars[n].ansible_host}}</ip>
{% endfor %}
{% for n in groups['svc-haproxy'] %}
<ip>{{hostvars[n].ansible_host}}</ip>
{% endfor %}
<ip>{{tower_ip}}</ip>
{% for n in groups['svc-scheduler'] %}
<ip>{{hostvars[n].ansible_host}}</ip>
{% endfor %}
{% for n in groups['svc-discovery'] %}
<ip>{{hostvars[n].ansible_host}}</ip>
{% endfor %}
</networks>
<profile>readonly</profile>
<quota>default</quota>
</readonly>
</users>
<!-- Quotas. -->
<quotas>
<!-- Name of quota. -->
<default>
<!-- Limits for a time interval. You can specify multiple intervals with different limits. -->
<interval>
<!-- Length of interval. -->
<duration>3600</duration>
<!-- No limits. Just track resource usage for the time interval. -->
<queries>0</queries>
<errors>0</errors>
<result_rows>0</result_rows>
<read_rows>0</read_rows>
<execution_time>0</execution_time>
</interval>
</default>
</quotas>
</yandex>
#!/bin/sh
# PROVIDE: consul_template
# REQUIRE: LOGIN
# KEYWORD: shutdown
# {{ ansible_managed }}
. /etc/rc.subr
name="consul_template"
rcvar=consul_template_enable
load_rc_config $name
: "${consul_template_enable="NO"}"
: "${consul_template_users="{{ consul_user }}"}"
pidfile="/var/run/consul/consul-template.pid"
command="{{ consul_template_bin_path }}/consul-template"
command_args="-config {{consul_template_config_dir}}/ -pid-file=${pidfile} -kill-signal=SIGTERM &"
start_precmd="${name}_prestart"
extra_commands="reload"
consul_template_prestart()
{
install -d /var/run/consul
chown -R "{{ consul_user }}:{{ consul_group }}" /var/run/consul/
}
run_rc_command "$1"
# {{ ansible_managed }}
consul = {
address = "{{ consul_template_consul_server }}:{{ consul_template_consul_port }}"
}
{% if consul_template_log_level -%}
log_level = "{{consul_template_log_level}}"
{% endif %}
{% if consul_template_wait is defined %}
wait = "{{consul_template_wait}}"
{% endif %}
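# Illustrative sketch, assuming consul-template's -dry and -once flags: render templates to stdout
# without writing any destinations via "consul-template -config {{consul_template_config_dir}}/ -dry -once".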
[Unit]
Description=Consul-Template Daemon
Wants=basic.target
After=basic.target network.target
[Service]
Environment=HOSTNAME={{ ansible_nodename }}
User=root
Group=root
ExecStart={{ consul_template_bin_path }}/consul-template -config {{consul_template_config_dir}}{% if consul_template_dedup %} -dedup{% endif %}
ExecReload=/bin/kill -HUP $MAINPID
KillMode=process
Restart=on-failure
RestartSec=10s
LimitNOFILE=4096
KillSignal=SIGINT
[Install]
WantedBy=multi-user.target
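# Illustrative sketch, assuming the unit is installed as consul-template.service: pick it up with
# "systemctl daemon-reload" and start it with "systemctl enable --now consul-template".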
# {{ ansible_managed }}
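# Each template block below renders one ClickHouse dictionary definition from its .ctmpl source;
# the wait stanza batches Consul changes, requiring at least 2s of quiescence and re-rendering after at most 60s.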
template {
source = "{{consul_template_ch_dictionaries_dir}}/administrativedomain.xml.ctmpl"
destination = "{{ ch_dictionaries_path }}/administrativedomain.xml"
perms = 0644
backup = false
wait {
min = "2s"
max = "60s"
}
}
template {
source = "{{consul_template_ch_dictionaries_dir}}/alarmclass.xml.ctmpl"
destination = "{{ ch_dictionaries_path }}/alarmclass.xml"
perms = 0644
backup = false
wait {
min = "2s"
max = "60s"
}
}
template {
source = "{{consul_template_ch_dictionaries_dir}}/container.xml.ctmpl"
destination = "{{ ch_dictionaries_path }}/container.xml"
perms = 0644
backup = false
wait {
min = "2s"
max = "60s"
}
}
template {
source = "{{consul_template_ch_dictionaries_dir}}/interfaceprofile.xml.ctmpl"
destination = "{{ ch_dictionaries_path }}/interfaceprofile.xml"
perms = 0644
backup = false
wait {
min = "2s"
max = "60s"
}
}
template {
source = "{{consul_template_ch_dictionaries_dir}}/managedobject.xml.ctmpl"
destination = "{{ ch_dictionaries_path }}/managedobject.xml"
perms = 0644
backup = false
wait {
min = "2s"
max = "60s"
}
}
template {
source = "{{consul_template_ch_dictionaries_dir}}/networksegment.xml.ctmpl"
destination = "{{ ch_dictionaries_path }}/networksegment.xml"
perms = 0644
backup = false
wait {
min = "2s"
max = "60s"
}
}
template {
source = "{{consul_template_ch_dictionaries_dir}}/platform.xml.ctmpl"
destination = "{{ ch_dictionaries_path }}/platform.xml"
perms = 0644
backup = false
wait {
min = "2s"
max = "60s"
}
}
template {
source = "{{consul_template_ch_dictionaries_dir}}/pool.xml.ctmpl"
destination = "{{ ch_dictionaries_path }}/pool.xml"
perms = 0644
backup = false
wait {
min = "2s"
max = "60s"
}
}
template {
source = "{{consul_template_ch_dictionaries_dir}}/profile.xml.ctmpl"
destination = "{{ ch_dictionaries_path }}/profile.xml"
perms = 0644
backup = false
wait {
min = "2s"
max = "60s"
}
}
template {
source = "{{consul_template_ch_dictionaries_dir}}/vendor.xml.ctmpl"
destination = "{{ ch_dictionaries_path }}/vendor.xml"
perms = 0644
backup = false
wait {
min = "2s"
max = "60s"
}
}
template {
source = "{{consul_template_ch_dictionaries_dir}}/version.xml.ctmpl"
destination = "{{ ch_dictionaries_path }}/version.xml"
perms = 0644
backup = false
wait {
min = "2s"
max = "60s"
}
}
template {
source = "{{consul_template_ch_dictionaries_dir}}/interfacedescription.xml.ctmpl"
destination = "{{ ch_dictionaries_path }}/interfacedescription.xml"
perms = 0644
backup = false
wait {
min = "2s"
max = "60s"
}
}
# {{ ansible_managed }}
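# The command below runs after the rendered config changes, reloading telegraf; command_timeout caps how long that reload may take.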
template {
source = "{{ consul_template_templates_dir }}/noc_activator.ctmpl"
destination = "{{ telegraf_confd_path }}/noc-activator.conf"
command = "{{ telegraf_reload_command }}"
command_timeout = "60s"
perms = 0644
backup = false
wait {
min = "2s"
max = "60s"
}
}
# {{ ansible_managed }}
template {
source = "{{ consul_template_templates_dir }}/noc_bi.ctmpl"
destination = "{{ telegraf_confd_path }}/noc-bi.conf"
command = "{{ telegraf_reload_command }}"
command_timeout = "60s"
perms = 0644
backup = false
wait {
min = "2s"
max = "60s"
}
}
# {{ ansible_managed }}
template {
source = "{{ consul_template_templates_dir }}/noc_card.ctmpl"
destination = "{{ telegraf_confd_path }}/noc-card.conf"
command = "{{ telegraf_reload_command }}"
command_timeout = "60s"
perms = 0644
backup = false