Skip to content

Commit

Permalink
Merge pull request #585 from Capgemini/coreos
Browse files Browse the repository at this point in the history
Initial commit of work in progress on CoreOS
  • Loading branch information
tayzlor committed Jan 4, 2016
2 parents d14ce40 + 2d16850 commit 3d17c24
Show file tree
Hide file tree
Showing 62 changed files with 644 additions and 853 deletions.
89 changes: 52 additions & 37 deletions Vagrantfile
Original file line number Diff line number Diff line change
@@ -1,44 +1,66 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
require 'yaml'
require 'fileutils'

base_dir = File.expand_path(File.dirname(__FILE__))
conf = YAML.load_file(File.join(base_dir, "vagrant.yml"))
groups = YAML.load_file(File.join(base_dir, "ansible-groups.yml"))

require File.join(base_dir, "vagrant_helper")
CONFIG_HELPER = File.join(base_dir, "vagrant_helper.rb")
CLOUD_CONFIG_PATH = File.join(base_dir, "user-data")

if File.exist?(CONFIG_HELPER)
require CONFIG_HELPER
end

# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
VAGRANTFILE_API_VERSION = "2"
Vagrant.require_version ">= 1.7.0"
Vagrant.require_version ">= 1.8.0"

Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
# if you want to use vagrant-cachier,
# please install vagrant-cachier plugin.
if Vagrant.has_plugin?("vagrant-cachier")
config.cache.enable :apt
config.cache.scope = :box

config.vm.box = "coreos-%s" % conf['coreos_update_channel']
#config.vm.box_version = ">= 877.1.0"
config.vm.box_url = "http://%s.release.core-os.net/amd64-usr/current/coreos_production_vagrant.json" % conf['coreos_update_channel']

if Vagrant.has_plugin?('vagrant-vbguest') then
config.vbguest.auto_update = false
end

# throw error if vagrant-hostmanager not installed
unless Vagrant.has_plugin?("vagrant-hostmanager")
raise "vagrant-hostmanager plugin not installed"
config.hostmanager.enabled = false

config.vm.provider :virtualbox do |vb|
# On VirtualBox, we don't have guest additions or a functional vboxsf
# in CoreOS, so tell Vagrant that so it can be smarter.
vb.check_guest_additions = false
vb.functional_vboxsf = false
end

config.vm.box = "capgemini/apollo"
config.hostmanager.enabled = true
config.hostmanager.manage_host = true
config.hostmanager.include_offline = true
config.ssh.insert_key = false

# Common ansible groups.
ansible_groups = groups['ansible_groups']
# We need to use a custom python interpreter for CoreOS because there is no
# python installed on the system.
ansible_groups["all:vars"] = {
"ansible_python_interpreter" => "\"PATH=/home/core/bin:$PATH python\""
}
ansible_groups["mesos_masters"] = []

masters_conf = conf['masters']
masters_n = masters_conf['ips'].count
master_infos = []

# Mesos slave nodes
slaves_conf = conf['slaves']
ansible_groups["mesos_slaves"] = []
slave_n = slaves_conf['ips'].count

# etcd discovery token
total_instances = slave_n + masters_n
etcd_discovery_token(total_instances)

# Mesos master nodes
(1..masters_n).each { |i|

Expand All @@ -61,7 +83,7 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
machine.vm.hostname = node[:hostname]
machine.vm.network :private_network, :ip => node[:ip]

vb.name = 'vagrant-mesos-' + node[:hostname]
vb.name = 'coreos-mesos-' + node[:hostname]
vb.customize ["modifyvm", :id, "--memory", node[:mem], "--cpus", node[:cpus] ]
end
end
Expand All @@ -83,19 +105,14 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
consul_join: consul_join,
consul_retry_join: consul_retry_join,
mesos_master_quorum: conf['mesos_master_quorum'],
consul_bootstrap_expect: conf['consul_bootstrap_expect']
consul_bootstrap_expect: conf['consul_bootstrap_expect'],
ansible_python_interpreter: 'PATH=/home/core/bin:$PATH python'
}
# Apollo environment variables
apollo_vars = get_apollo_variables(ENV)
# Add apollo variables to ansible ones
ansible_extra_vars.merge!(apollo_vars)

# Mesos slave nodes
slaves_conf = conf['slaves']
ansible_groups["mesos_slaves"] = []

slave_n = slaves_conf['ips'].count

(1..slave_n).each { |i|

ip = slaves_conf['ips'][i - 1]
Expand All @@ -109,39 +126,37 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
# Add the node to the correct ansible group.
ansible_groups["mesos_slaves"].push(node[:hostname])

# Bootstrap the machines for CoreOS first
if File.exist?(CLOUD_CONFIG_PATH)
config.vm.provision :file, :source => "#{CLOUD_CONFIG_PATH}", :destination => "/tmp/vagrantfile-user-data"
config.vm.provision :shell, :inline => "mv /tmp/vagrantfile-user-data /var/lib/coreos-vagrant/", :privileged => true
end

config.vm.provision :hostmanager

config.vm.define node[:hostname] do |cfg|
cfg.vm.provider :virtualbox do |vb, machine|
machine.vm.hostname = node[:hostname]
machine.vm.network :private_network, :ip => node[:ip]

vb.name = 'vagrant-mesos-' + node[:hostname]
vb.name = 'coreos-mesos-' + node[:hostname]
vb.customize ["modifyvm", :id, "--memory", node[:mem], "--cpus", node[:cpus] ]

# We invoke ansible on the last slave with ansible.limit = 'all'
# this runs the provisioning across all masters and slaves in parallel.
if node[:hostname] == "slave#{slave_n}"

machine.vm.provision :ansible do |ansible|
ansible.playbook = "site.yml"
ansible.sudo = true
unless ENV['ANSIBLE_LOG'].nil? || ENV['ANSIBLE_LOG'].empty?
ansible.verbose = "#{ENV['ANSIBLE_LOG'].delete('-')}"
ansible.verbose = "#{ENV['ANSIBLE_LOG'].delete('-')}"
end
ansible.groups = ansible_groups
ansible.limit = 'all'
ansible.groups = ansible_groups
ansible.limit = 'all'
ansible.extra_vars = ansible_extra_vars
end
end
end
end
}

# If you want to use a custom `.dockercfg` file simply place it
# in this directory.
if File.exist?(".dockercfg")
  # Copy the user's Docker registry credentials into root's home on the guest.
  # NOTE: the original spelled the option ":priviledged", which Vagrant silently
  # ignores (unknown keys are dropped), so the script ran as the vagrant user and
  # could not write to /root. The correct option name is :privileged.
  config.vm.provision :shell, :privileged => true, :inline => <<-SCRIPT
    cp /vagrant/.dockercfg /root/.dockercfg
    chmod 600 /root/.dockercfg
    chown root /root/.dockercfg
  SCRIPT
end
end
2 changes: 1 addition & 1 deletion ansible-groups.yml
Original file line number Diff line number Diff line change
Expand Up @@ -18,4 +18,4 @@ ansible_groups:
"vagrant:children":
- mesos_masters
- mesos_slaves
- load_balancers
- load_balancers
1 change: 1 addition & 0 deletions ansible.cfg
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@ record_host_keys = no
jinja2_extensions = jinja2.ext.do
timeout = 15
gathering = smart
roles_path = roles

[ssh_connection]
ssh_args = -o ControlMaster=auto -o ControlPersist=30m
Expand Down
2 changes: 1 addition & 1 deletion group_vars/all
Original file line number Diff line number Diff line change
Expand Up @@ -35,4 +35,4 @@ datadog_config:
tags: "{{ mesos_cluster_name }}"
log_level: INFO


coreos_timezone: 'Europe/London'
14 changes: 14 additions & 0 deletions playbooks/coreos-bootstrap.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
# Bootstrap plays for CoreOS hosts.
# CoreOS ships without a Python interpreter, so both plays disable fact
# gathering: facts cannot be collected until the coreos_bootstrap role has
# installed Python (PyPy) under /home/core.
- name: bootstrap coreos hosts
hosts: all
gather_facts: False
roles:
- coreos_bootstrap
- coreos_timezone

# docker-py is required by Ansible's docker module used elsewhere in the repo.
# Version is pinned to 1.5.0 for reproducible provisioning.
- name: Install docker-py
hosts: all
gather_facts: False
tasks:
- pip:
name: docker-py
version: 1.5.0
5 changes: 5 additions & 0 deletions requirements.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
# Ansible Galaxy role dependencies for CoreOS provisioning.
# Install with: ansible-galaxy install -r requirements.yml
# (roles_path is set to "roles" in ansible.cfg, so they land in ./roles).

# Sets the system timezone on CoreOS hosts (used via the coreos_timezone group var).
- src: mkaag.coreos-timezone
name: coreos_timezone

# Installs a Python interpreter on CoreOS so Ansible modules can run.
- src: defunctzombie.coreos-bootstrap
name: coreos_bootstrap
55 changes: 15 additions & 40 deletions roles/cadvisor/tasks/main.yml
Original file line number Diff line number Diff line change
@@ -1,35 +1,10 @@
# tasks for running cadvisor
- name: destroy old cadvisor container
when: cadvisor_rebuild_container|bool
docker:
name: cadvisor
image: "{{ cadvisor_image }}"
state: absent
tags:
- cadvisor

- name: run cadvisor container
docker:
name: cadvisor
image: "{{ cadvisor_image }}"
state: started
restart_policy: "{{ cadvisor_restart_policy }}"
net: "{{ cadvisor_net }}"
hostname: "{{ cadvisor_hostname }}"
volumes:
- "/var/lib/docker/:/var/lib/docker:ro"
- "/:/rootfs:ro"
- "/var/run:/var/run:rw"
- "/sys:/sys:ro"
tags:
- cadvisor

- name: upload cadvisor template service
template:
src: cadvisor.conf.j2
dest: /etc/init/cadvisor.conf
mode: 0755
- name: deploy cadvisor service
sudo: yes
sudo_user: root
template:
src: cadvisor.service.j2
dest: /etc/systemd/system/cadvisor.service
tags:
- cadvisor

Expand All @@ -53,13 +28,13 @@
tags:
- cadvisor

- name: Set cadvisor consul service definition
sudo: yes
template:
src: cadvisor-consul.j2
dest: "{{ cadvisor_consul_dir }}/cadvisor.json"
notify:
- restart consul
when: cadvisor_enabled
tags:
- cadvisor
#- name: Set cadvisor consul service definition
# sudo: yes
# template:
# src: cadvisor-consul.j2
# dest: "{{ cadvisor_consul_dir }}/cadvisor.json"
# notify:
# - restart consul
# when: cadvisor_enabled
# tags:
# - cadvisor
27 changes: 27 additions & 0 deletions roles/cadvisor/templates/cadvisor.service.j2
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
# systemd unit template (rendered by Ansible/Jinja2) that runs Google cAdvisor
# as a Docker container. Replaces the old Upstart-based deployment, since
# CoreOS is systemd-only.
[Unit]
Description=cadvisor
After=docker.service
Requires=docker.service

[Service]
Restart=on-failure
RestartSec=20
# TimeoutStartSec=0 disables the start timeout: the initial `docker pull`
# below can take an arbitrarily long time on first boot.
TimeoutStartSec=0
# Leading "-" means "ignore the exit code": the file may not exist / the
# container may not be running yet, and that must not fail the unit.
EnvironmentFile=-/etc/environment
ExecStartPre=-/usr/bin/docker kill cadvisor
ExecStartPre=-/usr/bin/docker rm cadvisor
ExecStartPre=/usr/bin/docker pull {{ cadvisor_image }}
ExecStart=/usr/bin/docker run --name cadvisor \
--restart={{ cadvisor_restart_policy }} \
--net={{ cadvisor_net }} \
--hostname={{ cadvisor_hostname }} \
-v /var/lib/docker/:/var/lib/docker:ro \
-v /:/rootfs:ro \
-v /var/run:/var/run:rw \
-v /sys:/sys:ro \
{{ cadvisor_image }}

ExecStop=/usr/bin/docker stop cadvisor

[Install]
WantedBy=multi-user.target
14 changes: 12 additions & 2 deletions roles/consul/defaults/main.yml
Original file line number Diff line number Diff line change
Expand Up @@ -2,10 +2,20 @@
# defaults file for consul
consul_dc: dc1
consul_servers_group: consul_servers
consul_bootstrap_expect: "{{ groups[consul_servers_group] | length }}"
consul_advertise: "{{ ansible_ssh_host }}"
consul_config_dir: /etc/consul.d
consul_data_dir: /var/lib/consul
consul_atlas_join: false
consul_bind_addr: "{{ ansible_default_ipv4.address }}"
consul_retry_join: "{% for host in groups[consul_servers_group] %}\"{{ hostvars[host].ansible_default_ipv4.address }}\"{% if not loop.last %}, {% endif %}{% endfor %}"
consul_bootstrap_expect: "{{ groups[consul_servers_group] | length }}"
consul_client_addr: "0.0.0.0"
consul_atlas_join: false
consul_node_name: "{{ ansible_hostname }}"
consul_version: 0.6
consul_image: "
{%- if inventory_hostname in groups[consul_servers_group] -%}
gliderlabs/consul-server:{{ consul_version }}
{%- else -%}
gliderlabs/consul-agent:{{ consul_version }}
{%- endif -%}
"
5 changes: 5 additions & 0 deletions roles/consul/handlers/main.yml
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
---
# handlers file for consul
# @todo - handle restarts properly (choose one approach here)
- name: restart consul
service:
name: consul
Expand All @@ -12,3 +13,7 @@
wait_for:
host: "{{ consul_bind_addr }}"
port: 8500

- name: restart consul systemd
sudo: yes
command: systemctl restart consul
4 changes: 2 additions & 2 deletions roles/consul/meta/main.yml
Original file line number Diff line number Diff line change
Expand Up @@ -69,7 +69,7 @@ galaxy_info:
# - 9.1
# - 9.1
# - 9.2
- name: Ubuntu
- name: CoreOS
versions:
# - all
# - lucid
Expand All @@ -80,7 +80,7 @@ galaxy_info:
# - quantal
# - raring
# - saucy
- trusty
# - trusty
#- name: SLES
# versions:
# - all
Expand Down
22 changes: 22 additions & 0 deletions roles/consul/tasks/config.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
# Prepare consul's on-disk layout and render its main configuration file.
# Both consul_data_dir and consul_config_dir come from roles/consul/defaults.
- name: create consul dirs
sudo: yes
file:
path: "{{ item }}"
state: directory
mode: 0755
with_items:
- "{{ consul_data_dir }}"
- "{{ consul_config_dir }}"

# Render consul.json; any change triggers the "restart consul" handler so the
# agent picks up the new configuration.
- name: configure consul
sudo: yes
template:
src: consul.json.j2
dest: /etc/consul.d/consul.json
owner: root
group: root
mode: 0644
notify:
- restart consul
tags:
- consul
Loading

0 comments on commit 3d17c24

Please sign in to comment.