diff --git a/CHANGELOG b/CHANGELOG index 03d97cab..20d7535d 100644 --- a/CHANGELOG +++ b/CHANGELOG @@ -4,6 +4,16 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## [1.0.0-rc.3] - 2020-11-10 +### Change +* Added TLS Support for both GRPC and HTTP interfaces #76 +* Prometheus metrics are now prefixed with `gubernator_` +* Switched Prometheus Histograms to Summaries +* Changed gubernator.Config.GRPCServer to GRPCServers to support registering +with GRPC instances on multiple ports. +* Gubernator now opens a second GRPC instance on a random localhost port when +TLS is enabled for use by the HTTP API Gateway. + ## [1.0.0-rc.2] - 2020-11-05 ### Change * Add Service Account to k8s deployment yaml diff --git a/Dockerfile b/Dockerfile index d430f64b..6b7f8795 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # Build image -FROM golang:1.15.1 as build +FROM golang:1.15.4 as build WORKDIR /go/src diff --git a/Makefile b/Makefile index fadf6133..52187f2f 100644 --- a/Makefile +++ b/Makefile @@ -1,4 +1,4 @@ -.PHONY: release docker proto +.PHONY: release docker proto certs .DEFAULT_GOAL := release VERSION=$(shell cat version) @@ -18,3 +18,18 @@ release: proto: scripts/proto.sh + +certs: + rm certs/*.key certs/*.srl certs/*.csr certs/*.pem + openssl genrsa -out certs/ca.key 4096 + openssl req -new -x509 -key certs/ca.key -sha256 -subj "/C=US/ST=TX/O=Mailgun Technologies, Inc." 
-days 3650 -out certs/ca.cert + openssl genrsa -out certs/gubernator.key 4096 + openssl req -new -key certs/gubernator.key -out certs/gubernator.csr -config certs/gubernator.conf + openssl x509 -req -in certs/gubernator.csr -CA certs/ca.cert -CAkey certs/ca.key -CAcreateserial -out certs/gubernator.pem -days 3650 -sha256 -extfile certs/gubernator.conf -extensions req_ext + # Client Auth + openssl req -new -x509 -days 3650 -keyout certs/client-auth-ca.key -out certs/client-auth-ca.pem -subj "/C=TX/ST=TX/O=Mailgun Technologies, Inc./CN=mailgun.com/emailAddress=admin@mailgun.com" -passout pass:test + openssl genrsa -out certs/client-auth.key 2048 + openssl req -sha1 -key certs/client-auth.key -new -out certs/client-auth.req -subj "/C=US/ST=TX/O=Mailgun Technologies, Inc./CN=client.com/emailAddress=admin@mailgun.com" + openssl x509 -req -days 3650 -in certs/client-auth.req -CA certs/client-auth-ca.pem -CAkey certs/client-auth-ca.key -passin pass:test -out certs/client-auth.pem + openssl x509 -extfile certs/client-auth.conf -extensions ssl_client -req -days 3650 -in certs/client-auth.req -CA certs/client-auth-ca.pem -CAkey certs/client-auth-ca.key -passin pass:test -out certs/client-auth.pem + diff --git a/README.md b/README.md index f50507d5..3980617f 100644 --- a/README.md +++ b/README.md @@ -252,15 +252,15 @@ don't have either, the docker-compose method is the simplest way to try gubernat ##### Docker with existing etcd cluster ```bash -$ docker run -p 8081:81 -p 8080:80 -e GUBER_ETCD_ENDPOINTS=etcd1:2379,etcd2:2379 \ +$ docker run -p 8081:81 -p 9080:80 -e GUBER_ETCD_ENDPOINTS=etcd1:2379,etcd2:2379 \ thrawn01/gubernator:latest -# Hit the API at localhost:8080 (GRPC is at 8081) -$ curl http://localhost:8080/v1/HealthCheck +# Hit the HTTP API at localhost:9080 +$ curl http://localhost:9080/v1/HealthCheck ``` ##### Docker compose -The docker compose file includes a local etcd server and 2 gubernator instances +The docker compose file uses member-list for peer discovery 
```bash # Download the docker-compose file $ curl -O https://raw.githubusercontent.com/mailgun/gubernator/master/docker-compose.yaml @@ -271,8 +271,8 @@ $ vi docker-compose.yaml # Run the docker container $ docker-compose up -d -# Hit the API at localhost:8080 (GRPC is at 8081) -$ curl http://localhost:8080/v1/HealthCheck +# Hit the HTTP API at localhost:9080 (GRPC is at 9081) +$ curl http://localhost:9080/v1/HealthCheck ``` ##### Kubernetes @@ -287,16 +287,24 @@ $ vi k8s-deployment.yaml $ kubectl create -f k8s-deployment.yaml ``` +##### TLS +Gubernator supports TLS for both HTTP and GRPC connections. You can see an example with +self-signed certs by running `docker-compose-tls.yaml` +```bash +# Run docker compose +$ docker-compose -f docker-compose-tls.yaml up -d + +# Hit the HTTP API at localhost:9080 (GRPC is at 9081) +$ curl --cacert certs/ca.pem --cert certs/gubernator.pem --key certs/gubernator.key https://localhost:9080/v1/HealthCheck +``` + ### Configuration Gubernator is configured via environment variables with an optional `--config` flag which takes a file of key/values and places them into the local environment before startup. See the `example.conf` for all available config options and their descriptions. - ### Architecture See [architecture.md](/architecture.md) for a full description of the architecture and the inner workings of gubernator. 
- - diff --git a/benchmark_test.go b/benchmark_test.go index 4968275a..77b54132 100644 --- a/benchmark_test.go +++ b/benchmark_test.go @@ -31,7 +31,10 @@ func BenchmarkServer_GetPeerRateLimitNoBatching(b *testing.B) { b.Errorf("SetDefaults err: %s", err) } - client := guber.NewPeerClient(conf.Behaviors, cluster.GetRandomPeer()) + client := guber.NewPeerClient(guber.PeerConfig{ + Info: cluster.GetRandomPeer(), + Behavior: conf.Behaviors, + }) b.Run("GetPeerRateLimitNoBatching", func(b *testing.B) { for n := 0; n < b.N; n++ { @@ -51,7 +54,7 @@ func BenchmarkServer_GetPeerRateLimitNoBatching(b *testing.B) { } func BenchmarkServer_GetRateLimit(b *testing.B) { - client, err := guber.DialV1Server(cluster.GetRandomPeer().GRPCAddress) + client, err := guber.DialV1Server(cluster.GetRandomPeer().GRPCAddress, nil) if err != nil { b.Errorf("NewV1Client err: %s", err) } @@ -77,7 +80,7 @@ func BenchmarkServer_GetRateLimit(b *testing.B) { } func BenchmarkServer_Ping(b *testing.B) { - client, err := guber.DialV1Server(cluster.GetRandomPeer().GRPCAddress) + client, err := guber.DialV1Server(cluster.GetRandomPeer().GRPCAddress, nil) if err != nil { b.Errorf("NewV1Client err: %s", err) } @@ -105,7 +108,7 @@ func BenchmarkServer_Ping(b *testing.B) { }*/ func BenchmarkServer_ThunderingHeard(b *testing.B) { - client, err := guber.DialV1Server(cluster.GetRandomPeer().GRPCAddress) + client, err := guber.DialV1Server(cluster.GetRandomPeer().GRPCAddress, nil) if err != nil { b.Errorf("NewV1Client err: %s", err) } diff --git a/cache.go b/cache.go index d22455ae..4840ca58 100644 --- a/cache.go +++ b/cache.go @@ -85,9 +85,9 @@ func NewLRUCache(maxSize int) *LRUCache { cache: make(map[interface{}]*list.Element), ll: list.New(), cacheSize: maxSize, - sizeMetric: prometheus.NewDesc("cache_size", + sizeMetric: prometheus.NewDesc("gubernator_cache_size", "Size of the LRU Cache which holds the rate limits.", nil, nil), - accessMetric: prometheus.NewDesc("cache_access_count", + accessMetric: 
prometheus.NewDesc("gubernator_cache_access_count", "Cache access counts.", []string{"type"}, nil), } } diff --git a/certs/DO_NOT_USE_THESE_IN_PRODUCTION b/certs/DO_NOT_USE_THESE_IN_PRODUCTION new file mode 100644 index 00000000..87823514 --- /dev/null +++ b/certs/DO_NOT_USE_THESE_IN_PRODUCTION @@ -0,0 +1 @@ +These are for testing only diff --git a/certs/ca.key b/certs/ca.key new file mode 100644 index 00000000..b329a0d2 --- /dev/null +++ b/certs/ca.key @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJKQIBAAKCAgEAvv4lHiIN5r+D5Ih6g0EQEo3ZB3P/GPdXZCJlybrgXssbn+OD +mcU42Iq25IzF+Dq65SRZvg+ZGI59uXmh4T0eyyBY1JjQAb+7PpXWU+GYcFIEOMDR +6IHOG2KLURYgmGL9AcgFP38ZOYRFczZMwpK7JfLDbFPjZkjhcGhnzBLPF4y3xA75 +E0cseJ6OA8reEKzhxVXoECqnLnlcZTbltlLmc4EI7tE2dAgvZoIx6K5tpR+6aOem +0gz/YwniJdj5mHnNL5bEMtlESzFEn8txp8iTgsKgfSI+2IRkVA+zg7hpq50+m4Dv +P6qmZ+e92ovMCiC89y5Oua11n5DJQeeo0wf/B8BTkphJ9TYNcvZJybB+YGemBT7G +AwyxM0kcO2Bl8SH6FDrglXgQpYCJGVdOnGBqfQir288Rvt3Q463v6gnProJLkCPM +eODKSdiPbn2tbUwo1hf4fUHiQ52bQHH15cfE5rG0lokkdqYj3WRPQ1YkYuDLY95K +MoAEx+tyaVhIRyTc/vUOCif4QOG/o1IMcyfj15Z4NBHLBNnUuR3uC4Yv1tq68ZmI +ZxZTjStCogd7sAJOefJKA/AhHdYSlwfW70tVSy2w9+y8QlPErT4C4386KVyW1jUh +JXb8vpRzipx7Z/3vQneFGEef2V16I6EWm4UEJGoX3QGIF7ZWd25mrdEFZfMCAwEA +AQKCAgBCSP/m0ljSwZrrwLYMQZNrbRFqdcaOCqGo3gtHlPTz0TfYKOTuhDUzagkJ +jxXSDcf5aMFApjcy/5kAuwcEnerHAoXx2ssuIDXDBcuptvzp1n0imUEAmbRHas8B +KFXNrWUzrhCsNdcyUAaucqT3TmdnRzatrPuZ7ydWlNWZTNnUyYCpqaymFSiJADY3 +eLvTO7zreOIeYj89cN8xPXlB6smSQrEv+SoV4RVaVUsu/wKMsVNHv0X1Vo939uEm +04PkeDlPD9st7pu2IPY5Iylh2snfMt7yLuVyzZfoWL6rs1/xMJSe8YOXLAmuvA33 +AFejGDOc8dReuqW86EoA9n5wJzFKDIHI4lo1gThULKaP4bUORr20KY+txxUMCf5+ +zkzuFfaPMB5AVvj6w6tbw2XRYlyC3Kq3WkmwYWel0vvnLUrDn6x0PdI39tdQqJmQ +Eh4HEWfm8fV6evPx+DH8wus1p6Ke8ITbJ6AIB200Ju5D4ZNgY00K1Jyh3uvnIwDe +VHSr9znaRreHQiOfU81c4hI7ktZV8g5uAOToGGINSz0GkhQ6/yQJpqQvhT3eQvVa +mVKVM+8CQsZKw4xtd1hc33GFoN7Tt5MkpNGJkdD4aR2zFXfLuangV0xBgl43q8j5 +pmi7rDwsvUifra47Dhz9Wdm4i2mUKiR7NNhZ6yljYqNZu4MkYQKCAQEA7bZi8GhE 
+tRiciou3T3I7/z/iZIIWuSi011Gfl9bR4UfzVLK7KDR2ZcHufwWWrkk03jL0nj0f +UalTOFNnthOXbyW1aq+TTHI15n2nK56tSKzHqDe13kTbxuq0jEXA4dFT3m+oj4LC +KupZYdXVA5100kYbqIRfaxvFAVTXt05IEcIbbYOo4/IrXXKw4OOGnzojc7l8zNKB +zmF7sfz+HBk5aQoujiEpi8mcHq8UyyektBSO2VN/Y7DvytfUrjYHMbs7345N93xk +Re2/AifTa+T/PYL1XPRLjhL2zoh9z5cc/XHan514Lxj9JGBTeshRB09l1a4KT9us +OIiriE3J6zj54wKCAQEAza+j67F5RQBv8Tt7lB/T6apYB9EmXGScZOjR3fgvappd +9lvy8nXu64b2dF25Gh5AcVzGrKM0jeYXEJOJcC1lu4wwFGBUSP+bM1YC7omwUacG +Xjwy3EE56m6I+7E9ndBLD3boDKmCPoAIzBh96aMDGKsCEkejBuOEp41UVyovvkyK +ig17JSyvKDPc2YF2JpA8TAZgkRohoKXQMp18CnGXEcjLTFkF8mLqAVv3lwJy+5xM +TM8+A4+xvmc/pTm06wiogbsdCCUb/7JqhjTazXvvxOfB/e4D9quPoc3cPbLTSq+d +jzQGt604lFuPlInk6UJw8cHEc9ehn44KbroyJT7gsQKCAQEAyCZSZhOVDlpbrDf5 +r7YCmGek6nWyRlLk+YsrckCZVTMsyfr3pOGPcxx4AJGnDrZrAlArMXVLgomsnXd0 +kpUqY5Z/iwWsY6iig6D2+b5QLynzrkrCIhUea/1A6f7tafXDxT2E0tkJPfM2MS9H +fRS9wTLwpNJYOSoXlYhnXVtXSUSDrZE2yj8kjjk8fw50Ums0YIMkdp0kWK4x5Wqc +VvJSKYQ+MMPxZFbr0dYfDvMhNdM9d/VbBIh9TvCtjcXGBvScdB4wvZoKH+sPcfQw +it80ngk/KPY1C7oh/0JjlD+rVCbiKpT/FcDXnCJTB8XUm/AZUXKKEjVna+5/Z3P/ ++MNvewKCAQEAwQqLSfWy3zPd3AX7obWNacxZ+lwtKKG0tnBcJ3t65Q4kCceaaDyP +E7YDMIuV4hFqYfq06+nwtQyxsPkHEKVKyY50wWr3L9vViYS8E6xeMwQTUfYltdnx +xTggkDh0n9yR1d2/Q8MDXi1EFGkYI2K+0TQOKaHaO+jk42wdMAGD9ZJYo+CrJuSd +L5odOHXssZzFOoTxtL1VujRlBlwPwq2BH0vYobsbfbWf8c6ivLOrvsGeSqhmh2kh +ZJX6gdN7HOtvWvKF+NL7SCvnFjYc9KXRDniE9RGh3qx9jVprzew7qejQc0pc055b +b8HPK5WPpeyZnAxDmIVURy9EU0+lKJeuwQKCAQBWtlQNdCfpOCSBDCZQ7y2Igbv9 +hYCK4+1KaqnGVCvd1XrVLzykCIlyCbMDum3Aiv4jMPFg+jtbEE8q5L6OD6bRIbdU +l7jQdGiCPcfp/aezb71scMC/YdYwDAuYkfDKylxfdvalqepvQ952HzbFv6qSW0nU +NpVwrgE3ZkFYzu2fV9u4mubkxb4FOYAlVyOnX+VVjUwBFD6MpUG3sTPjfj0Tgd1m +BKueLwgfbQJa6i+TnCq+PEAFXlYfkC/gyuXbCgynFao2tBpJzIXPJj+lNascXeNT +6PahNZ1mElSNYyEA4INVDjUGpGPVnkBRkmUIodfwzGrIheRR3khOl2MPf+8z +-----END RSA PRIVATE KEY----- diff --git a/certs/ca.pem b/certs/ca.pem new file mode 100644 index 00000000..b897751e --- /dev/null +++ b/certs/ca.pem @@ -0,0 +1,29 @@ +-----BEGIN CERTIFICATE----- 
+MIIE+jCCAuICCQD4067NpJ3JHjANBgkqhkiG9w0BAQsFADA/MQswCQYDVQQGEwJV +UzELMAkGA1UECAwCVFgxIzAhBgNVBAoMGk1haWxndW4gVGVjaG5vbG9naWVzLCBJ +bmMuMB4XDTIwMTAyNjIxMjkwMloXDTMwMTAyNDIxMjkwMlowPzELMAkGA1UEBhMC +VVMxCzAJBgNVBAgMAlRYMSMwIQYDVQQKDBpNYWlsZ3VuIFRlY2hub2xvZ2llcywg +SW5jLjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAL7+JR4iDea/g+SI +eoNBEBKN2Qdz/xj3V2QiZcm64F7LG5/jg5nFONiKtuSMxfg6uuUkWb4PmRiOfbl5 +oeE9HssgWNSY0AG/uz6V1lPhmHBSBDjA0eiBzhtii1EWIJhi/QHIBT9/GTmERXM2 +TMKSuyXyw2xT42ZI4XBoZ8wSzxeMt8QO+RNHLHiejgPK3hCs4cVV6BAqpy55XGU2 +5bZS5nOBCO7RNnQIL2aCMeiubaUfumjnptIM/2MJ4iXY+Zh5zS+WxDLZREsxRJ/L +cafIk4LCoH0iPtiEZFQPs4O4aaudPpuA7z+qpmfnvdqLzAogvPcuTrmtdZ+QyUHn +qNMH/wfAU5KYSfU2DXL2ScmwfmBnpgU+xgMMsTNJHDtgZfEh+hQ64JV4EKWAiRlX +Tpxgan0Iq9vPEb7d0OOt7+oJz66CS5AjzHjgyknYj259rW1MKNYX+H1B4kOdm0Bx +9eXHxOaxtJaJJHamI91kT0NWJGLgy2PeSjKABMfrcmlYSEck3P71Dgon+EDhv6NS +DHMn49eWeDQRywTZ1Lkd7guGL9bauvGZiGcWU40rQqIHe7ACTnnySgPwIR3WEpcH +1u9LVUstsPfsvEJTxK0+AuN/OilcltY1ISV2/L6Uc4qce2f970J3hRhHn9ldeiOh +FpuFBCRqF90BiBe2VnduZq3RBWXzAgMBAAEwDQYJKoZIhvcNAQELBQADggIBAGU4 +V3YItAgFN8hp+ipVBmwz2Fi/ui+/RBuz11zhpYg3V1BZIcsHt1QaWhwfOydipeiW +jScQ2fu0nawJlpM275R63xeJcNlp1qR0cbKFP5u7V5EOLIcwpOACKZ9rJUS3IrQ1 +yxdaM/jlh4y3wckiCC4+vnXtWa4EX5/euDlBU9hEJxHhwojEbgd1W91tGjkzv/t8 +UzIuxjWLMBfcVaKSiFOg8fBZttDiI578/rTz560+wtxwxgriK0ZZU01W9do9x+Yl +tHToZvIB6vwfALWGhiVSNv5X5l40akRFRHuIOZqrRrP+3Avhq6QReYeaeI4C7eCw +aNaDIEj9+5b/N7CkHwgI5gaogQtx4brgDOF+bw1+1bvQ3LCG1f12AKX2E+YEpr/w +/lv96VFPnmktadnCGgzwiN3poEBz6seEtRWqFWD2yBySy5CSuhmo0MOGuYgyn2/2 +nYjB0oWyT7dlanqtv+N4xdV+0EqqQANfnHBd4AUOZiDcQPXpygn1JsGr29VxTxh4 +xN8rgcHEiDYRw78MHLxAXM5C8mhqLeQxGYHsILwAGeFFGmFMontiEnrKdSUqEgZ2 +W1yl0ZPehOLoen1aheAem5gvFV5AMB6iQqiG+CGUFeLxtHz1odpYHpR54NKKh0tj +6pitN2Yt2GIiW4REmWP91B9ngWhSXpGHlB48mbgO +-----END CERTIFICATE----- diff --git a/certs/ca.srl b/certs/ca.srl new file mode 100644 index 00000000..84be7777 --- /dev/null +++ b/certs/ca.srl @@ -0,0 +1 @@ +A12D63448A5A6809 diff --git a/certs/client-auth-ca.key 
b/certs/client-auth-ca.key new file mode 100644 index 00000000..1c077645 --- /dev/null +++ b/certs/client-auth-ca.key @@ -0,0 +1,30 @@ +-----BEGIN ENCRYPTED PRIVATE KEY----- +MIIFHzBJBgkqhkiG9w0BBQ0wPDAbBgkqhkiG9w0BBQwwDgQIJCKFdsqSX+sCAggA +MB0GCWCGSAFlAwQBKgQQhfE6viFG/7MEMoDHe0N5agSCBNAv5UKv5KIAbN7bu+3Q +vQaSQqFtEjWHZ0op7MtwtYrZdRMU3RYGkaXEqBNPImD5Z7tP7qPIDDh9VRVgHk6K +tQFRmr/xqsUTmsz74VGfieyxl92+IEhUaIaBgIcW7TStoQpVV+mHBi1hfvGpecmU +VbEwM8ToYlqHxGvoK6AAANP3bOezGlv4DqNI87gVlp2ujctnKZ8dY007WrsiUCVI +GjT179kN6oiRxKKRehsrVgY9WusM5sM2lNTbwY+G7r/u3Hk8wkfIB0Iin3k8QwV7 +OpYaWkbxrRCDsAOvmc6xQhUjJjjtAn/EKcpstvfxpXyTSJHpK76GoHkBN2WOvgcf +LinOYkld+Zx/fOVDjuESqMCvPypPb3n2i45rZ8pVCc6jk2/JaZWE8ldFDmxvp04A +NX3sb0+THMOfejgSNBPeNUXcqjI2vbY3thdkzf+IvxOk2LIj1mHGratAcBW9MPqE +nUKkY4OApt8fKb27xL5g6YXf5HoMHckEcB9krmDfWte/qtXmHl6ZUv/EqpQsrIvz +yTpTJ4DxlnalWv9Wp65juZxAJbaDYArqTyttnDIvNbTQ5Bm3l1iGa2CJnEITUbF1 +huQ4lxJmyrZnS+5ki76McTCo6ND/3vqeyCbc/uIftbCxefoUxFA0O3ZGwK7EZUOH +d3nVT/+XH8ivmZ5MDPA9qiqAtUdvPh87QPzeIGgw91xiHcjOnEcJlnxPaFqLWx2F +Me0UwAXjmQgmjegL23+NQLPRh5mXwmgawAbyH0Cm0WhCumBeEmonXlmm37OXz0GX +zi3uLmxvY0jnk37+oqUDbKw0v5xI3tar2BDl/+/Q4PgC4mLWFFWrGT4Fho4yYSr6 +7eKLahPl1a1jk+7OvWivct8QkbXVXDZHz6/HVbIi+OwwVeyEexH0DCzIvgqXSHSJ +pkEQCHoyP+vYcpLeRbT37OLuPY9MQckW8a+EcGUDn5dJxsHKTUSdn05ILk8g71t2 +qag6BO0P6XJvPAJo4QyB2Om2PpoeEjbaT6rtjoT4tkYbD3/v59UNEYpnJR3knYTw +HEsieTXTGLV0D3iojiwU2G5dcndJUxmRMexCwPgMJw0spb35mUbuKKqYcSrZ1K0m +RSFeT9/RCVVzbul91IUARPVaUyE1FzyN1RALgL84HKVPwMsfOjaXkvAIvvNYT1Ag +sFRYRYpu/aEClZNGaRrbu1jmbew1wHeD70b1iy8GjRhFz70igT+H3i0bVHEM/6uq +VRMLR7LsfXlVgelyvWrI3p6yjKEGwgVNdWQM7FbbPGZOUcwFDlgTqzr551szQm4Z +eRUN5KMcxMlmkIL1enMsVMG0MVi6o1ecF7qoJY+GSgoVEVYyYMFj+FBq0+jZfQPK +YQsFB4hyQgUQiZE8fkOXjXVp+cdZtLcehHSfHtiJruPcWPDhijs1A9VD2vIDRgfv +VU75JTWc/S/FwwfmhYE1R59XZd5avhUETq7uY16fhYROYVKSPUStPHDnjOL4vJag +KBtObke1WbprPMINj9qxTSRddVRp15BlDSry492k/fMdcaxnpPQ5LTm4dkx4hkKc +G3iPHtTMc5rtzdCzuaDZKYuCqK5xAHXLLUXJyN/44jWaS470c7FCzwGlfGmYbgYo +PeD0sov0pRLf1E9XTqzqfu8sYg== +-----END 
ENCRYPTED PRIVATE KEY----- diff --git a/certs/client-auth-ca.pem b/certs/client-auth-ca.pem new file mode 100644 index 00000000..db3e9de6 --- /dev/null +++ b/certs/client-auth-ca.pem @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDajCCAlICCQD4bLevNK0t4TANBgkqhkiG9w0BAQsFADB3MQswCQYDVQQGEwJU +WDELMAkGA1UECAwCVFgxIzAhBgNVBAoMGk1haWxndW4gVGVjaG5vbG9naWVzLCBJ +bmMuMRQwEgYDVQQDDAttYWlsZ3VuLmNvbTEgMB4GCSqGSIb3DQEJARYRYWRtaW5A +bWFpbGd1bi5jb20wHhcNMjAxMDI5MTY1OTA1WhcNMzAxMDI3MTY1OTA1WjB3MQsw +CQYDVQQGEwJUWDELMAkGA1UECAwCVFgxIzAhBgNVBAoMGk1haWxndW4gVGVjaG5v +bG9naWVzLCBJbmMuMRQwEgYDVQQDDAttYWlsZ3VuLmNvbTEgMB4GCSqGSIb3DQEJ +ARYRYWRtaW5AbWFpbGd1bi5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK +AoIBAQDFC/R6UkS7kZhOHevochs6pLrCcfsF/fPXc9HESlLbBlrJRvM3snvAKaO0 +GDZwrwRjvcV4RQDnblDuWRTLMODFfQVT95Y0M5vRd/mwj6AIlMjP0+RkWXXyzeYm +V1hU/I3OlIWfzLQ3JistIHQ9Yai8x0GnXsM70nRQlgE8+582FKaUzyBARXDS2N45 ++Nck+7TyHKhxE/1Qpcj1r/Y3mUScDGEUZWhMAYI5mpoaZ9tVIGODsZeIj01PGQMg +xkWYrI+l/+fIUZF1kZOMOvBg/Zq4/zIkjBKYm+UlmJ4sev8AwIlxmYT4UsI9O05k +gBsLg0mNji14wYkFIFjqCHJ7LAVbAgMBAAEwDQYJKoZIhvcNAQELBQADggEBAEdf +AGOHroICT4b0bPDK2XZTtl6wbCvrPeuwCCJho6ODi19we2FOIbPwoGeJAE40L0S/ +fJP+GBg22uUtF4nvEXrRhIKhQeQx7a6dg0ovVfmhTbVJEhCxfaY7mp9sCqEPV1Up +NqHYFp9dgBho7kakTODSfqw2WCyl1E3zOAa9ALOF/ynSor9IZeaD/ZrHe3kyYqwY +Hanzq/MXIztBNBSZE1xf18DVPDDw9vfSS9KY3RKw8bHRczpd2CMNZ/Ma5cbgwehv +gRdbBWum36a9SDUiK2LnjgJ8a2GWg+FuE2Mdzo2xoIr+Ennj9F6E5ZfZs5sueiSO +H8l75DteSnn4+9lbPCE= +-----END CERTIFICATE----- diff --git a/certs/client-auth-ca.srl b/certs/client-auth-ca.srl new file mode 100644 index 00000000..9e22bcb8 --- /dev/null +++ b/certs/client-auth-ca.srl @@ -0,0 +1 @@ +02 diff --git a/certs/client-auth.conf b/certs/client-auth.conf new file mode 100644 index 00000000..30ece2e1 --- /dev/null +++ b/certs/client-auth.conf @@ -0,0 +1,2 @@ +[ ssl_client ] +extendedKeyUsage = clientAuth diff --git a/certs/client-auth.key b/certs/client-auth.key new file mode 100644 index 00000000..cc77e64a --- /dev/null +++ b/certs/client-auth.key 
@@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAuFF5DAhUZG0SGRkn4Mp2JmQRD670Fxoo41lHfG8LlxymuXAi +LKux/vblNd6DA4SF+tZ4nje00wSO0rlAM0OF4zoiz3bBSTw2XVsXkaW0GsWuCyEj +1UvRmWoKScANKchOuHmiLgQkoOkGs47iGv6Lsl+ffBKe3TRLqu4iT7uXRYYtbDVd +CTd9dCRu29+XMGKJ1ns2L6IH/KCenK1kIzBDWGvA4XcHFrjdm0zFr936wPbN6aRI +/RTlZEhuuMRYU5r37I4Mabe/IfG0GTaihSwD3pasKbxlrivha/g50jmQDoueyWsN +qgcKdCiu8fqWOS2XdPxQBgqA+WyPqq+Mb24ZCwIDAQABAoIBAElo+aMXNjWBU3H3 +mPBo8sG7nSf38HO7EYnrJxOiTqVy3dyBkrrj71KzFvtrkha2k34iKYwqd7SL3i6D +ZdSFEjA8GV0z2SvH9YcdHrf9nUwEa5s3KcRsHCDUISDaguOfSesjVrqpH8WOygJ9 +6AQEWbNDbovGBsvnZjPAAJ5tAoTHi/R3N1koRlqTfGnsVouQLq5YsFQxqs5/V7dH +s/F2hIn5021IVrWs4Olt+94Oavs6JBIMPm0PdmixikkhUF9Y3J58yd3/tkCHJpx/ +GymgMj0sl7KmNF+Jbk0iXEJiG3abuYvqHqCjZs40vgMZW4IvOFDddGgw0ve76S2V +j1T7rhECgYEA8wH2VFM1wGDf1p3iwiIKBNAhW4J3IuTVD0CstaPPksekNdnHz3kd +vT5N1845XuEa3UlNcCFgeV8D/+lHxc0mrilb9+FZzl4MK3h0oLsb8sLueQeotK3f +pd/rQsw15mBkXpFQlefUrzgstXpNdEfAZaKd70Lqypq7AOWPJkWM5T8CgYEAwiw7 +CMknsvcLA5BEREuyCq28KOTQ/OeFMpeeksABegy0qMuV1cYsr6uG4+CPtVayk/ss +xHwUvRHXXKPdsV+pn2PP3NZDfxyLVMS3LsKYeSx1kF9RJ+l3qZ92noL/7/mGAtij +g9WOiM38XVm10PSs8IqAEI75ZGlWCwnK7Q2VnTUCgYACuLzniN8LPoqDPtVxUyxF +jYcyHS30aBeyygilKCaFAFNofv3r5vFcUzxP9HFUDLVeURna7aTE9zl2PkidgIS0 +YqYzCoUU+JyuR/UWb8IKYACHvnw3OdNNakqaPutDn0TAgmQiqawKIljt12bSrJMN +EFsweNFkX4NEqU2HIjRHxQKBgGo9bSeHeFMxXDNc8h00FXxGRtdRKw/VVUmzL643 +pBc1cHuSuK64uaZ8gVeZfMfJYfgZzArNoUM5yc4EUr5ECzkMkaTRDykzYwDEiT3q +dyaFruWJYYwm77Q9bdeY8ZRJwIs6IW12oYA0xEoHVbW4yg7qmNt2fvnzsIJln0RI +1H2pAoGBAIKTBFfvLKGg/YnLnJ30vVYaMW6vAHkIAueFWW1j9do1qkZG2dN9ucLF +JG6KOCprReJpAHut/VLFnxdsCSmy1simhe2oWZYQTi0UiWf/E5aSJnHhXu6eTizk +ERRq8Ewf2HL5fs4dk7qQLJNdQCR74FyEJNuoWmWlsLBdM4SSXCxo +-----END RSA PRIVATE KEY----- diff --git a/certs/client-auth.pem b/certs/client-auth.pem new file mode 100644 index 00000000..398e6b94 --- /dev/null +++ b/certs/client-auth.pem @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDfzCCAmegAwIBAgIBAjANBgkqhkiG9w0BAQUFADB3MQswCQYDVQQGEwJUWDEL 
+MAkGA1UECAwCVFgxIzAhBgNVBAoMGk1haWxndW4gVGVjaG5vbG9naWVzLCBJbmMu +MRQwEgYDVQQDDAttYWlsZ3VuLmNvbTEgMB4GCSqGSIb3DQEJARYRYWRtaW5AbWFp +bGd1bi5jb20wHhcNMjAxMDI5MTY1OTA1WhcNMzAxMDI3MTY1OTA1WjB2MQswCQYD +VQQGEwJVUzELMAkGA1UECAwCVFgxIzAhBgNVBAoMGk1haWxndW4gVGVjaG5vbG9n +aWVzLCBJbmMuMRMwEQYDVQQDDApjbGllbnQuY29tMSAwHgYJKoZIhvcNAQkBFhFh +ZG1pbkBtYWlsZ3VuLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +ALhReQwIVGRtEhkZJ+DKdiZkEQ+u9BcaKONZR3xvC5ccprlwIiyrsf725TXegwOE +hfrWeJ43tNMEjtK5QDNDheM6Is92wUk8Nl1bF5GltBrFrgshI9VL0ZlqCknADSnI +Trh5oi4EJKDpBrOO4hr+i7Jfn3wSnt00S6ruIk+7l0WGLWw1XQk3fXQkbtvflzBi +idZ7Ni+iB/ygnpytZCMwQ1hrwOF3Bxa43ZtMxa/d+sD2zemkSP0U5WRIbrjEWFOa +9+yODGm3vyHxtBk2ooUsA96WrCm8Za4r4Wv4OdI5kA6LnslrDaoHCnQorvH6ljkt +l3T8UAYKgPlsj6qvjG9uGQsCAwEAAaMXMBUwEwYDVR0lBAwwCgYIKwYBBQUHAwIw +DQYJKoZIhvcNAQEFBQADggEBAEj2iWqIEP14MGqh9jaTIeg9OVEd1b+Df6jKTtSJ +BdfhTozT8hM5bUQxKzsE58twuTmH9M6VQViEBhBQ8zk7pWlIZe/qHM5qbDFytULR +JNLhLukCR+kabw2lHL+MRiljzcwQMrOV+uPgI2XFgRe+ow18nfeEIK2tzclx9y1H +TYtqCc3ndjK8ZHh5pKRd4GBMkXgN+QeETj3Pr8+jTFLlMynpwKJMwi/uAAkagfFO +PPVBvIMwnYhV9bPF/AsOs4B+DYkK+eY/RM6POuzGeIs9g3SCVYc7lrYKBMcVfuZI +LZAV5B5E5XKePXe3cVgfgEto7OSL1hjiMWZev98baEA6LU8= +-----END CERTIFICATE----- diff --git a/certs/client-auth.req b/certs/client-auth.req new file mode 100644 index 00000000..531ce6fa --- /dev/null +++ b/certs/client-auth.req @@ -0,0 +1,17 @@ +-----BEGIN CERTIFICATE REQUEST----- +MIICuzCCAaMCAQAwdjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlRYMSMwIQYDVQQK +DBpNYWlsZ3VuIFRlY2hub2xvZ2llcywgSW5jLjETMBEGA1UEAwwKY2xpZW50LmNv +bTEgMB4GCSqGSIb3DQEJARYRYWRtaW5AbWFpbGd1bi5jb20wggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQC4UXkMCFRkbRIZGSfgynYmZBEPrvQXGijjWUd8 +bwuXHKa5cCIsq7H+9uU13oMDhIX61nieN7TTBI7SuUAzQ4XjOiLPdsFJPDZdWxeR +pbQaxa4LISPVS9GZagpJwA0pyE64eaIuBCSg6QazjuIa/ouyX598Ep7dNEuq7iJP +u5dFhi1sNV0JN310JG7b35cwYonWezYvogf8oJ6crWQjMENYa8DhdwcWuN2bTMWv +3frA9s3ppEj9FOVkSG64xFhTmvfsjgxpt78h8bQZNqKFLAPelqwpvGWuK+Fr+DnS +OZAOi57Jaw2qBwp0KK7x+pY5LZd0/FAGCoD5bI+qr4xvbhkLAgMBAAGgADANBgkq 
+hkiG9w0BAQUFAAOCAQEAhrHrhsYmeCpMUSqrgYcEo3swwb4EvdcmZESzTsSnePt8 +21FGoY7ccXr5itUbH2VeFz02u171L+14wfQ3zjsgZt7orMC6qsCt84nYnXJ8mjMP +aQ5GprB4aXSgsdMZk1YtSrTa094SlcSuLRLnH/xtIa2RiLr06zQZZWVljwDvL8nZ +D/gFrC8nmtcW1J/0RXfDxdzpdAgwhiMg9Pwqqntz4IO0eTFma/Vq3MdWff+Hee0W ++j0eaqvkX+dBlYPOKg3lpN8ZFbw3pUWC9yKT4hRjaa8JczyCxZsG4MsMv88xa7rm +pjNlSwggbr7ZI4/XST5fdKlSylg1Py72HLuz7L7h6g== +-----END CERTIFICATE REQUEST----- diff --git a/certs/gubernator.conf b/certs/gubernator.conf new file mode 100644 index 00000000..b1fcdf08 --- /dev/null +++ b/certs/gubernator.conf @@ -0,0 +1,17 @@ +[req] +default_bits = 4096 +prompt = no +default_md = sha256 +req_extensions = req_ext +distinguished_name = dn +[dn] +C = US +ST = TX +O = Mailgun Technologies, Inc. +CN = localhost +[req_ext] +subjectAltName = @alt_names +[alt_names] +DNS.1 = localhost +IP.1 = ::1 +IP.2 = 127.0.0.1 diff --git a/certs/gubernator.csr b/certs/gubernator.csr new file mode 100644 index 00000000..3c49c7a1 --- /dev/null +++ b/certs/gubernator.csr @@ -0,0 +1,28 @@ +-----BEGIN CERTIFICATE REQUEST----- +MIIE1zCCAr8CAQAwUzELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlRYMSMwIQYDVQQK +DBpNYWlsZ3VuIFRlY2hub2xvZ2llcywgSW5jLjESMBAGA1UEAwwJbG9jYWxob3N0 +MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA3KZ3GOVs2d/ZXHGTgFw5 +a5+s14f1aszdtFoV45g4QQ8mEvZAsfa77Rn5UhtIcVu3e1Uk4VVOgQR5wF3RDnmH +mllxYctnCTlS2a9NU7+d6Jo0N75eQezWvePIskqUG6hIzYijJheUESJ5Jm8LNsf8 +x/VONxCDQB+fUNwAPQsr/nCsFFhGCxfaLXV9pB1ysrPX1eH0hFMn0mL2XS40xX8M +9pPfAS15Xn1Pnq+jpwmHwARLorHVNNJv8/o+sz/TmnWl4SXOsWoRz8OWDWHMbrjH +UaJnCfnsPHSdaMoH9upsN+UQYeYfj+y89gGZdO/kwLxQAEdNaDku9HC6o5dRo/tP ++rOVCs1mQCZKql88tT0RJr5O0xcN5Y0rgHtBavrOQztXinLBOH8Yhb5kH5FBcPCo +BaWOW2gXNDL+/dCXHVJlxZdobWfDoAL7d0IyEhlRKQV0skUQIdRvcrWcIXDVDwHn +NAJENknw11QN5BjvSIWJFU+nfQaiGrrKFtclP2bap7B8EwxIBkOIl2tGTXCz3NBB +vRMbBp2OZ6yNyzWeiusx2JCF8awPCTWRIvLICMcZoG3ZYB/ZCZokcs3fU4Fra0IR +4u3WBn7nygmG855RXS74sM1XNffBang38lwNqEX2v8qmrpwmRDENx1+cL4O2DE2S +E5aktxhPB/3jZm+ONaGez3ECAwEAAaA/MD0GCSqGSIb3DQEJDjEwMC4wLAYDVR0R 
+BCUwI4IJbG9jYWxob3N0hxAAAAAAAAAAAAAAAAAAAAABhwR/AAABMA0GCSqGSIb3 +DQEBCwUAA4ICAQBFObKZiTaNkXLPUQ+1zTextGPlbrWHBOOsYPOtqBmu1+CFz7pX +Q8qiO4pboXwPs/Y51QBvxK1z/U97S2dMS9Jyi6tGMrl72fWsyiGeKHxFHqfCCZFr +vPRmlP5DXBgAyQASkU7ULrP1Pgvn2SDOhVLP6V31qvVybxL9I0vqKTz9ul/2rAmZ +6COjxoWNDywd9FUdARRpoyEJfp/dzwA1Ww4mcnrJrf7h2X9+ht6tr2hVieRrOhlm +foCKv1rSUKbua7hJ1JC6dE/L1rpH8315TpTkVhbCS/RxqGK+sLyS86u78Ka6q/7/ +BK3bsWU9QiaCrI6i9rXDgSd1SWCdNmPzGEkVwxQuuP5RxnMjwGfggVkxkq5Bb85b +gKlOZD2Hsv9/6Kgf9Zbe1MrEEpAiQoanV3nLuEC02F8UcRKzPdj1KaGTF3DdVfvY +ozQeeynL+dysk7RazicgsSYHVYqX0tuNbGSK2SMyPAKqmhvbFPNUsGqPP2/ARXt3 +TX6cHLj3+WHsFAvpQO7lsGtbFVYRtek21QaEDJeUQIAu9CvInM8DfoWkTRyqFOzE +9YGNvD4pBJHI//nekBc+IapPYsZ62XOZ/Agthv2t2aUeFYUFrnL//S8ExjMQqucK +q0cR2H0oCdEReDcmECn36UJ3whlX/Ja2UHV/UVe+ErIUSIToeLPGTFMpCg== +-----END CERTIFICATE REQUEST----- diff --git a/certs/gubernator.key b/certs/gubernator.key new file mode 100644 index 00000000..8954b57b --- /dev/null +++ b/certs/gubernator.key @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJJwIBAAKCAgEA3KZ3GOVs2d/ZXHGTgFw5a5+s14f1aszdtFoV45g4QQ8mEvZA +sfa77Rn5UhtIcVu3e1Uk4VVOgQR5wF3RDnmHmllxYctnCTlS2a9NU7+d6Jo0N75e +QezWvePIskqUG6hIzYijJheUESJ5Jm8LNsf8x/VONxCDQB+fUNwAPQsr/nCsFFhG +CxfaLXV9pB1ysrPX1eH0hFMn0mL2XS40xX8M9pPfAS15Xn1Pnq+jpwmHwARLorHV +NNJv8/o+sz/TmnWl4SXOsWoRz8OWDWHMbrjHUaJnCfnsPHSdaMoH9upsN+UQYeYf +j+y89gGZdO/kwLxQAEdNaDku9HC6o5dRo/tP+rOVCs1mQCZKql88tT0RJr5O0xcN +5Y0rgHtBavrOQztXinLBOH8Yhb5kH5FBcPCoBaWOW2gXNDL+/dCXHVJlxZdobWfD +oAL7d0IyEhlRKQV0skUQIdRvcrWcIXDVDwHnNAJENknw11QN5BjvSIWJFU+nfQai +GrrKFtclP2bap7B8EwxIBkOIl2tGTXCz3NBBvRMbBp2OZ6yNyzWeiusx2JCF8awP +CTWRIvLICMcZoG3ZYB/ZCZokcs3fU4Fra0IR4u3WBn7nygmG855RXS74sM1XNffB +ang38lwNqEX2v8qmrpwmRDENx1+cL4O2DE2SE5aktxhPB/3jZm+ONaGez3ECAwEA +AQKCAgAv5kdGdU+rAIg8JD/EBsFEVtZ7t30UYULjywajcMENho9aYHDs8UYck53n +MBsK2ME9Gd+2twEiyujvVK50ePdyD2aotzI382TD8uVTf+50tz0MuNvXbeW0NUW1 +RrFeS6r6S2Y3d7jh+1zXdhW2H7YFZoBDPedwPS0lrYyzs8AqLSV+CSezcssfQTk0 
+Llxe5OG/AZ1GOJMVffxDgCtekFUtFi68H7YQSB3HshNRyKTaj1QoDnjdTa2WBkYk +PX/fMRkAhmeXZhJVyvzBDPEHPD+wvhOmO8YH3bzqKamO9Zbmj2h+CCnj4Z2nvfMl +SoqbOIYZeWEZdgh2ch5iMxA7C+xxdeUdjFwklzoGIfOq1ax+KXlIS4sSf4+vf50p +DpliDD0Mmvgr/2uBAbPxiLSapchMEPepgRq8WnOi2m7pCl4O0CXNGH8DRrcQ7Xzg +59Wa/9lPq5QOaR0Oar41fs8j06n+9HJJkaP3IPjk6eBjJobH0Mnvw7Ft1LIVo4/5 +72GfdIibewVXZJXg6xdbSL1OygREWlLdsrIyhMwmmgKRvV7PztZidEtoQxmHrcQD ++RC7ClNyHJSjwUxSHkh8JVcKvEn5i41rvWnMsFnsMV2ld1jjrcuEkdQlBzZs2LDZ +pQDns21lp1VE5PQftgQuuvU+ww884m18onpVLz8oaJY5N6t3wQKCAQEA8U5eijj3 +NnJzPh5XH5O8sMDqBQErlh2hy7/xF/dPqoBtHAORW4rEE0WihthwzCXpAHOX57HN +wqLu/QXXvNSPE9PlSL5/2dlk0qW4w3x8o+GcJ5O+aLtgnr1bZf05SHDDocFy0Je+ +mTuknpz4Ki27jXJHNtlGAnp1n2J6N4IT0QY4CSD2xxvzVtjZpdxePBDNiqz88vjT +QgN4HAQm6SSxPMRnIHPbTuHvh2EY4XEasFHrMKXKTN007trwD3Ut6IhXIRzGQ8Yx +V/Dm5lFimTgozOXb1ssRAoWLb0hK6LvEjhmWEWRJB6dhX7s8sicNtrnqIXaHx6oS +uuy+0j1lZY0fqQKCAQEA6hYZZ7ppkj85aj/phrZwqEa/nZYS02ekAHwVd1X6I/Bq +uwHVI4bW01q+9XZ0JIb5FYkhTuiMwz1tPU3gPL9Pafa4aV582jkzX2lA8M2x19fe +MSPk8Cqfj7lujjADBM545e5+PA0O7JeW9/Mt6wD6lA7NF+/QfTht9Zqd5dAK3TXC +w14Br5y/acSXHN2VuzN3VzZhzwGFiQctb46gim+F+0/wqrPEW4E/j2VAGD6gR8ky +5WXgUBoa+FLja9vJhEc0ktHX5679uh9MO3La86Nc0mkj6B/hz7+byf59TluFEHI6 +vEKwEaXME6NS4sLgSCzApiKhr2tLIuRhC/ww/XSuiQKCAQBv9UOJ8CpLGcj8bklP +/lTS0X8CuWtGqBL0nOa4judonVOCZulfRr/4jRt+YxfasFAuBHPbbTShdMVbogVZ +uGl18p90I+bf5ayQy5ExKiYOR1QTTMpQf3exzvuEE3wrbx5lg4LI407se67CZOQD +ddqKIxaFjUOdTMIcJC2+aVzWY4NGQQQel4pMpio9eURDrCwhhmeKOAaUn1Vv8kE3 +dO3C2wFaHkcHj256KoPHvZl7a1aq6JE3Hn87v1sYYO7e77s1Qst+kIs+WShYfjQ4 +EMVlY9SfEPKgb+Okyy7uUog8wgRNp3D22yMjrpTXt6T2PcYypnMTIYa1MbjLhO7x +sguZAoIBAHFF6ltAYE+tmnnonXdblx5Gxfhao5d7AmhdowGRMC7t6qe9lkbPu+qg +ceFTxZXbXnLmXaDSdTMQ6WiN46FhresHwWrzlxT+MvbLdupPjux0gAdlaCYzaezh +fTb+0qcjxmr1Qk6KvxsLq7kmCqU6XwcANoyLtmaxxLvhauDHEErE9g8V0VkKT7G9 +uKX2L7tbhzdDkIbcJKMPPxRJfzQeDUZ0A3CG9tCUtY4NH7vbqjGykZdh+e6pAz4A +1h3eSCBjWHRnaMOq8Qp8lKX7ySm2dnc2+3Ia42llPdy78iTVsoknzXZGCu8vPJaQ +JM6oIGGsjjiWb4j0MXKb8r4a9hXfvYkCggEACzVOr5XpLpr2YUSVDOrqsvGDz3rM 
+3iMpJI5xuyZ2TtVjdW5YexkwEihgGojxOYbTYT7VQHUu6i39I5W7+w/J073bp+vi +GQ6uHq9cafWY5s4jSYr4NPyWve7e/H0vpe3k03CeaVTy2JxiUQkdpT6FfvhOO0PK +0ZvgX7xB+OwjQUc8eyL3kyNHK1FWfpP8cC63qxLOAze5Q5WDcRZ5mzTHhOIpi1Dd +0efhw0C1aWvyPMOQKgGl9jFkaOsGGoH3dfaU35TJzYqC2KXMLe8acirOTWxIL4Em +Sre9sCEmz4t5MbV6AuJQ4mgQSTz4f7/0P+UH6MJKtUPXQFZtmX+R1FNiTQ== +-----END RSA PRIVATE KEY----- diff --git a/certs/gubernator.pem b/certs/gubernator.pem new file mode 100644 index 00000000..e8f1bff0 --- /dev/null +++ b/certs/gubernator.pem @@ -0,0 +1,31 @@ +-----BEGIN CERTIFICATE----- +MIIFRTCCAy2gAwIBAgIJAKEtY0SKWmgJMA0GCSqGSIb3DQEBCwUAMD8xCzAJBgNV +BAYTAlVTMQswCQYDVQQIDAJUWDEjMCEGA1UECgwaTWFpbGd1biBUZWNobm9sb2dp +ZXMsIEluYy4wHhcNMjAxMDI2MjEyOTAyWhcNMzAxMDI0MjEyOTAyWjBTMQswCQYD +VQQGEwJVUzELMAkGA1UECAwCVFgxIzAhBgNVBAoMGk1haWxndW4gVGVjaG5vbG9n +aWVzLCBJbmMuMRIwEAYDVQQDDAlsb2NhbGhvc3QwggIiMA0GCSqGSIb3DQEBAQUA +A4ICDwAwggIKAoICAQDcpncY5WzZ39lccZOAXDlrn6zXh/VqzN20WhXjmDhBDyYS +9kCx9rvtGflSG0hxW7d7VSThVU6BBHnAXdEOeYeaWXFhy2cJOVLZr01Tv53omjQ3 +vl5B7Na948iySpQbqEjNiKMmF5QRInkmbws2x/zH9U43EINAH59Q3AA9Cyv+cKwU +WEYLF9otdX2kHXKys9fV4fSEUyfSYvZdLjTFfwz2k98BLXlefU+er6OnCYfABEui +sdU00m/z+j6zP9OadaXhJc6xahHPw5YNYcxuuMdRomcJ+ew8dJ1oygf26mw35RBh +5h+P7Lz2AZl07+TAvFAAR01oOS70cLqjl1Gj+0/6s5UKzWZAJkqqXzy1PREmvk7T +Fw3ljSuAe0Fq+s5DO1eKcsE4fxiFvmQfkUFw8KgFpY5baBc0Mv790JcdUmXFl2ht +Z8OgAvt3QjISGVEpBXSyRRAh1G9ytZwhcNUPAec0AkQ2SfDXVA3kGO9IhYkVT6d9 +BqIausoW1yU/ZtqnsHwTDEgGQ4iXa0ZNcLPc0EG9ExsGnY5nrI3LNZ6K6zHYkIXx +rA8JNZEi8sgIxxmgbdlgH9kJmiRyzd9TgWtrQhHi7dYGfufKCYbznlFdLviwzVc1 +98FqeDfyXA2oRfa/yqaunCZEMQ3HX5wvg7YMTZITlqS3GE8H/eNmb441oZ7PcQID +AQABozAwLjAsBgNVHREEJTAjgglsb2NhbGhvc3SHEAAAAAAAAAAAAAAAAAAAAAGH +BH8AAAEwDQYJKoZIhvcNAQELBQADggIBAJDkvzCrTdvLY93hIRthFRYVyee1jlRy +lYbRJ8Z7cBOWGbhceyL7lr1s/K1ntzSrWjYdbEpNUxnHbJpelxGz8tr9TS7TsHvz +J/3O9G5WzLZ2lvm4m0i5zxuJP2vDiWIpIy+H4wOXFkeetB6dBLIIV+e/NLmM+0x3 +D+1fjO5LRBo/Ipu/TFwNCRjasUv3KuDvsW6wgTRV0s3D8YnKoDKuUdydYitFfFCC +qQ4emBaymzpcA04I90Nl05xdiEDyKFVMBlw7cDGHbPqXymJyLFqmlZBo+KHyUGKZ 
+vD93LpTajxh5Cj2SWeTUsAYeEzgevKmjIAOLuQAzjtanj0Xx869qGYMHgx2qTX57 +KjAiT54NCIxMkPclONq1pHHqycNf1X95BqK5loeid469xs9hI7OK4ZgmGAxMOUyE +M/DqpfGetpTyILKGeRg4f9B4mRL/u2PE0O+STrp0x2ZAKvuVKkaY51ne5Y94ZvI0 +qYG4nYG0yJKKUZFlZM7CqcDeU+/npd2X+zuhQEicZVZTBBvvJiXqhzsG5oCGoSdS +l5imlrFH+ouYWAoAPlGtYE1RPxTPWExqY6VlkWlU4z8gcQF6g0cEO174K90FcRls +Cqo24pvfR3ihzVkudEBpksveuuvrrrna0MhpONTx/mzQuqQ17iofxiuhhydhAPP7 +bv+JfhIWsjyC +-----END CERTIFICATE----- diff --git a/cli-tls.conf b/cli-tls.conf new file mode 100644 index 00000000..2394c482 --- /dev/null +++ b/cli-tls.conf @@ -0,0 +1,5 @@ +GUBER_DEBUG=true +GUBER_GRPC_ADDRESS=localhost:9081 +GUBER_TLS_CA=certs/ca.pem +GUBER_TLS_KEY=certs/gubernator.key +GUBER_TLS_CERT=certs/gubernator.pem diff --git a/client.go b/client.go index e325e369..96bbce47 100644 --- a/client.go +++ b/client.go @@ -17,11 +17,14 @@ limitations under the License. package gubernator import ( + "crypto/tls" "math/rand" + "time" "github.com/mailgun/holster/v3/clock" "github.com/pkg/errors" "google.golang.org/grpc" + "google.golang.org/grpc/credentials" ) const ( @@ -34,35 +37,47 @@ func (m *RateLimitReq) HashKey() string { return m.Name + "_" + m.UniqueKey } -// Create a new connection to the server -func DialV1Server(server string) (V1Client, error) { +// DialV1Server is a convenience function for dialing gubernator instances +func DialV1Server(server string, tls *tls.Config) (V1Client, error) { if len(server) == 0 { return nil, errors.New("server is empty; must provide a server") } - conn, err := grpc.Dial(server, grpc.WithInsecure()) + opts := []grpc.DialOption{grpc.WithInsecure()} + if tls != nil { + opts = []grpc.DialOption{grpc.WithTransportCredentials(credentials.NewTLS(tls))} + } + + conn, err := grpc.Dial(server, opts...) 
if err != nil { - return nil, errors.Wrapf(err, "failed to dial peer %s", server) + return nil, errors.Wrapf(err, "failed to dial server %s", server) } return NewV1Client(conn), nil } -// Convert a clock.Duration to a unix millisecond timestamp -func ToTimeStamp(duration clock.Duration) int64 { - return int64(duration / clock.Millisecond) +// ToTimeStamp is a convenience function to convert a time.Duration +// to a unix millisecond timestamp. Useful when working with gubernator +// request and response duration and reset_time fields. +func ToTimeStamp(duration time.Duration) int64 { + return int64(duration / time.Millisecond) } -// Convert a unix millisecond timestamp to a time.Duration -func FromTimeStamp(ts int64) clock.Duration { +// FromTimeStamp is a convenience function to convert a unix millisecond +// timestamp to a time.Duration. Useful when working with gubernator +// request and response duration and reset_time fields. +func FromTimeStamp(ts int64) time.Duration { return clock.Now().Sub(FromUnixMilliseconds(ts)) } -func FromUnixMilliseconds(ts int64) clock.Time { +// FromUnixMilliseconds is a convenience function to convert a unix +// millisecond timestamp to a time.Time. Useful when working with gubernator +// request and response duration and reset_time fields. 
+func FromUnixMilliseconds(ts int64) time.Time { return clock.Unix(0, ts*int64(clock.Millisecond)) } -// Given a list of peers, return a random peer +// RandomPeer returns a random peer from the list of peers provided func RandomPeer(peers []PeerInfo) PeerInfo { rand.Shuffle(len(peers), func(i, j int) { peers[i], peers[j] = peers[j], peers[i] @@ -70,7 +85,7 @@ func RandomPeer(peers []PeerInfo) PeerInfo { return peers[0] } -// Return a random alpha string of 'n' length +// RandomString returns a random alpha string of 'n' length func RandomString(n int) string { const alphanum = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" var bytes = make([]byte, n) diff --git a/cluster/cluster.go b/cluster/cluster.go index 1fedc168..dfa39985 100644 --- a/cluster/cluster.go +++ b/cluster/cluster.go @@ -96,7 +96,7 @@ func StartWith(localPeers []gubernator.PeerInfo) error { // Add the peers and daemons to the package level variables peers = append(peers, gubernator.PeerInfo{ - GRPCAddress: d.GRPCListener.Addr().String(), + GRPCAddress: d.GRPCListeners[0].Addr().String(), HTTPAddress: d.HTTPListener.Addr().String(), }) daemons = append(daemons, d) diff --git a/cluster/cluster_test.go b/cluster/cluster_test.go index e44a6e9f..c904781c 100644 --- a/cluster/cluster_test.go +++ b/cluster/cluster_test.go @@ -59,9 +59,9 @@ func TestStartMultipleDaemons(t *testing.T) { daemons := cluster.GetDaemons() assert.Equal(t, wantPeers, cluster.GetPeers()) assert.Equal(t, 2, len(daemons)) - assert.Equal(t, "127.0.0.1:1111", daemons[0].GRPCListener.Addr().String()) - assert.Equal(t, "127.0.0.1:2222", daemons[1].GRPCListener.Addr().String()) - assert.Equal(t, "127.0.0.1:2222", cluster.DaemonAt(1).GRPCListener.Addr().String()) + assert.Equal(t, "127.0.0.1:1111", daemons[0].GRPCListeners[0].Addr().String()) + assert.Equal(t, "127.0.0.1:2222", daemons[1].GRPCListeners[0].Addr().String()) + assert.Equal(t, "127.0.0.1:2222", cluster.DaemonAt(1).GRPCListeners[0].Addr().String()) 
assert.Equal(t, "127.0.0.1:2222", cluster.PeerAt(1).GRPCAddress) } diff --git a/cmd/gubernator-cli/main.go b/cmd/gubernator-cli/main.go index 6af16c8e..9681d315 100644 --- a/cmd/gubernator-cli/main.go +++ b/cmd/gubernator-cli/main.go @@ -18,6 +18,8 @@ package main import ( "context" + "errors" + "flag" "fmt" "math/rand" "os" @@ -25,12 +27,16 @@ import ( "github.com/davecgh/go-spew/spew" guber "github.com/mailgun/gubernator" "github.com/mailgun/holster/v3/clock" + "github.com/mailgun/holster/v3/setter" "github.com/mailgun/holster/v3/syncutil" + "github.com/sirupsen/logrus" ) +var log *logrus.Logger + func checkErr(err error) { if err != nil { - fmt.Fprintf(os.Stderr, "error: %s\n", err) + log.Errorf(err.Error()) os.Exit(1) } } @@ -40,12 +46,29 @@ func randInt(min, max int) int64 { } func main() { - if len(os.Args) < 2 { - fmt.Printf("Please provide an gubernator GRPC endpoint address\n") - os.Exit(1) + var configFile, GRPCAddress string + var err error + + log = logrus.StandardLogger() + flags := flag.NewFlagSet("gubernator", flag.ContinueOnError) + flags.StringVar(&configFile, "config", "", "environment config file") + flags.StringVar(&GRPCAddress, "e", "", "the gubernator GRPC endpoint address") + checkErr(flags.Parse(os.Args[1:])) + + conf, err := guber.SetupDaemonConfig(log, configFile) + checkErr(err) + setter.SetOverride(&conf.GRPCListenAddress, GRPCAddress) + + if configFile == "" && GRPCAddress == "" && os.Getenv("GUBER_GRPC_ADDRESS") == "" { + checkErr(errors.New("please provide a GRPC endpoint via -e or from a config " + + "file via -config or set the env GUBER_GRPC_ADDRESS")) } - client, err := guber.DialV1Server(os.Args[1]) + err = guber.SetupTLS(conf.TLS) + checkErr(err) + + log.Infof("Connecting to '%s'...\n", conf.GRPCListenAddress) + client, err := guber.DialV1Server(conf.GRPCListenAddress, conf.ClientTLS()) checkErr(err) // Generate a selection of rate limits with random limits diff --git a/cmd/gubernator/main.go b/cmd/gubernator/main.go index 
07021ccc..c1832cd2 100644 --- a/cmd/gubernator/main.go +++ b/cmd/gubernator/main.go @@ -18,45 +18,39 @@ package main import ( "context" - "crypto/tls" - "crypto/x509" "flag" - "fmt" - "io/ioutil" - "net" "os" "os/signal" "runtime" - "strconv" - "strings" - etcd "github.com/coreos/etcd/clientv3" - "github.com/davecgh/go-spew/spew" "github.com/mailgun/gubernator" "github.com/mailgun/holster/v3/clock" - "github.com/mailgun/holster/v3/setter" - "github.com/mailgun/holster/v3/slice" - "github.com/pkg/errors" - "github.com/segmentio/fasthash/fnv1" - "github.com/segmentio/fasthash/fnv1a" "github.com/sirupsen/logrus" "k8s.io/klog" ) -var log = logrus.WithField("category", "server") +var log = logrus.WithField("category", "gubernator") var Version = "dev-build" func main() { var configFile string + var err error logrus.Infof("Gubernator %s (%s/%s)", Version, runtime.GOARCH, runtime.GOOS) flags := flag.NewFlagSet("gubernator", flag.ContinueOnError) - flags.StringVar(&configFile, "config", "", "yaml config file") + flags.StringVar(&configFile, "config", "", "environment config file") flags.BoolVar(&gubernator.DebugEnabled, "debug", false, "enable debug") checkErr(flags.Parse(os.Args[1:]), "while parsing flags") + // in order to prevent logging to /tmp by k8s.io/client-go + // and other kubernetes related dependencies which are using + // klog (https://github.com/kubernetes/klog), we need to + // initialize klog in the way it prints to stderr only. 
+ klog.InitFlags(nil) + flag.Set("logtostderr", "true") + // Read our config from the environment or optional environment config file - conf, err := confFromFile(configFile) + conf, err := gubernator.SetupDaemonConfig(logrus.StandardLogger(), configFile) checkErr(err, "while getting config") ctx, cancel := context.WithTimeout(context.Background(), clock.Second*10) @@ -78,312 +72,9 @@ func main() { } } -func confFromFile(configFile string) (gubernator.DaemonConfig, error) { - var conf gubernator.DaemonConfig - - // in order to prevent logging to /tmp by k8s.io/client-go - // and other kubernetes related dependencies which are using - // klog (https://github.com/kubernetes/klog), we need to - // initialize klog in the way it prints to stderr only. - klog.InitFlags(nil) - flag.Set("logtostderr", "true") - - setter.SetDefault(&gubernator.DebugEnabled, getEnvBool("GUBER_DEBUG")) - if gubernator.DebugEnabled { - logrus.SetLevel(logrus.DebugLevel) - logrus.Debug("Debug enabled") - } - - if configFile != "" { - log.Infof("Loading env config: %s", configFile) - if err := fromEnvFile(configFile); err != nil { - return conf, err - } - } - - // Main config - setter.SetDefault(&conf.GRPCListenAddress, os.Getenv("GUBER_GRPC_ADDRESS"), "localhost:81") - setter.SetDefault(&conf.HTTPListenAddress, os.Getenv("GUBER_HTTP_ADDRESS"), "localhost:80") - setter.SetDefault(&conf.CacheSize, getEnvInteger("GUBER_CACHE_SIZE"), 50_000) - setter.SetDefault(&conf.DataCenter, os.Getenv("GUBER_DATA_CENTER"), "") - - setter.SetDefault(&conf.AdvertiseAddress, os.Getenv("GUBER_ADVERTISE_ADDRESS"), conf.GRPCListenAddress) - - advAddr, advPort, err := net.SplitHostPort(conf.AdvertiseAddress) - if err != nil { - return conf, errors.Wrap(err, "GUBER_ADVERTISE_ADDRESS is invalid; expected format is `address:port`") - } - advAddr, err = gubernator.ResolveHostIP(advAddr) - if err != nil { - return conf, errors.Wrap(err, "failed to discover host ip for GUBER_ADVERTISE_ADDRESS") - } - conf.AdvertiseAddress = 
net.JoinHostPort(advAddr, advPort) - - // Behaviors - setter.SetDefault(&conf.Behaviors.BatchTimeout, getEnvDuration("GUBER_BATCH_TIMEOUT")) - setter.SetDefault(&conf.Behaviors.BatchLimit, getEnvInteger("GUBER_BATCH_LIMIT")) - setter.SetDefault(&conf.Behaviors.BatchWait, getEnvDuration("GUBER_BATCH_WAIT")) - - setter.SetDefault(&conf.Behaviors.GlobalTimeout, getEnvDuration("GUBER_GLOBAL_TIMEOUT")) - setter.SetDefault(&conf.Behaviors.GlobalBatchLimit, getEnvInteger("GUBER_GLOBAL_BATCH_LIMIT")) - setter.SetDefault(&conf.Behaviors.GlobalSyncWait, getEnvDuration("GUBER_GLOBAL_SYNC_WAIT")) - - setter.SetDefault(&conf.Behaviors.MultiRegionTimeout, getEnvDuration("GUBER_MULTI_REGION_TIMEOUT")) - setter.SetDefault(&conf.Behaviors.MultiRegionBatchLimit, getEnvInteger("GUBER_MULTI_REGION_BATCH_LIMIT")) - setter.SetDefault(&conf.Behaviors.MultiRegionSyncWait, getEnvDuration("GUBER_MULTI_REGION_SYNC_WAIT")) - - choices := []string{"member-list", "k8s", "etcd"} - setter.SetDefault(&conf.PeerDiscoveryType, os.Getenv("GUBER_PEER_DISCOVERY_TYPE"), "member-list") - if !slice.ContainsString(conf.PeerDiscoveryType, choices, nil) { - return conf, fmt.Errorf("GUBER_PEER_DISCOVERY_TYPE is invalid; choices are [%s]`", strings.Join(choices, ",")) - } - - // ETCD Config - setter.SetDefault(&conf.EtcdPoolConf.KeyPrefix, os.Getenv("GUBER_ETCD_KEY_PREFIX"), "/gubernator-peers") - setter.SetDefault(&conf.EtcdPoolConf.EtcdConfig, &etcd.Config{}) - setter.SetDefault(&conf.EtcdPoolConf.EtcdConfig.Endpoints, getEnvSlice("GUBER_ETCD_ENDPOINTS"), []string{"localhost:2379"}) - setter.SetDefault(&conf.EtcdPoolConf.EtcdConfig.DialTimeout, getEnvDuration("GUBER_ETCD_DIAL_TIMEOUT"), clock.Second*5) - setter.SetDefault(&conf.EtcdPoolConf.EtcdConfig.Username, os.Getenv("GUBER_ETCD_USER")) - setter.SetDefault(&conf.EtcdPoolConf.EtcdConfig.Password, os.Getenv("GUBER_ETCD_PASSWORD")) - setter.SetDefault(&conf.EtcdPoolConf.AdvertiseAddress, os.Getenv("GUBER_ETCD_ADVERTISE_ADDRESS"), conf.AdvertiseAddress) - 
setter.SetDefault(&conf.EtcdPoolConf.DataCenter, os.Getenv("GUBER_ETCD_DATA_CENTER"), conf.DataCenter) - - setter.SetDefault(&conf.MemberListPoolConf.AdvertiseAddress, os.Getenv("GUBER_MEMBERLIST_ADVERTISE_ADDRESS"), conf.AdvertiseAddress) - setter.SetDefault(&conf.MemberListPoolConf.MemberListAddress, os.Getenv("GUBER_MEMBERLIST_ADDRESS"), fmt.Sprintf("%s:7946", advAddr)) - setter.SetDefault(&conf.MemberListPoolConf.KnownNodes, getEnvSlice("GUBER_MEMBERLIST_KNOWN_NODES"), []string{}) - setter.SetDefault(&conf.MemberListPoolConf.DataCenter, conf.DataCenter) - - // Kubernetes Config - setter.SetDefault(&conf.K8PoolConf.Namespace, os.Getenv("GUBER_K8S_NAMESPACE"), "default") - conf.K8PoolConf.PodIP = os.Getenv("GUBER_K8S_POD_IP") - conf.K8PoolConf.PodPort = os.Getenv("GUBER_K8S_POD_PORT") - conf.K8PoolConf.Selector = os.Getenv("GUBER_K8S_ENDPOINTS_SELECTOR") - - // PeerPicker Config - if pp := os.Getenv("GUBER_PEER_PICKER"); pp != "" { - var replicas int - var hash string - - switch pp { - case "consistent-hash": - setter.SetDefault(&hash, os.Getenv("GUBER_PEER_PICKER_HASH"), "fnv1a") - hashFuncs := map[string]gubernator.HashFunc{ - "fnv1a": fnv1a.HashBytes32, - "fnv1": fnv1.HashBytes32, - "crc32": nil, - } - fn, ok := hashFuncs[hash] - if !ok { - return conf, errors.Errorf("'GUBER_PEER_PICKER_HASH=%s' is invalid; choices are [%s]", - hash, validHashKeys(hashFuncs)) - } - conf.Picker = gubernator.NewConsistentHash(fn) - - case "replicated-hash": - setter.SetDefault(&replicas, getEnvInteger("GUBER_REPLICATED_HASH_REPLICAS"), gubernator.DefaultReplicas) - conf.Picker = gubernator.NewReplicatedConsistentHash(nil, replicas) - setter.SetDefault(&hash, os.Getenv("GUBER_PEER_PICKER_HASH"), "fnv1a") - hashFuncs := map[string]gubernator.HashFunc64{ - "fnv1a": fnv1a.HashBytes64, - "fnv1": fnv1.HashBytes64, - } - fn, ok := hashFuncs[hash] - if !ok { - return conf, errors.Errorf("'GUBER_PEER_PICKER_HASH=%s' is invalid; choices are [%s]", - hash, validHash64Keys(hashFuncs)) - } - 
conf.Picker = gubernator.NewReplicatedConsistentHash(fn, replicas) - default: - return conf, errors.Errorf("'GUBER_PEER_PICKER=%s' is invalid; choices are ['replicated-hash', 'consistent-hash']", pp) - } - } - - if anyHasPrefix("GUBER_K8S_", os.Environ()) { - logrus.Debug("K8s peer pool config found") - if conf.K8PoolConf.Selector == "" { - return conf, errors.New("when using k8s for peer discovery, you MUST provide a " + - "`GUBER_K8S_ENDPOINTS_SELECTOR` to select the gubernator peers from the endpoints listing") - } - } - - if anyHasPrefix("GUBER_MEMBERLIST_", os.Environ()) { - logrus.Debug("Memberlist pool config found") - if len(conf.MemberListPoolConf.KnownNodes) == 0 { - return conf, errors.New("when using `member-list` for peer discovery, you MUST provide a " + - "hostname of a known host in the cluster via `GUBER_MEMBERLIST_KNOWN_NODES`") - } - } - - if anyHasPrefix("GUBER_ETCD_", os.Environ()) { - logrus.Debug("ETCD peer pool config found") - } - - // If env contains any TLS configuration - if anyHasPrefix("GUBER_ETCD_TLS_", os.Environ()) { - if err := setupTLS(conf.EtcdPoolConf.EtcdConfig); err != nil { - return conf, err - } - } - - if gubernator.DebugEnabled { - spew.Dump(conf) - } - - return conf, nil -} - func checkErr(err error, msg string) { if err != nil { log.WithError(err).Error(msg) os.Exit(1) } } - -func setupTLS(conf *etcd.Config) error { - var tlsCertFile, tlsKeyFile, tlsCAFile string - - // set `GUBER_ETCD_TLS_ENABLE` and this line will - // create a TLS config with no config. 
- setter.SetDefault(&conf.TLS, &tls.Config{}) - - setter.SetDefault(&tlsCertFile, os.Getenv("GUBER_ETCD_TLS_CERT")) - setter.SetDefault(&tlsKeyFile, os.Getenv("GUBER_ETCD_TLS_KEY")) - setter.SetDefault(&tlsCAFile, os.Getenv("GUBER_ETCD_TLS_CA")) - - // If the CA file was provided - if tlsCAFile != "" { - setter.SetDefault(&conf.TLS, &tls.Config{}) - - var certPool *x509.CertPool = nil - if pemBytes, err := ioutil.ReadFile(tlsCAFile); err == nil { - certPool = x509.NewCertPool() - certPool.AppendCertsFromPEM(pemBytes) - } else { - return errors.Wrapf(err, "while loading cert CA file '%s'", tlsCAFile) - } - setter.SetDefault(&conf.TLS.RootCAs, certPool) - conf.TLS.InsecureSkipVerify = false - } - - // If the cert and key files are provided attempt to load them - if tlsCertFile != "" && tlsKeyFile != "" { - tlsCert, err := tls.LoadX509KeyPair(tlsCertFile, tlsKeyFile) - if err != nil { - return errors.Wrapf(err, "while loading cert '%s' and key file '%s'", - tlsCertFile, tlsKeyFile) - } - setter.SetDefault(&conf.TLS.Certificates, []tls.Certificate{tlsCert}) - } - - // If no other TLS config is provided this will force connecting with TLS, - // without cert verification - if os.Getenv("GUBER_ETCD_TLS_SKIP_VERIFY") != "" { - setter.SetDefault(&conf.TLS, &tls.Config{}) - conf.TLS.InsecureSkipVerify = true - } - return nil -} - -func anyHasPrefix(prefix string, items []string) bool { - for _, i := range items { - if strings.HasPrefix(i, prefix) { - return true - } - } - return false -} - -func getEnvBool(name string) bool { - v := os.Getenv(name) - if v == "" { - return false - } - b, err := strconv.ParseBool(v) - if err != nil { - log.WithError(err).Errorf("while parsing '%s' as an boolean", name) - return false - } - return b -} - -func getEnvInteger(name string) int { - v := os.Getenv(name) - if v == "" { - return 0 - } - i, err := strconv.ParseInt(v, 10, 64) - if err != nil { - log.WithError(err).Errorf("while parsing '%s' as an integer", name) - return 0 - } - return 
int(i) -} - -func getEnvDuration(name string) clock.Duration { - v := os.Getenv(name) - if v == "" { - return 0 - } - d, err := clock.ParseDuration(v) - if err != nil { - log.WithError(err).Errorf("while parsing '%s' as a duration", name) - return 0 - } - return d -} - -func getEnvSlice(name string) []string { - v := os.Getenv(name) - if v == "" { - return nil - } - return strings.Split(v, ",") -} - -// Take values from a file in the format `GUBER_CONF_ITEM=my-value` and put them into the environment -// lines that begin with `#` are ignored -func fromEnvFile(configFile string) error { - fd, err := os.Open(configFile) - if err != nil { - return fmt.Errorf("while opening config file: %s", err) - } - - contents, err := ioutil.ReadAll(fd) - if err != nil { - return fmt.Errorf("while reading config file '%s': %s", configFile, err) - } - for i, line := range strings.Split(string(contents), "\n") { - // Skip comments, empty lines or lines with tabs - if strings.HasPrefix(line, "#") || strings.HasPrefix(line, " ") || - strings.HasPrefix(line, "\t") || len(line) == 0 { - continue - } - - logrus.Debugf("config: [%d] '%s'", i, line) - parts := strings.SplitN(line, "=", 2) - if len(parts) != 2 { - return errors.Errorf("malformed key=value on line '%d'", i) - } - - if err := os.Setenv(strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1])); err != nil { - return errors.Wrapf(err, "while settings environ for '%s=%s'", parts[0], parts[1]) - } - } - return nil -} - -func validHashKeys(m map[string]gubernator.HashFunc) string { - var rs []string - for k, _ := range m { - rs = append(rs, k) - } - return strings.Join(rs, ",") -} - -func validHash64Keys(m map[string]gubernator.HashFunc64) string { - var rs []string - for k, _ := range m { - rs = append(rs, k) - } - return strings.Join(rs, ",") -} diff --git a/config.go b/config.go index 1b9a67e8..29b15f4d 100644 --- a/config.go +++ b/config.go @@ -17,18 +17,55 @@ limitations under the License. 
package gubernator import ( + "crypto/tls" + "crypto/x509" "fmt" + "io/ioutil" + "net" + "os" + "strconv" + "strings" "time" + etcd "github.com/coreos/etcd/clientv3" + "github.com/davecgh/go-spew/spew" + "github.com/mailgun/holster/v3/clock" "github.com/mailgun/holster/v3/setter" + "github.com/mailgun/holster/v3/slice" + "github.com/pkg/errors" + "github.com/segmentio/fasthash/fnv1" + "github.com/segmentio/fasthash/fnv1a" "github.com/sirupsen/logrus" "google.golang.org/grpc" ) +type BehaviorConfig struct { + // How long we should wait for a batched response from a peer + BatchTimeout time.Duration + // How long we should wait before sending a batched request + BatchWait time.Duration + // The max number of requests we can batch into a single peer request + BatchLimit int + + // How long a non-owning peer should wait before syncing hits to the owning peer + GlobalSyncWait time.Duration + // How long we should wait for global sync responses from peers + GlobalTimeout time.Duration + // The max number of global updates we can batch into a single peer request + GlobalBatchLimit int + + // How long the current region will collect request before pushing them to other regions + MultiRegionSyncWait time.Duration + // How long the current region will wait for responses from other regions + MultiRegionTimeout time.Duration + // The max number of requests the current region will collect + MultiRegionBatchLimit int +} + // config for a gubernator instance type Config struct { - // Required - GRPCServer *grpc.Server + // (Required) A list of GRPC servers to register our instance with + GRPCServers []*grpc.Server // (Optional) Adjust how gubernator behaviors are configured Behaviors BehaviorConfig @@ -59,31 +96,40 @@ type Config struct { // using multi data center support. 
DataCenter string - // (Optional) Logger to be used when + // (Optional) A Logger which implements the declared logger interface (typically *logrus.Entry) Logger logrus.FieldLogger + + // (Optional) The TLS config used when connecting to gubernator peers + PeerTLS *tls.Config } -type BehaviorConfig struct { - // How long we should wait for a batched response from a peer - BatchTimeout time.Duration - // How long we should wait before sending a batched request - BatchWait time.Duration - // The max number of requests we can batch into a single peer request - BatchLimit int +func (c *Config) SetDefaults() error { + setter.SetDefault(&c.Behaviors.BatchTimeout, time.Millisecond*500) + setter.SetDefault(&c.Behaviors.BatchLimit, maxBatchSize) + setter.SetDefault(&c.Behaviors.BatchWait, time.Microsecond*500) - // How long a non-owning peer should wait before syncing hits to the owning peer - GlobalSyncWait time.Duration - // How long we should wait for global sync responses from peers - GlobalTimeout time.Duration - // The max number of global updates we can batch into a single peer request - GlobalBatchLimit int + setter.SetDefault(&c.Behaviors.GlobalTimeout, time.Millisecond*500) + setter.SetDefault(&c.Behaviors.GlobalBatchLimit, maxBatchSize) + setter.SetDefault(&c.Behaviors.GlobalSyncWait, time.Microsecond*500) - // How long the current region will collect request before pushing them to other regions - MultiRegionSyncWait time.Duration - // How long the current region will wait for responses from other regions - MultiRegionTimeout time.Duration - // The max number of requests the current region will collect - MultiRegionBatchLimit int + setter.SetDefault(&c.Behaviors.MultiRegionTimeout, time.Millisecond*500) + setter.SetDefault(&c.Behaviors.MultiRegionBatchLimit, maxBatchSize) + setter.SetDefault(&c.Behaviors.MultiRegionSyncWait, time.Second) + + setter.SetDefault(&c.LocalPicker, NewReplicatedConsistentHash(nil, DefaultReplicas)) + setter.SetDefault(&c.RegionPicker, 
NewRegionPicker(nil)) + setter.SetDefault(&c.Cache, NewLRUCache(0)) + + if c.Behaviors.BatchLimit > maxBatchSize { + return fmt.Errorf("Behaviors.BatchLimit cannot exceed '%d'", maxBatchSize) + } + + // Make a copy of the TLS config in case our caller decides to make changes + if c.PeerTLS != nil { + c.PeerTLS = c.PeerTLS.Clone() + } + + return nil } type PeerInfo struct { @@ -104,25 +150,399 @@ func (p PeerInfo) HashKey() string { type UpdateFunc func([]PeerInfo) -func (c *Config) SetDefaults() error { - setter.SetDefault(&c.Behaviors.BatchTimeout, time.Millisecond*500) - setter.SetDefault(&c.Behaviors.BatchLimit, maxBatchSize) - setter.SetDefault(&c.Behaviors.BatchWait, time.Microsecond*500) +var DebugEnabled = false - setter.SetDefault(&c.Behaviors.GlobalTimeout, time.Millisecond*500) - setter.SetDefault(&c.Behaviors.GlobalBatchLimit, maxBatchSize) - setter.SetDefault(&c.Behaviors.GlobalSyncWait, time.Microsecond*500) +type DaemonConfig struct { + // (Required) The `address:port` that will accept GRPC requests + GRPCListenAddress string - setter.SetDefault(&c.Behaviors.MultiRegionTimeout, time.Millisecond*500) - setter.SetDefault(&c.Behaviors.MultiRegionBatchLimit, maxBatchSize) - setter.SetDefault(&c.Behaviors.MultiRegionSyncWait, time.Second) + // (Required) The `address:port` that will accept HTTP requests + HTTPListenAddress string - setter.SetDefault(&c.LocalPicker, NewReplicatedConsistentHash(nil, DefaultReplicas)) - setter.SetDefault(&c.RegionPicker, NewRegionPicker(nil)) - setter.SetDefault(&c.Cache, NewLRUCache(0)) + // (Optional) The `address:port` that is advertised to other Gubernator peers. + // Defaults to `GRPCListenAddress` + AdvertiseAddress string - if c.Behaviors.BatchLimit > maxBatchSize { - return fmt.Errorf("Behaviors.BatchLimit cannot exceed '%d'", maxBatchSize) + // (Optional) The number of items in the cache. 
Defaults to 50,000 + CacheSize int + + // (Optional) Configure how behaviours behave + Behaviors BehaviorConfig + + // (Optional) Identifies the datacenter this instance is running in. For + // use with multi-region support + DataCenter string + + // (Optional) Which pool to use when discovering other Gubernator peers + // Valid options are [etcd, k8s, member-list] (Defaults to 'member-list') + PeerDiscoveryType string + + // (Optional) Etcd configuration used for peer discovery + EtcdPoolConf EtcdPoolConfig + + // (Optional) K8s configuration used for peer discovery + K8PoolConf K8sPoolConfig + + // (Optional) Member list configuration used for peer discovery + MemberListPoolConf MemberListPoolConfig + + // (Optional) The PeerPicker as selected by `GUBER_PEER_PICKER` + Picker PeerPicker + + // (Optional) A Logger which implements the declared logger interface (typically *logrus.Entry) + Logger logrus.FieldLogger + + // (Optional) TLS Configuration; SpawnDaemon() will modify the passed TLS config in an + // attempt to build a complete TLS config if one is not provided. + TLS *TLSConfig +} + +func (d *DaemonConfig) ClientTLS() *tls.Config { + if d.TLS != nil { + return d.TLS.ClientTLS + } + return nil +} + +func (d *DaemonConfig) ServerTLS() *tls.Config { + if d.TLS != nil { + return d.TLS.ServerTLS } return nil } + +// SetupDaemonConfig returns a DaemonConfig object as configured by reading the provided config file +// and environment. 
+func SetupDaemonConfig(logger *logrus.Logger, configFile string) (DaemonConfig, error) { + log := logrus.NewEntry(logger) + var conf DaemonConfig + + if configFile != "" { + log.Infof("Loading env config: %s", configFile) + if err := fromEnvFile(log, configFile); err != nil { + return conf, err + } + } + + setter.SetDefault(&DebugEnabled, getEnvBool(log, "GUBER_DEBUG")) + if DebugEnabled { + logger.SetLevel(logrus.DebugLevel) + log.Debug("Debug enabled") + } + + // Main config + setter.SetDefault(&conf.GRPCListenAddress, os.Getenv("GUBER_GRPC_ADDRESS"), "localhost:81") + setter.SetDefault(&conf.HTTPListenAddress, os.Getenv("GUBER_HTTP_ADDRESS"), "localhost:80") + setter.SetDefault(&conf.CacheSize, getEnvInteger(log, "GUBER_CACHE_SIZE"), 50_000) + setter.SetDefault(&conf.AdvertiseAddress, os.Getenv("GUBER_ADVERTISE_ADDRESS"), conf.GRPCListenAddress) + setter.SetDefault(&conf.DataCenter, os.Getenv("GUBER_DATA_CENTER"), "") + + advAddr, advPort, err := net.SplitHostPort(conf.AdvertiseAddress) + if err != nil { + return conf, errors.Wrap(err, "GUBER_ADVERTISE_ADDRESS is invalid; expected format is `address:port`") + } + advAddr, err = ResolveHostIP(advAddr) + if err != nil { + return conf, errors.Wrap(err, "failed to discover host ip for GUBER_ADVERTISE_ADDRESS") + } + conf.AdvertiseAddress = net.JoinHostPort(advAddr, advPort) + + // Behaviors + setter.SetDefault(&conf.Behaviors.BatchTimeout, getEnvDuration(log, "GUBER_BATCH_TIMEOUT")) + setter.SetDefault(&conf.Behaviors.BatchLimit, getEnvInteger(log, "GUBER_BATCH_LIMIT")) + setter.SetDefault(&conf.Behaviors.BatchWait, getEnvDuration(log, "GUBER_BATCH_WAIT")) + + setter.SetDefault(&conf.Behaviors.GlobalTimeout, getEnvDuration(log, "GUBER_GLOBAL_TIMEOUT")) + setter.SetDefault(&conf.Behaviors.GlobalBatchLimit, getEnvInteger(log, "GUBER_GLOBAL_BATCH_LIMIT")) + setter.SetDefault(&conf.Behaviors.GlobalSyncWait, getEnvDuration(log, "GUBER_GLOBAL_SYNC_WAIT")) + + setter.SetDefault(&conf.Behaviors.MultiRegionTimeout, 
getEnvDuration(log, "GUBER_MULTI_REGION_TIMEOUT")) + setter.SetDefault(&conf.Behaviors.MultiRegionBatchLimit, getEnvInteger(log, "GUBER_MULTI_REGION_BATCH_LIMIT")) + setter.SetDefault(&conf.Behaviors.MultiRegionSyncWait, getEnvDuration(log, "GUBER_MULTI_REGION_SYNC_WAIT")) + + choices := []string{"member-list", "k8s", "etcd"} + setter.SetDefault(&conf.PeerDiscoveryType, os.Getenv("GUBER_PEER_DISCOVERY_TYPE"), "member-list") + if !slice.ContainsString(conf.PeerDiscoveryType, choices, nil) { + return conf, fmt.Errorf("GUBER_PEER_DISCOVERY_TYPE is invalid; choices are [%s]`", strings.Join(choices, ",")) + } + + // TLS Config + if anyHasPrefix("GUBER_TLS_", os.Environ()) { + conf.TLS = &TLSConfig{} + setter.SetDefault(&conf.TLS.CaFile, os.Getenv("GUBER_TLS_CA")) + setter.SetDefault(&conf.TLS.CaKeyFile, os.Getenv("GUBER_TLS_CA_KEY")) + setter.SetDefault(&conf.TLS.KeyFile, os.Getenv("GUBER_TLS_KEY")) + setter.SetDefault(&conf.TLS.CertFile, os.Getenv("GUBER_TLS_CERT")) + setter.SetDefault(&conf.TLS.AutoTLS, getEnvBool(log, "GUBER_TLS_AUTO")) + + clientAuth := os.Getenv("GUBER_TLS_CLIENT_AUTH") + if clientAuth != "" { + clientAuthTypes := map[string]tls.ClientAuthType{ + "request-cert": tls.RequestClientCert, + "verify-cert": tls.VerifyClientCertIfGiven, + "require-any-cert": tls.RequireAnyClientCert, + "require-and-verify": tls.RequireAndVerifyClientCert, + } + t, ok := clientAuthTypes[clientAuth] + if !ok { + return conf, errors.Errorf("'GUBER_TLS_CLIENT_AUTH=%s' is invalid; choices are [%s]", + clientAuth, validClientAuthTypes(clientAuthTypes)) + } + conf.TLS.ClientAuth = t + } + setter.SetDefault(&conf.TLS.ClientAuthKeyFile, os.Getenv("GUBER_TLS_CLIENT_AUTH_KEY")) + setter.SetDefault(&conf.TLS.ClientAuthCertFile, os.Getenv("GUBER_TLS_CLIENT_AUTH_CERT")) + setter.SetDefault(&conf.TLS.ClientAuthCaFile, os.Getenv("GUBER_TLS_CLIENT_AUTH_CA_CERT")) + setter.SetDefault(&conf.TLS.InsecureSkipVerify, getEnvBool(log, "GUBER_TLS_INSECURE_SKIP_VERIFY")) + } + + // ETCD Config + 
setter.SetDefault(&conf.EtcdPoolConf.KeyPrefix, os.Getenv("GUBER_ETCD_KEY_PREFIX"), "/gubernator-peers") + setter.SetDefault(&conf.EtcdPoolConf.EtcdConfig, &etcd.Config{}) + setter.SetDefault(&conf.EtcdPoolConf.EtcdConfig.Endpoints, getEnvSlice("GUBER_ETCD_ENDPOINTS"), []string{"localhost:2379"}) + setter.SetDefault(&conf.EtcdPoolConf.EtcdConfig.DialTimeout, getEnvDuration(log, "GUBER_ETCD_DIAL_TIMEOUT"), clock.Second*5) + setter.SetDefault(&conf.EtcdPoolConf.EtcdConfig.Username, os.Getenv("GUBER_ETCD_USER")) + setter.SetDefault(&conf.EtcdPoolConf.EtcdConfig.Password, os.Getenv("GUBER_ETCD_PASSWORD")) + setter.SetDefault(&conf.EtcdPoolConf.AdvertiseAddress, os.Getenv("GUBER_ETCD_ADVERTISE_ADDRESS"), conf.AdvertiseAddress) + setter.SetDefault(&conf.EtcdPoolConf.DataCenter, os.Getenv("GUBER_ETCD_DATA_CENTER"), conf.DataCenter) + + setter.SetDefault(&conf.MemberListPoolConf.AdvertiseAddress, os.Getenv("GUBER_MEMBERLIST_ADVERTISE_ADDRESS"), conf.AdvertiseAddress) + setter.SetDefault(&conf.MemberListPoolConf.MemberListAddress, os.Getenv("GUBER_MEMBERLIST_ADDRESS"), fmt.Sprintf("%s:7946", advAddr)) + setter.SetDefault(&conf.MemberListPoolConf.KnownNodes, getEnvSlice("GUBER_MEMBERLIST_KNOWN_NODES"), []string{}) + setter.SetDefault(&conf.MemberListPoolConf.DataCenter, conf.DataCenter) + + // Kubernetes Config + setter.SetDefault(&conf.K8PoolConf.Namespace, os.Getenv("GUBER_K8S_NAMESPACE"), "default") + conf.K8PoolConf.PodIP = os.Getenv("GUBER_K8S_POD_IP") + conf.K8PoolConf.PodPort = os.Getenv("GUBER_K8S_POD_PORT") + conf.K8PoolConf.Selector = os.Getenv("GUBER_K8S_ENDPOINTS_SELECTOR") + + // PeerPicker Config + if pp := os.Getenv("GUBER_PEER_PICKER"); pp != "" { + var replicas int + var hash string + + switch pp { + case "consistent-hash": + setter.SetDefault(&hash, os.Getenv("GUBER_PEER_PICKER_HASH"), "fnv1a") + hashFuncs := map[string]HashFunc{ + "fnv1a": fnv1a.HashBytes32, + "fnv1": fnv1.HashBytes32, + "crc32": nil, + } + fn, ok := hashFuncs[hash] + if !ok { + return 
conf, errors.Errorf("'GUBER_PEER_PICKER_HASH=%s' is invalid; choices are [%s]", + hash, validHashKeys(hashFuncs)) + } + conf.Picker = NewConsistentHash(fn) + + case "replicated-hash": + setter.SetDefault(&replicas, getEnvInteger(log, "GUBER_REPLICATED_HASH_REPLICAS"), DefaultReplicas) + conf.Picker = NewReplicatedConsistentHash(nil, replicas) + setter.SetDefault(&hash, os.Getenv("GUBER_PEER_PICKER_HASH"), "fnv1a") + hashFuncs := map[string]HashFunc64{ + "fnv1a": fnv1a.HashBytes64, + "fnv1": fnv1.HashBytes64, + } + fn, ok := hashFuncs[hash] + if !ok { + return conf, errors.Errorf("'GUBER_PEER_PICKER_HASH=%s' is invalid; choices are [%s]", + hash, validHash64Keys(hashFuncs)) + } + conf.Picker = NewReplicatedConsistentHash(fn, replicas) + default: + return conf, errors.Errorf("'GUBER_PEER_PICKER=%s' is invalid; choices are ['replicated-hash', 'consistent-hash']", pp) + } + } + + if anyHasPrefix("GUBER_K8S_", os.Environ()) { + log.Debug("K8s peer pool config found") + if conf.K8PoolConf.Selector == "" { + return conf, errors.New("when using k8s for peer discovery, you MUST provide a " + + "`GUBER_K8S_ENDPOINTS_SELECTOR` to select the gubernator peers from the endpoints listing") + } + } + + if anyHasPrefix("GUBER_MEMBERLIST_", os.Environ()) { + log.Debug("Memberlist pool config found") + if len(conf.MemberListPoolConf.KnownNodes) == 0 { + return conf, errors.New("when using `member-list` for peer discovery, you MUST provide a " + + "hostname of a known host in the cluster via `GUBER_MEMBERLIST_KNOWN_NODES`") + } + } + + if anyHasPrefix("GUBER_ETCD_", os.Environ()) { + log.Debug("ETCD peer pool config found") + } + + // If env contains any TLS configuration + if anyHasPrefix("GUBER_ETCD_TLS_", os.Environ()) { + if err := setupEtcdTLS(conf.EtcdPoolConf.EtcdConfig); err != nil { + return conf, err + } + } + + if DebugEnabled { + log.Debug(spew.Sdump(conf)) + } + + return conf, nil +} + +func setupEtcdTLS(conf *etcd.Config) error { + var tlsCertFile, tlsKeyFile, tlsCAFile 
string + + // set `GUBER_ETCD_TLS_ENABLE` and this line will + // create a TLS config with no config. + setter.SetDefault(&conf.TLS, &tls.Config{}) + + setter.SetDefault(&tlsCertFile, os.Getenv("GUBER_ETCD_TLS_CERT")) + setter.SetDefault(&tlsKeyFile, os.Getenv("GUBER_ETCD_TLS_KEY")) + setter.SetDefault(&tlsCAFile, os.Getenv("GUBER_ETCD_TLS_CA")) + + // If the CA file was provided + if tlsCAFile != "" { + setter.SetDefault(&conf.TLS, &tls.Config{}) + + var certPool *x509.CertPool = nil + if pemBytes, err := ioutil.ReadFile(tlsCAFile); err == nil { + certPool = x509.NewCertPool() + certPool.AppendCertsFromPEM(pemBytes) + } else { + return errors.Wrapf(err, "while loading cert CA file '%s'", tlsCAFile) + } + setter.SetDefault(&conf.TLS.RootCAs, certPool) + conf.TLS.InsecureSkipVerify = false + } + + // If the cert and key files are provided attempt to load them + if tlsCertFile != "" && tlsKeyFile != "" { + tlsCert, err := tls.LoadX509KeyPair(tlsCertFile, tlsKeyFile) + if err != nil { + return errors.Wrapf(err, "while loading cert '%s' and key file '%s'", + tlsCertFile, tlsKeyFile) + } + setter.SetDefault(&conf.TLS.Certificates, []tls.Certificate{tlsCert}) + } + + // If no other TLS config is provided this will force connecting with TLS, + // without cert verification + if os.Getenv("GUBER_ETCD_TLS_SKIP_VERIFY") != "" { + setter.SetDefault(&conf.TLS, &tls.Config{}) + conf.TLS.InsecureSkipVerify = true + } + return nil +} + +func anyHasPrefix(prefix string, items []string) bool { + for _, i := range items { + if strings.HasPrefix(i, prefix) { + return true + } + } + return false +} + +func getEnvBool(log logrus.FieldLogger, name string) bool { + v := os.Getenv(name) + if v == "" { + return false + } + b, err := strconv.ParseBool(v) + if err != nil { + log.WithError(err).Errorf("while parsing '%s' as an boolean", name) + return false + } + return b +} + +func getEnvInteger(log logrus.FieldLogger, name string) int { + v := os.Getenv(name) + if v == "" { + return 0 + } + 
i, err := strconv.ParseInt(v, 10, 64) + if err != nil { + log.WithError(err).Errorf("while parsing '%s' as an integer", name) + return 0 + } + return int(i) +} + +func getEnvDuration(log logrus.FieldLogger, name string) time.Duration { + v := os.Getenv(name) + if v == "" { + return 0 + } + d, err := time.ParseDuration(v) + if err != nil { + log.WithError(err).Errorf("while parsing '%s' as a duration", name) + return 0 + } + return d +} + +func getEnvSlice(name string) []string { + v := os.Getenv(name) + if v == "" { + return nil + } + return strings.Split(v, ",") +} + +// Take values from a file in the format `GUBER_CONF_ITEM=my-value` and put them into the environment +// lines that begin with `#` are ignored +func fromEnvFile(log logrus.FieldLogger, configFile string) error { + fd, err := os.Open(configFile) + if err != nil { + return fmt.Errorf("while opening config file: %s", err) + } + + contents, err := ioutil.ReadAll(fd) + if err != nil { + return fmt.Errorf("while reading config file '%s': %s", configFile, err) + } + for i, line := range strings.Split(string(contents), "\n") { + // Skip comments, empty lines or lines with tabs + if strings.HasPrefix(line, "#") || strings.HasPrefix(line, " ") || + strings.HasPrefix(line, "\t") || len(line) == 0 { + continue + } + + log.Debugf("config: [%d] '%s'", i, line) + parts := strings.SplitN(line, "=", 2) + if len(parts) != 2 { + return errors.Errorf("malformed key=value on line '%d'", i) + } + + if err := os.Setenv(strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1])); err != nil { + return errors.Wrapf(err, "while settings environ for '%s=%s'", parts[0], parts[1]) + } + } + return nil +} + +func validClientAuthTypes(m map[string]tls.ClientAuthType) string { + var rs []string + for k, _ := range m { + rs = append(rs, k) + } + return strings.Join(rs, ",") +} + +func validHashKeys(m map[string]HashFunc) string { + var rs []string + for k, _ := range m { + rs = append(rs, k) + } + return strings.Join(rs, ",") +} + 
+func validHash64Keys(m map[string]HashFunc64) string { + var rs []string + for k, _ := range m { + rs = append(rs, k) + } + return strings.Join(rs, ",") +} diff --git a/daemon.go b/daemon.go index 1767685c..8b6e96a9 100644 --- a/daemon.go +++ b/daemon.go @@ -18,6 +18,7 @@ package gubernator import ( "context" + "log" "net" "net/http" "strings" @@ -31,61 +32,19 @@ import ( "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/sirupsen/logrus" "google.golang.org/grpc" + "google.golang.org/grpc/credentials" ) -var DebugEnabled = false - -type DaemonConfig struct { - // (Required) The `address:port` that will accept GRPC requests - GRPCListenAddress string - - // (Required) The `address:port` that will accept HTTP requests - HTTPListenAddress string - - // (Optional) The `address:port` that is advertised to other Gubernator peers. - // Defaults to `GRPCListenAddress` - AdvertiseAddress string - - // (Optional) The number of items in the cache. Defaults to 50,000 - CacheSize int - - // (Optional) Configure how behaviours behave - Behaviors BehaviorConfig - - // (Optional) Identifies the datacenter this instance is running in. 
For - // use with multi-region support - DataCenter string - - // (Optional) Which pool to use when discovering other Gubernator peers - // Valid options are [etcd, k8s, member-list] (Defaults to 'member-list') - PeerDiscoveryType string - - // (Optional) Etcd configuration used for peer discovery - EtcdPoolConf EtcdPoolConfig - - // (Optional) K8s configuration used for peer discovery - K8PoolConf K8sPoolConfig - - // (Optional) Member list configuration used for peer discovery - MemberListPoolConf MemberListPoolConfig - - // (Optional) The PeerPicker as selected by `GUBER_PEER_PICKER` - Picker PeerPicker - - // (Optional) A Logger which implements the declared logger interface (typically *logrus.Entry) - Logger logrus.FieldLogger -} - type Daemon struct { - GRPCListener net.Listener - HTTPListener net.Listener - V1Server *V1Instance + GRPCListeners []net.Listener + HTTPListener net.Listener + V1Server *V1Instance log logrus.FieldLogger pool PoolInterface conf DaemonConfig httpSrv *http.Server - grpcSrv *grpc.Server + grpcSrvs []*grpc.Server wg syncutil.WaitGroup statsHandler *GRPCStatsHandler promRegister *prometheus.Registry @@ -122,16 +81,27 @@ func (s *Daemon) Start(ctx context.Context) error { s.statsHandler = NewGRPCStatsHandler() s.promRegister.Register(s.statsHandler) - // New GRPC server - s.grpcSrv = grpc.NewServer( + opts := []grpc.ServerOption{ grpc.StatsHandler(s.statsHandler), - grpc.MaxRecvMsgSize(1024*1024)) + grpc.MaxRecvMsgSize(1024 * 1024), + } + + if err := SetupTLS(s.conf.TLS); err != nil { + return err + } + + if s.conf.ServerTLS() != nil { + // Create two GRPC server instances, one for TLS and the other for the API Gateway + s.grpcSrvs = append(s.grpcSrvs, grpc.NewServer(append(opts, grpc.Creds(credentials.NewTLS(s.conf.ServerTLS())))...)) + } + s.grpcSrvs = append(s.grpcSrvs, grpc.NewServer(opts...)) // Registers a new gubernator instance with the GRPC server s.V1Server, err = NewV1Instance(Config{ + PeerTLS: s.conf.ClientTLS(), DataCenter: 
s.conf.DataCenter, LocalPicker: s.conf.Picker, - GRPCServer: s.grpcSrv, + GRPCServers: s.grpcSrvs, Logger: s.log, Cache: cache, }) @@ -142,19 +112,45 @@ func (s *Daemon) Start(ctx context.Context) error { // V1Server instance also implements prometheus.Collector interface s.promRegister.Register(s.V1Server) - s.GRPCListener, err = net.Listen("tcp", s.conf.GRPCListenAddress) + l, err := net.Listen("tcp", s.conf.GRPCListenAddress) if err != nil { return errors.Wrap(err, "while starting GRPC listener") } + s.GRPCListeners = append(s.GRPCListeners, l) // Start serving GRPC Requests s.wg.Go(func() { s.log.Infof("GRPC Listening on %s ...", s.conf.GRPCListenAddress) - if err := s.grpcSrv.Serve(s.GRPCListener); err != nil { + if err := s.grpcSrvs[0].Serve(l); err != nil { s.log.WithError(err).Error("while starting GRPC server") } }) + var gatewayAddr string + if s.conf.ServerTLS() != nil { + // We start a new local GRPC instance because we can't guarantee the TLS cert provided by the + // user has localhost or the local interface included in the certs valid hostnames. If they are not + // included it means the local gateway connections will not be able to connect. 
+ l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + return errors.Wrap(err, "while starting GRPC Gateway listener") + } + s.GRPCListeners = append(s.GRPCListeners, l) + + s.wg.Go(func() { + s.log.Infof("GRPC Gateway Listening on %s ...", l.Addr()) + if err := s.grpcSrvs[1].Serve(l); err != nil { + s.log.WithError(err).Error("while starting GRPC Gateway server") + } + }) + gatewayAddr = l.Addr().String() + } else { + gatewayAddr, err = ResolveHostIP(s.conf.GRPCListenAddress) + if err != nil { + return errors.Wrap(err, "while resolving GRPC gateway client address") + } + } + switch s.conf.PeerDiscoveryType { case "k8s": // Source our list of peers from kubernetes endpoint API @@ -190,8 +186,7 @@ func (s *Daemon) Start(ctx context.Context) error { gateway := runtime.NewServeMux() var gwCtx context.Context gwCtx, s.gwCancel = context.WithCancel(context.Background()) - err = RegisterV1HandlerFromEndpoint(gwCtx, gateway, - s.conf.GRPCListenAddress, []grpc.DialOption{grpc.WithInsecure()}) + err = RegisterV1HandlerFromEndpoint(gwCtx, gateway, gatewayAddr, []grpc.DialOption{grpc.WithInsecure()}) if err != nil { return errors.Wrap(err, "while registering GRPC gateway handler") } @@ -203,24 +198,43 @@ func (s *Daemon) Start(ctx context.Context) error { s.promRegister, promhttp.HandlerFor(s.promRegister, promhttp.HandlerOpts{}), )) mux.Handle("/", gateway) - s.httpSrv = &http.Server{Addr: s.conf.HTTPListenAddress, Handler: mux} + log := log.New(newLogWriter(s.log), "", 0) + s.httpSrv = &http.Server{Addr: s.conf.HTTPListenAddress, Handler: mux, ErrorLog: log} s.HTTPListener, err = net.Listen("tcp", s.conf.HTTPListenAddress) if err != nil { return errors.Wrap(err, "while starting HTTP listener") } - s.wg.Go(func() { - s.log.Infof("HTTP Gateway Listening on %s ...", s.conf.HTTPListenAddress) - if err := s.httpSrv.Serve(s.HTTPListener); err != nil { - if err != http.ErrServerClosed { - s.log.WithError(err).Error("while starting HTTP server") + if s.conf.ServerTLS() != 
nil { + // This is to avoid any race conditions that might occur + // since the tls config is a shared pointer. + s.httpSrv.TLSConfig = s.conf.ServerTLS().Clone() + s.wg.Go(func() { + s.log.Infof("HTTPS Gateway Listening on %s ...", s.conf.HTTPListenAddress) + if err := s.httpSrv.ServeTLS(s.HTTPListener, "", ""); err != nil { + if err != http.ErrServerClosed { + s.log.WithError(err).Error("while starting TLS HTTP server") + } } - } - }) + }) + } else { + s.wg.Go(func() { + s.log.Infof("HTTP Gateway Listening on %s ...", s.conf.HTTPListenAddress) + if err := s.httpSrv.Serve(s.HTTPListener); err != nil { + if err != http.ErrServerClosed { + s.log.WithError(err).Error("while starting HTTP server") + } + } + }) + } // Validate we can reach the GRPC and HTTP endpoints before returning - if err := WaitForConnect(ctx, []string{s.conf.HTTPListenAddress, s.conf.GRPCListenAddress}); err != nil { + addrs := []string{s.conf.HTTPListenAddress} + for _, l := range s.GRPCListeners { + addrs = append(addrs, l.Addr().String()) + } + if err := WaitForConnect(ctx, addrs); err != nil { return err } @@ -239,13 +253,15 @@ func (s *Daemon) Close() { s.log.Infof("HTTP Gateway close for %s ...", s.conf.HTTPListenAddress) s.httpSrv.Shutdown(context.Background()) - s.log.Infof("GRPC close for %s ...", s.conf.GRPCListenAddress) - s.grpcSrv.GracefulStop() + for i, srv := range s.grpcSrvs { + s.log.Infof("GRPC close for %s ...", s.GRPCListeners[i].Addr()) + srv.GracefulStop() + } s.wg.Stop() s.statsHandler.Close() s.gwCancel() s.httpSrv = nil - s.grpcSrv = nil + s.grpcSrvs = nil } // SetPeers sets the peers for this daemon @@ -270,12 +286,13 @@ func (s *Daemon) Config() DaemonConfig { func (s *Daemon) Peers() []PeerInfo { var peers []PeerInfo for _, client := range s.V1Server.GetPeerList() { - peers = append(peers, client.PeerInfo()) + peers = append(peers, client.Info()) } return peers } -// WaitForConnect returns nil if the list of addresses is listening for connections; will block until 
context is cancelled. +// WaitForConnect returns nil if the list of addresses is listening +// for connections; will block until context is cancelled. func WaitForConnect(ctx context.Context, addresses []string) error { var d net.Dialer var errs []error @@ -286,6 +303,9 @@ func WaitForConnect(ctx context.Context, addresses []string) error { continue } + // TODO: golang 15.3 introduces tls.DialContext(). When we are ready to drop + // support for older versions we can detect tls and use the tls.DialContext to + // avoid the `http: TLS handshake error` we get when using TLS. conn, err := d.DialContext(ctx, "tcp", addr) if err != nil { errs = append(errs, err) diff --git a/docker-compose-tls.yaml b/docker-compose-tls.yaml new file mode 100644 index 00000000..3cd3df47 --- /dev/null +++ b/docker-compose-tls.yaml @@ -0,0 +1,82 @@ +version: '3' +services: + gubernator-1: + image: thrawn01/gubernator:latest + command: "/gubernator" + environment: + # Basic member-list config + - GUBER_GRPC_ADDRESS=0.0.0.0:81 + - GUBER_HTTP_ADDRESS=0.0.0.0:80 + - GUBER_ADVERTISE_ADDRESS=gubernator-1:81 + - GUBER_MEMBERLIST_KNOWN_NODES=gubernator-1 + # TLS config + - GUBER_TLS_CA=/etc/tls/ca.pem + - GUBER_TLS_KEY=/etc/tls/gubernator.key + - GUBER_TLS_CERT=/etc/tls/gubernator.pem + - GUBER_TLS_CLIENT_AUTH=require-and-verify + ports: + - "9081:81" + - "9080:80" + volumes: + - ${PWD}/certs:/etc/tls + + gubernator-2: + image: thrawn01/gubernator:latest + command: "/gubernator" + environment: + # Basic member-list config + - GUBER_GRPC_ADDRESS=0.0.0.0:81 + - GUBER_HTTP_ADDRESS=0.0.0.0:80 + - GUBER_ADVERTISE_ADDRESS=gubernator-2:81 + - GUBER_MEMBERLIST_KNOWN_NODES=gubernator-1 + # TLS config + - GUBER_TLS_CA=/etc/tls/ca.pem + - GUBER_TLS_KEY=/etc/tls/gubernator.key + - GUBER_TLS_CERT=/etc/tls/gubernator.pem + - GUBER_TLS_CLIENT_AUTH=require-and-verify + ports: + - "9181:81" + - "9180:80" + volumes: + - ${PWD}/certs:/etc/tls + + gubernator-3: + image: thrawn01/gubernator:latest + command: 
"/gubernator" + environment: + # Basic member-list config + - GUBER_GRPC_ADDRESS=0.0.0.0:81 + - GUBER_HTTP_ADDRESS=0.0.0.0:80 + - GUBER_ADVERTISE_ADDRESS=gubernator-3:81 + - GUBER_MEMBERLIST_KNOWN_NODES=gubernator-1 + # TLS config + - GUBER_TLS_CA=/etc/tls/ca.pem + - GUBER_TLS_KEY=/etc/tls/gubernator.key + - GUBER_TLS_CERT=/etc/tls/gubernator.pem + - GUBER_TLS_CLIENT_AUTH=require-and-verify + ports: + - "9281:81" + - "9280:80" + volumes: + - ${PWD}/certs:/etc/tls + + gubernator-4: + image: thrawn01/gubernator:latest + command: "/gubernator" + environment: + # Basic member-list config + - GUBER_DEBUG=true + - GUBER_GRPC_ADDRESS=0.0.0.0:81 + - GUBER_HTTP_ADDRESS=0.0.0.0:80 + - GUBER_ADVERTISE_ADDRESS=gubernator-4:81 + - GUBER_MEMBERLIST_KNOWN_NODES=gubernator-1 + # TLS config + - GUBER_TLS_CA=/etc/tls/ca.pem + - GUBER_TLS_KEY=/etc/tls/gubernator.key + - GUBER_TLS_CERT=/etc/tls/gubernator.pem + - GUBER_TLS_CLIENT_AUTH=require-and-verify + ports: + - "9381:81" + - "9380:80" + volumes: + - ${PWD}/certs:/etc/tls diff --git a/docker-compose.yaml b/docker-compose.yaml index cac65026..9dba583e 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -10,8 +10,6 @@ services: - GUBER_HTTP_ADDRESS=0.0.0.0:80 # The address that is advertised to other peers - GUBER_ADVERTISE_ADDRESS=gubernator-1:81 - # Max size of the cache; The cache size will never grow beyond this size. - - GUBER_CACHE_SIZE=50000 # A comma separated list of known gubernator nodes - GUBER_MEMBERLIST_KNOWN_NODES=gubernator-1 #- GUBER_DATA_CENTER=us-east-1 @@ -29,8 +27,6 @@ services: - GUBER_HTTP_ADDRESS=0.0.0.0:80 # The address that is advertised to other peers - GUBER_ADVERTISE_ADDRESS=gubernator-2:81 - # Max size of the cache; The cache size will never grow beyond this size. 
- - GUBER_CACHE_SIZE=50000 # A comma separated list of known gubernator nodes - GUBER_MEMBERLIST_KNOWN_NODES=gubernator-1 #- GUBER_DATA_CENTER=us-east-1 @@ -48,8 +44,6 @@ services: - GUBER_HTTP_ADDRESS=0.0.0.0:80 # The address that is advertised to other peers - GUBER_ADVERTISE_ADDRESS=gubernator-3:81 - # Max size of the cache; The cache size will never grow beyond this size. - - GUBER_CACHE_SIZE=50000 # A comma separated list of known gubernator nodes - GUBER_MEMBERLIST_KNOWN_NODES=gubernator-1 #- GUBER_DATA_CENTER=us-west-2 @@ -68,10 +62,8 @@ services: - GUBER_HTTP_ADDRESS=0.0.0.0:80 # The address that is advertised to other peers - GUBER_ADVERTISE_ADDRESS=gubernator-4:81 - # Max size of the cache; The cache size will never grow beyond this size. - - GUBER_CACHE_SIZE=50000 # A Comma separated list of known gubernator nodes - - GUBER_MEMBERLIST_KNOWN_NODES=gubernator-1 + - GUBER_MEMBERLIST_KNOWN_NODES=gubernator-1,gubernator-2 #- GUBER_DATA_CENTER=us-west-2 ports: - "9381:81" diff --git a/example.conf b/example.conf index b470a8d5..7b5c4292 100644 --- a/example.conf +++ b/example.conf @@ -15,16 +15,17 @@ GUBER_HTTP_ADDRESS=0.0.0.0:9980 # # If unset, will default to the hostname or if that fails will attempt # to guess at a non loopback interface -# GUBER_ADVERTISE_ADDRESS=localhost:81 +GUBER_ADVERTISE_ADDRESS=localhost:9990 # Max size of the cache; This is the cache that holds # all the rate limits. The cache size will never grow # beyond this size. -GUBER_CACHE_SIZE=50000 +# GUBER_CACHE_SIZE=50000 # The name of the datacenter this gubernator instance is in. # GUBER_DATA_CENTER=datacenter1 + ############################ # Behavior Config ############################ @@ -47,12 +48,57 @@ GUBER_CACHE_SIZE=50000 # How long a node will wait before sending a batch of GLOBAL updates to a peer #GUBER_GLOBAL_SYNC_WAIT=500ns + +############################ +# TLS Config +############################ +# Path to the CA certificate. 
This is primarily used by gubernator +# when connecting to other gubernator peers. +# GUBER_TLS_CA=/path/to/ca.pem + +# Path to the CA private key. See GUBER_TLS_AUTO for details +# GUBER_TLS_CA_KEY=/path/to/ca.key + +# Path to the server certificate. Certificate used by the server/clients for TLS connections. +# GUBER_TLS_CERT=/path/to/server.pem + +# Path to the server private key. This is the key for the certificate. Must be unencrypted. +# GUBER_TLS_KEY=/path/to/server.key + +# If set to `true` gubernator will generate both the CA and self-signed server certificates. +# If GUBER_TLS_CA and GUBER_TLS_CA_KEY are set but no GUBER_TLS_KEY or GUBER_TLS_CERT is set +# then gubernator will generate a self-signed key using the provided GUBER_TLS_CA and +# GUBER_TLS_CA_KEY. This avoids the need to distribute a new server cert for each gubernator +# instance at the cost of distributing the CA private key. If set but no CA or TLS certs are +# provided gubernator will generate a CA and certs needed for TLS. +# GUBER_TLS_AUTO=false + +# Sets the Client Authentication type as defined in the golang standard 'crypto/tls' package. +# Valid types are ('request-cert', 'verify-cert', 'require-any-cert', 'require-and-verify'). +# Use `require-and-verify` to achieve secure client authentication which will apply to all +# client and gubernator peer to peer communication. If set then gubernator will attempt to +# load GUBER_TLS_CLIENT_AUTH_CA_CERT, GUBER_TLS_CLIENT_AUTH_KEY and +# GUBER_TLS_CLIENT_AUTH_CERT for use with client authentication. If not provided client +# auth will use GUBER_TLS_CA, GUBER_TLS_CERT and GUBER_TLS_KEY for client authentication. 
+# GUBER_TLS_CLIENT_AUTH=require-and-verify + +# For use with GUBER_TLS_CLIENT_AUTH +# GUBER_TLS_CLIENT_AUTH_KEY=/path/to/client.key +# GUBER_TLS_CLIENT_AUTH_CERT=/path/to/client.pem +# GUBER_TLS_CLIENT_AUTH_CA_CERT=/path/to/client-ca.pem + +# If true, TLS peer to peer clients will accept any certificate presented by the remote +# gubernator instance and any host name in that certificate. +# GUBER_TLS_INSECURE_SKIP_VERIFY=false + + ############################ # Peer Discovery Type ############################ # Which type of peer discovery gubernator will use ('member-list', 'etcd', 'k8s') # GUBER_PEER_DISCOVERY_TYPE=member-list + ############################ # Member-List Config (GUBER_PEER_DISCOVERY_TYPE=member-list) ############################ @@ -69,6 +115,7 @@ GUBER_CACHE_SIZE=50000 # GUBER_MEMBERLIST_KNOWN_NODES=peer1:7946,peer2:7946,peer3:7946 # GUBER_MEMBERLIST_KNOWN_NODES=memberlist.example.com + ############################ # Kubernetes Config (GUBER_PEER_DISCOVERY_TYPE=k8s) ############################ @@ -107,13 +154,8 @@ GUBER_CACHE_SIZE=50000 # GUBER_ETCD_DATA_CENTER=datacenter1 # Authentication -#GUBER_ETCD_USER= -#GUBER_ETCD_PASSWORD= - - -############################ -# Etcd TLS Config -############################ +#GUBER_ETCD_USER=my-user +#GUBER_ETCD_PASSWORD=my-password # Enables TLS config, with an empty config #GUBER_ETCD_TLS_EABLED=False @@ -126,6 +168,7 @@ GUBER_CACHE_SIZE=50000 # Skip CERT verification #GUBER_ETCD_TLS_SKIP_VERIFY=true + ############################ # Picker Config ############################ @@ -144,3 +187,4 @@ GUBER_CACHE_SIZE=50000 # Choose the number of replications # GUBER_REPLICATED_HASH_REPLICAS=512 + diff --git a/functional_test.go b/functional_test.go index 3e9dc940..cb2d6e21 100644 --- a/functional_test.go +++ b/functional_test.go @@ -19,16 +19,18 @@ package gubernator_test import ( "context" "fmt" + "io" "net/http" "os" + "strings" "testing" guber "github.com/mailgun/gubernator" 
"github.com/mailgun/gubernator/cluster" "github.com/mailgun/holster/v3/clock" "github.com/mailgun/holster/v3/testutil" - "github.com/prometheus/client_golang/prometheus" - dto "github.com/prometheus/client_model/go" + "github.com/prometheus/common/expfmt" + "github.com/prometheus/common/model" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -51,7 +53,7 @@ func TestMain(m *testing.M) { } func TestOverTheLimit(t *testing.T) { - client, errs := guber.DialV1Server(cluster.GetRandomPeer().GRPCAddress) + client, errs := guber.DialV1Server(cluster.GetRandomPeer().GRPCAddress, nil) require.Nil(t, errs) tests := []struct { @@ -79,7 +81,7 @@ func TestOverTheLimit(t *testing.T) { Name: "test_over_limit", UniqueKey: "account:1234", Algorithm: guber.Algorithm_TOKEN_BUCKET, - Duration: guber.Second, + Duration: guber.Second * 9, Limit: 2, Hits: 1, Behavior: 0, @@ -100,7 +102,7 @@ func TestOverTheLimit(t *testing.T) { func TestTokenBucket(t *testing.T) { defer clock.Freeze(clock.Now()).Unfreeze() - client, errs := guber.DialV1Server(cluster.GetRandomPeer().GRPCAddress) + client, errs := guber.DialV1Server(cluster.GetRandomPeer().GRPCAddress, nil) require.Nil(t, errs) tests := []struct { @@ -154,7 +156,7 @@ func TestTokenBucket(t *testing.T) { func TestLeakyBucket(t *testing.T) { defer clock.Freeze(clock.Now()).Unfreeze() - client, errs := guber.DialV1Server(cluster.PeerAt(0).GRPCAddress) + client, errs := guber.DialV1Server(cluster.PeerAt(0).GRPCAddress, nil) require.Nil(t, errs) tests := []struct { @@ -216,7 +218,7 @@ func TestLeakyBucket(t *testing.T) { } func TestMissingFields(t *testing.T) { - client, errs := guber.DialV1Server(cluster.GetRandomPeer().GRPCAddress) + client, errs := guber.DialV1Server(cluster.GetRandomPeer().GRPCAddress, nil) require.Nil(t, errs) tests := []struct { @@ -280,7 +282,7 @@ func TestMissingFields(t *testing.T) { func TestGlobalRateLimits(t *testing.T) { peer := cluster.PeerAt(0).GRPCAddress - client, errs := 
guber.DialV1Server(peer) + client, errs := guber.DialV1Server(peer, nil) require.NoError(t, errs) sendHit := func(status guber.Status, remain int64, i int) string { @@ -324,28 +326,32 @@ func TestGlobalRateLimits(t *testing.T) { testutil.UntilPass(t, 20, clock.Millisecond*200, func(t testutil.TestingT) { // Inspect our metrics, ensure they collected the counts we expected during this test d := cluster.DaemonAt(0) - metricCh := make(chan prometheus.Metric, 5) - d.V1Server.Collect(metricCh) + config := d.Config() + resp, err := http.Get(fmt.Sprintf("http://%s/metrics", config.HTTPListenAddress)) + if !assert.NoError(t, err) { + return + } + defer resp.Body.Close() - buf := dto.Metric{} - m := <-metricCh // Async metric - assert.Nil(t, m.Write(&buf)) - assert.Equal(t, uint64(2), *buf.Histogram.SampleCount) + m := getMetric(t, resp.Body, "gubernator_async_durations_count") + assert.NotEqual(t, 0, int(m.Value)) // V1Instance 2 should be the owner of our global rate limit d = cluster.DaemonAt(2) - metricCh = make(chan prometheus.Metric, 5) - d.V1Server.Collect(metricCh) + config = d.Config() + resp, err = http.Get(fmt.Sprintf("http://%s/metrics", config.HTTPListenAddress)) + if !assert.NoError(t, err) { + return + } + defer resp.Body.Close() - m = <-metricCh // Async metric - m = <-metricCh // Broadcast metric - assert.Nil(t, m.Write(&buf)) - assert.Equal(t, uint64(2), *buf.Histogram.SampleCount) + m = getMetric(t, resp.Body, "gubernator_broadcast_durations_count") + assert.NotEqual(t, 0, int(m.Value)) }) } func TestChangeLimit(t *testing.T) { - client, errs := guber.DialV1Server(cluster.GetRandomPeer().GRPCAddress) + client, errs := guber.DialV1Server(cluster.GetRandomPeer().GRPCAddress, nil) require.Nil(t, errs) tests := []struct { @@ -421,7 +427,7 @@ func TestChangeLimit(t *testing.T) { Name: "test_change_limit", UniqueKey: "account:1234", Algorithm: tt.Algorithm, - Duration: guber.Millisecond * 100, + Duration: guber.Millisecond * 9000, Limit: tt.Limit, Hits: 1, }, 
@@ -440,7 +446,7 @@ func TestChangeLimit(t *testing.T) { } func TestResetRemaining(t *testing.T) { - client, errs := guber.DialV1Server(cluster.GetRandomPeer().GRPCAddress) + client, errs := guber.DialV1Server(cluster.GetRandomPeer().GRPCAddress, nil) require.Nil(t, errs) tests := []struct { @@ -493,7 +499,7 @@ func TestResetRemaining(t *testing.T) { Name: "test_reset_remaining", UniqueKey: "account:1234", Algorithm: tt.Algorithm, - Duration: guber.Millisecond * 100, + Duration: guber.Millisecond * 9000, Behavior: tt.Behavior, Limit: tt.Limit, Hits: 1, @@ -512,7 +518,7 @@ func TestResetRemaining(t *testing.T) { } func TestHealthCheck(t *testing.T) { - client, err := guber.DialV1Server(cluster.DaemonAt(0).GRPCListener.Addr().String()) + client, err := guber.DialV1Server(cluster.DaemonAt(0).GRPCListeners[0].Addr().String(), nil) require.NoError(t, err) // Check that the cluster is healthy to start with @@ -578,11 +584,12 @@ func TestHealthCheck(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), clock.Second*15) defer cancel() cluster.Restart(ctx) - } func TestLeakyBucketDivBug(t *testing.T) { - client, err := guber.DialV1Server(cluster.GetRandomPeer().GRPCAddress) + defer clock.Freeze(clock.Now()).Unfreeze() + + client, err := guber.DialV1Server(cluster.GetRandomPeer().GRPCAddress, nil) require.NoError(t, err) resp, err := client.GetRateLimits(context.Background(), &guber.GetRateLimitsReq{ @@ -617,7 +624,7 @@ func TestLeakyBucketDivBug(t *testing.T) { }, }) require.NoError(t, err) - assert.Equal(t, int64(1900), resp.Responses[0].Remaining) + assert.Equal(t, int64(1899), resp.Responses[0].Remaining) assert.Equal(t, int64(2000), resp.Responses[0].Limit) } @@ -628,3 +635,30 @@ func TestGRPCGateway(t *testing.T) { } // TODO: Add a test for sending no rate limits RateLimitReqList.RateLimits = nil + +func getMetric(t testutil.TestingT, in io.Reader, name string) *model.Sample { + dec := expfmt.SampleDecoder{ + Dec: expfmt.NewDecoder(in, 
expfmt.FmtText), + Opts: &expfmt.DecodeOptions{ + Timestamp: model.Now(), + }, + } + + var all model.Vector + for { + var smpls model.Vector + err := dec.Decode(&smpls) + if err == io.EOF { + break + } + assert.NoError(t, err) + all = append(all, smpls...) + } + + for _, s := range all { + if strings.Contains(s.Metric.String(), name) { + return s + } + } + return nil +} diff --git a/global.go b/global.go index ae2887ce..0247dbea 100644 --- a/global.go +++ b/global.go @@ -18,6 +18,7 @@ package gubernator import ( "context" + "time" "github.com/mailgun/holster/v3/clock" "github.com/mailgun/holster/v3/syncutil" @@ -35,20 +36,22 @@ type globalManager struct { log logrus.FieldLogger instance *V1Instance - asyncMetrics prometheus.Histogram - broadcastMetrics prometheus.Histogram + asyncMetrics prometheus.Summary + broadcastMetrics prometheus.Summary } func newGlobalManager(conf BehaviorConfig, instance *V1Instance) *globalManager { gm := globalManager{ log: instance.log, - asyncMetrics: prometheus.NewHistogram(prometheus.HistogramOpts{ - Name: "async_durations", - Help: "The duration of GLOBAL async sends in seconds.", + asyncMetrics: prometheus.NewSummary(prometheus.SummaryOpts{ + Help: "The duration of GLOBAL async sends in seconds.", + Name: "gubernator_async_durations", + Objectives: map[float64]float64{0.5: 0.05, 0.99: 0.001}, }), - broadcastMetrics: prometheus.NewHistogram(prometheus.HistogramOpts{ - Name: "broadcast_durations", - Help: "The duration of GLOBAL broadcasts to peers in seconds.", + broadcastMetrics: prometheus.NewSummary(prometheus.SummaryOpts{ + Help: "The duration of GLOBAL broadcasts to peers in seconds.", + Name: "gubernator_broadcast_durations", + Objectives: map[float64]float64{0.5: 0.05, 0.99: 0.001}, }), asyncQueue: make(chan *RateLimitReq, 0), broadcastQueue: make(chan *RateLimitReq, 0), @@ -129,11 +132,11 @@ func (gm *globalManager) sendHits(hits map[string]*RateLimitReq) { continue } - p, ok := peerRequests[peer.info.GRPCAddress] + p, ok := 
peerRequests[peer.Info().GRPCAddress] if ok { p.req.Requests = append(p.req.Requests, r) } else { - peerRequests[peer.info.GRPCAddress] = &pair{ + peerRequests[peer.Info().GRPCAddress] = &pair{ client: peer, req: GetPeerRateLimitsReq{Requests: []*RateLimitReq{r}}, } @@ -148,11 +151,11 @@ func (gm *globalManager) sendHits(hits map[string]*RateLimitReq) { if err != nil { gm.log.WithError(err). - Errorf("error sending global hits to '%s'", p.client.info.GRPCAddress) + Errorf("error sending global hits to '%s'", p.client.Info().GRPCAddress) continue } } - gm.asyncMetrics.Observe(clock.Since(start).Seconds()) + gm.asyncMetrics.Observe(time.Since(start).Seconds()) } // runBroadcasts collects status changes for global rate limits and broadcasts the changes to each peer in the cluster. @@ -218,7 +221,7 @@ func (gm *globalManager) broadcastPeers(updates map[string]*RateLimitReq) { for _, peer := range gm.instance.GetPeerList() { // Exclude ourselves from the update - if peer.info.IsOwner { + if peer.Info().IsOwner { continue } @@ -229,11 +232,11 @@ func (gm *globalManager) broadcastPeers(updates map[string]*RateLimitReq) { if err != nil { // Skip peers that are not in a ready state if !IsNotReady(err) { - gm.log.WithError(err).Errorf("while broadcasting global updates to '%s'", peer.info.GRPCAddress) + gm.log.WithError(err).Errorf("while broadcasting global updates to '%s'", peer.Info().GRPCAddress) } continue } } - gm.broadcastMetrics.Observe(clock.Since(start).Seconds()) + gm.broadcastMetrics.Observe(time.Since(start).Seconds()) } diff --git a/go.mod b/go.mod index ec20134b..1411a37e 100644 --- a/go.mod +++ b/go.mod @@ -11,11 +11,11 @@ require ( github.com/mailgun/holster/v3 v3.14.0 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.1.0 - github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 github.com/prometheus/common v0.6.0 github.com/segmentio/fasthash v1.0.2 github.com/sirupsen/logrus v1.4.2 github.com/stretchr/testify v1.4.0 + 
golang.org/x/net v0.0.0-20190923162816-aa69164e4478 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 google.golang.org/grpc v1.23.0 k8s.io/api v0.0.0-20190620084959-7cf5895f2711 @@ -23,4 +23,3 @@ require ( k8s.io/client-go v0.0.0-20190620085101-78d2af792bab k8s.io/klog v0.3.1 ) - diff --git a/grpc_stats.go b/grpc_stats.go index 9ce786b2..ba74038e 100644 --- a/grpc_stats.go +++ b/grpc_stats.go @@ -28,8 +28,8 @@ import ( type GRPCStats struct { Duration clock.Duration Method string - Failed int64 - Success int64 + Failed float64 + Success float64 } type contextKey struct{} @@ -43,18 +43,19 @@ type GRPCStatsHandler struct { wg syncutil.WaitGroup grpcRequestCount *prometheus.CounterVec - grpcRequestDuration *prometheus.HistogramVec + grpcRequestDuration *prometheus.SummaryVec } func NewGRPCStatsHandler() *GRPCStatsHandler { c := &GRPCStatsHandler{ grpcRequestCount: prometheus.NewCounterVec(prometheus.CounterOpts{ - Name: "grpc_request_counts", - Help: "GRPC requests by status."}, - []string{"status", "method"}), - grpcRequestDuration: prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Name: "grpc_request_duration_milliseconds", - Help: "GRPC request durations in milliseconds.", + Name: "gubernator_grpc_request_counts", + Help: "GRPC requests by status.", + }, []string{"status", "method"}), + grpcRequestDuration: prometheus.NewSummaryVec(prometheus.SummaryOpts{ + Name: "gubernator_grpc_request_duration_milliseconds", + Help: "GRPC request durations in milliseconds.", + Objectives: map[float64]float64{0.5: 0.05, 0.99: 0.001}, }, []string{"method"}), } c.run() @@ -67,29 +68,10 @@ func (c *GRPCStatsHandler) run() { c.wg.Until(func(done chan struct{}) bool { select { case stat := <-c.reqCh: - c.grpcRequestCount.With(prometheus.Labels{"status": "failed", "method": stat.Method}).Add(float64(stat.Failed)) - c.grpcRequestCount.With(prometheus.Labels{"status": "success", "method": stat.Method}).Add(float64(stat.Success)) - 
c.grpcRequestDuration.With(prometheus.Labels{"method": stat.Method}).Observe(stat.Duration.Seconds() * 1000) - - /*case <-tick.C: - // Emit stats about our cache - if c.cacheStats != nil { - stats := c.cacheStats.Stats(true) - c.client.Gauge("cache.size", stats.Size) - c.client.Incr("cache.hit", stats.Hit) - c.client.Incr("cache.miss", stats.Miss) - } - - // Emit stats about our global manager - if c.serverStats != nil { - stats := c.serverStats.Stats(true) - c.client.Gauge("global-manager.broadcast-duration", stats.BroadcastDuration) - c.client.Incr("global-manager.async-count", stats.AsyncGlobalsCount) - } - */ + c.grpcRequestCount.With(prometheus.Labels{"status": "failed", "method": stat.Method}).Add(stat.Failed) + c.grpcRequestCount.With(prometheus.Labels{"status": "success", "method": stat.Method}).Add(stat.Success) + c.grpcRequestDuration.With(prometheus.Labels{"method": stat.Method}).Observe(stat.Duration.Seconds()) case <-done: - //tick.Stop() - //c.client.Close() return false } return true diff --git a/gubernator.go b/gubernator.go index 4c2185e3..ee398cb3 100644 --- a/gubernator.go +++ b/gubernator.go @@ -50,8 +50,8 @@ type V1Instance struct { // NewV1Instance instantiate a single instance of a gubernator peer and registers this // instance with the provided GRPCServer. 
func NewV1Instance(conf Config) (*V1Instance, error) { - if conf.GRPCServer == nil { - return nil, errors.New("GRPCServer instance is required") + if conf.GRPCServers == nil { + return nil, errors.New("At least one GRPCServer instance is required") } if err := conf.SetDefaults(); err != nil { @@ -67,9 +67,11 @@ func NewV1Instance(conf Config) (*V1Instance, error) { s.global = newGlobalManager(conf.Behaviors, &s) s.mutliRegion = newMultiRegionManager(conf.Behaviors, &s) - // Register our server with GRPC - RegisterV1Server(conf.GRPCServer, &s) - RegisterPeersV1Server(conf.GRPCServer, &s) + // Register our instance with all GRPC servers + for _, srv := range conf.GRPCServers { + RegisterV1Server(srv, &s) + RegisterPeersV1Server(srv, &s) + } if s.conf.Loader == nil { return &s, nil @@ -167,7 +169,7 @@ func (s *V1Instance) GetRateLimits(ctx context.Context, r *GetRateLimitsReq) (*G } // If our server instance is the owner of this rate limit - if peer.info.IsOwner { + if peer.Info().IsOwner { // Apply our rate limit algorithm to the request inOut.Out, err = s.getRateLimit(inOut.In) if err != nil { @@ -183,7 +185,7 @@ func (s *V1Instance) GetRateLimits(ctx context.Context, r *GetRateLimitsReq) (*G } // Inform the client of the owner key of the key - inOut.Out.Metadata = map[string]string{"owner": peer.info.GRPCAddress} + inOut.Out.Metadata = map[string]string{"owner": peer.Info().GRPCAddress} out <- inOut return nil @@ -202,7 +204,7 @@ func (s *V1Instance) GetRateLimits(ctx context.Context, r *GetRateLimitsReq) (*G } // Inform the client of the owner key of the key - inOut.Out.Metadata = map[string]string{"owner": peer.info.GRPCAddress} + inOut.Out.Metadata = map[string]string{"owner": peer.Info().GRPCAddress} } out <- inOut @@ -357,7 +359,11 @@ func (s *V1Instance) SetPeers(peerInfo []PeerInfo) { peer := s.conf.RegionPicker.GetByPeerInfo(info) // If we don't have an existing PeerClient create a new one if peer == nil { - peer = NewPeerClient(s.conf.Behaviors, info) + 
peer = NewPeerClient(PeerConfig{ + TLS: s.conf.PeerTLS, + Behavior: s.conf.Behaviors, + Info: info, + }) } regionPicker.Add(peer) continue @@ -365,7 +371,11 @@ func (s *V1Instance) SetPeers(peerInfo []PeerInfo) { // If we don't have an existing PeerClient create a new one peer := s.conf.LocalPicker.GetByPeerInfo(info) if peer == nil { - peer = NewPeerClient(s.conf.Behaviors, info) + peer = NewPeerClient(PeerConfig{ + TLS: s.conf.PeerTLS, + Behavior: s.conf.Behaviors, + Info: info, + }) } localPicker.Add(peer) } @@ -386,14 +396,14 @@ func (s *V1Instance) SetPeers(peerInfo []PeerInfo) { var shutdownPeers []*PeerClient for _, peer := range oldLocalPicker.Peers() { - if peerInfo := s.conf.LocalPicker.GetByPeerInfo(peer.info); peerInfo == nil { + if peerInfo := s.conf.LocalPicker.GetByPeerInfo(peer.Info()); peerInfo == nil { shutdownPeers = append(shutdownPeers, peer) } } for _, regionPicker := range oldRegionPicker.Pickers() { for _, peer := range regionPicker.Peers() { - if peerInfo := s.conf.RegionPicker.GetByPeerInfo(peer.info); peerInfo == nil { + if peerInfo := s.conf.RegionPicker.GetByPeerInfo(peer.Info()); peerInfo == nil { shutdownPeers = append(shutdownPeers, peer) } } @@ -415,7 +425,7 @@ func (s *V1Instance) SetPeers(peerInfo []PeerInfo) { if len(shutdownPeers) > 0 { var peers []string for _, p := range shutdownPeers { - peers = append(peers, p.info.GRPCAddress) + peers = append(peers, p.Info().GRPCAddress) } s.log.WithField("peers", peers).Debug("Peers shutdown") } diff --git a/hash.go b/hash.go index c6a6bdda..14e2f0a1 100644 --- a/hash.go +++ b/hash.go @@ -65,7 +65,7 @@ func (ch *ConsistentHash) Peers() []*PeerClient { // Adds a peer to the hash func (ch *ConsistentHash) Add(peer *PeerClient) { - hash := int(ch.hashFunc(strToBytesUnsafe(peer.info.HashKey()))) + hash := int(ch.hashFunc(strToBytesUnsafe(peer.Info().HashKey()))) ch.peerKeys = append(ch.peerKeys, hash) ch.peerMap[hash] = peer sort.Ints(ch.peerKeys) diff --git a/hash_test.go b/hash_test.go 
index 624f8afd..b42471d5 100644 --- a/hash_test.go +++ b/hash_test.go @@ -23,14 +23,14 @@ func TestConsistantHash(t *testing.T) { } hash := NewConsistentHash(nil) for _, h := range hosts { - hash.Add(&PeerClient{info: PeerInfo{GRPCAddress: h}}) + hash.Add(&PeerClient{conf: PeerConfig{Info: PeerInfo{GRPCAddress: h}}}) } for input, addr := range cases { t.Run(input, func(t *testing.T) { peer, err := hash.Get(input) assert.Nil(t, err) - assert.Equal(t, addr, peer.info.GRPCAddress) + assert.Equal(t, addr, peer.Info().GRPCAddress) }) } @@ -40,7 +40,7 @@ func TestConsistantHash(t *testing.T) { hash := NewConsistentHash(nil) for _, h := range hosts { - hash.Add(&PeerClient{info: PeerInfo{GRPCAddress: h}}) + hash.Add(&PeerClient{conf: PeerConfig{Info: PeerInfo{GRPCAddress: h}}}) } assert.Equal(t, len(hosts), hash.Size()) @@ -51,7 +51,7 @@ func TestConsistantHash(t *testing.T) { hostMap := map[string]*PeerClient{} for _, h := range hosts { - peer := &PeerClient{info: PeerInfo{GRPCAddress: h}} + peer := &PeerClient{conf: PeerConfig{Info: PeerInfo{GRPCAddress: h}}} hash.Add(peer) hostMap[h] = peer } @@ -85,13 +85,13 @@ func TestConsistantHash(t *testing.T) { hostMap := map[string]int{} for _, h := range hosts { - hash.Add(&PeerClient{info: PeerInfo{GRPCAddress: h}}) + hash.Add(&PeerClient{conf: PeerConfig{Info: PeerInfo{GRPCAddress: h}}}) hostMap[h] = 0 } for i := range strings { peer, _ := hash.Get(strings[i]) - hostMap[peer.info.GRPCAddress]++ + hostMap[peer.Info().GRPCAddress]++ } for host, a := range hostMap { @@ -120,7 +120,7 @@ func BenchmarkConsistantHash(b *testing.B) { hash := NewConsistentHash(hashFunc) hosts := []string{"a.svc.local", "b.svc.local", "c.svc.local"} for _, h := range hosts { - hash.Add(&PeerClient{info: PeerInfo{GRPCAddress: h}}) + hash.Add(&PeerClient{conf: PeerConfig{Info: PeerInfo{GRPCAddress: h}}}) } b.ResetTimer() diff --git a/k8s-deployment.yaml b/k8s-deployment.yaml index 8a45cf0b..3b661bb5 100644 --- a/k8s-deployment.yaml +++ 
b/k8s-deployment.yaml @@ -5,7 +5,7 @@ metadata: labels: app: gubernator spec: - replicas: 2 + replicas: 4 selector: matchLabels: app: gubernator @@ -33,6 +33,9 @@ spec: valueFrom: fieldRef: fieldPath: status.podIP + # Use the k8s API for peer discovery + - name: GUBER_PEER_DISCOVERY_TYPE + value: "k8s" # This should match the port number GRPC is listening on # as defined by `containerPort` - name: GUBER_K8S_POD_PORT @@ -42,8 +45,8 @@ spec: - name: GUBER_K8S_ENDPOINTS_SELECTOR value: "app=gubernator" # Enable debug for diagnosing issues - #- name: GUBER_DEBUG - # value: "true" + - name: GUBER_DEBUG + value: "true" restartPolicy: Always --- apiVersion: v1 diff --git a/kubernetes.go b/kubernetes.go index aafd8206..153628c9 100644 --- a/kubernetes.go +++ b/kubernetes.go @@ -94,7 +94,7 @@ func (e *K8sPool) start() error { e.informer.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { key, err := cache.MetaNamespaceKeyFunc(obj) - e.log.Debugf("Queue (Add) '%s' - %s", key, err) + e.log.Debugf("Queue (Add) '%s' - %v", key, err) if err != nil { e.log.Errorf("while calling MetaNamespaceKeyFunc(): %s", err) return @@ -102,7 +102,7 @@ func (e *K8sPool) start() error { }, UpdateFunc: func(obj, new interface{}) { key, err := cache.MetaNamespaceKeyFunc(obj) - e.log.Debugf("Queue (Update) '%s' - %s", key, err) + e.log.Debugf("Queue (Update) '%s' - %v", key, err) if err != nil { e.log.Errorf("while calling MetaNamespaceKeyFunc(): %s", err) return @@ -111,7 +111,7 @@ func (e *K8sPool) start() error { }, DeleteFunc: func(obj interface{}) { key, err := cache.MetaNamespaceKeyFunc(obj) - e.log.Debugf("Queue (Delete) '%s' - %s", key, err) + e.log.Debugf("Queue (Delete) '%s' - %v", key, err) if err != nil { e.log.Errorf("while calling MetaNamespaceKeyFunc(): %s", err) return diff --git a/net.go b/net.go index 1a252920..cc0f0a43 100644 --- a/net.go +++ b/net.go @@ -32,10 +32,46 @@ func ResolveHostIP(addr string) (string, error) { return addr, nil } +type 
netInfo struct { + IPAddresses []string + DNSNames []string +} + +// Attempts to discover all the external ips and dns names associated with the current host. +func discoverNetwork() (netInfo, error) { + var result netInfo + + var err error + result.IPAddresses, err = discoverNetworkAddresses() + if err != nil { + return result, err + } + + for _, ip := range result.IPAddresses { + records, _ := net.LookupAddr(ip) + result.DNSNames = append(result.DNSNames, records...) + } + return result, nil +} + +// Returns the first external ip address it finds func discoverIP() (string, error) { + addrs, err := discoverNetworkAddresses() + if err != nil { + return "", errors.Wrap(err, "while detecting external ip address") + } + if len(addrs) == 0 { + return "", errors.New("No external ip address found; please set `GUBER_ADVERTISE_ADDRESS`") + } + return addrs[0], err +} + +// Returns a list of net addresses by inspecting the network interfaces on the current host. +func discoverNetworkAddresses() ([]string, error) { + var results []string ifaces, err := net.Interfaces() if err != nil { - return "", err + return nil, err } for _, iface := range ifaces { if iface.Flags&net.FlagUp == 0 { @@ -46,7 +82,7 @@ func discoverIP() (string, error) { } addrs, err := iface.Addrs() if err != nil { - return "", err + return nil, err } for _, addr := range addrs { var ip net.IP @@ -63,8 +99,8 @@ func discoverIP() (string, error) { if ip == nil { continue // not an ipv4 address } - return ip.String(), nil + results = append(results, ip.String()) } } - return "", errors.New("Unable to detect external ip address; please set `GUBER_ADVERTISE_ADDRESS`?") + return results, nil } diff --git a/peer_client.go b/peer_client.go index 473e1f45..faed58e5 100644 --- a/peer_client.go +++ b/peer_client.go @@ -18,6 +18,7 @@ package gubernator import ( "context" + "crypto/tls" "fmt" "sync" @@ -25,6 +26,7 @@ import ( "github.com/mailgun/holster/v3/collections" "github.com/pkg/errors" "google.golang.org/grpc" + 
"google.golang.org/grpc/credentials" ) type PeerPicker interface { @@ -47,9 +49,8 @@ const ( type PeerClient struct { client PeersV1Client conn *grpc.ClientConn - conf BehaviorConfig + conf PeerConfig queue chan *request - info PeerInfo lastErrs *collections.LRUCache mutex sync.RWMutex // This mutex is for verifying the closing state of the client @@ -67,12 +68,17 @@ type request struct { resp chan *response } -func NewPeerClient(conf BehaviorConfig, info PeerInfo) *PeerClient { +type PeerConfig struct { + TLS *tls.Config + Behavior BehaviorConfig + Info PeerInfo +} + +func NewPeerClient(conf PeerConfig) *PeerClient { return &PeerClient{ queue: make(chan *request, 1000), status: peerNotConnected, conf: conf, - info: info, lastErrs: collections.NewLRUCache(100), } } @@ -83,8 +89,7 @@ func (c *PeerClient) connect() error { // handle ErrClosing. Since this mutex MUST be here we take this opportunity to also see if we are connected. // Doing this here encapsulates managing the connected state to the PeerClient struct. Previously a PeerClient // was connected when `NewPeerClient()` was called however, when adding support for multi data centers having a - // PeerClient connected to every Peer in every data center continuously is not desirable, especially if nodes - // in each region are configured to all have sisters. + // PeerClient connected to every Peer in every data center continuously is not desirable. c.mutex.RLock() if c.status == peerClosing { @@ -108,10 +113,14 @@ func (c *PeerClient) connect() error { } var err error - // c.conn, err = grpc.Dial(fmt.Sprintf("%s:%s", c.info.GRPCAddress, ""), grpc.WithInsecure()) - c.conn, err = grpc.Dial(c.info.GRPCAddress, grpc.WithInsecure()) + opts := []grpc.DialOption{grpc.WithInsecure()} + if c.conf.TLS != nil { + opts = []grpc.DialOption{grpc.WithTransportCredentials(credentials.NewTLS(c.conf.TLS))} + } + + c.conn, err = grpc.Dial(c.conf.Info.GRPCAddress, opts...) 
if err != nil { - return c.setLastErr(&PeerErr{err: errors.Wrapf(err, "failed to dial peer %s", c.info.GRPCAddress)}) + return c.setLastErr(&PeerErr{err: errors.Wrapf(err, "failed to dial peer %s", c.conf.Info.GRPCAddress)}) } c.client = NewPeersV1Client(c.conn) c.status = peerConnected @@ -122,9 +131,9 @@ func (c *PeerClient) connect() error { return nil } -// PeerInfo returns PeerInfo struct that describes this PeerClient -func (c *PeerClient) PeerInfo() PeerInfo { - return c.info +// Info returns PeerInfo struct that describes this PeerClient +func (c *PeerClient) Info() PeerInfo { + return c.conf.Info } // GetPeerRateLimit forwards a rate limit request to a peer. If the rate limit has `behavior == BATCHING` configured @@ -201,7 +210,7 @@ func (c *PeerClient) setLastErr(err error) error { } // Prepend client address to error - errWithHostname := errors.Wrap(err, fmt.Sprintf("from host %s", c.info.GRPCAddress)) + errWithHostname := errors.Wrap(err, fmt.Sprintf("from host %s", c.conf.Info.GRPCAddress)) key := err.Error() // Add error to the cache with a TTL of 5 minutes @@ -258,7 +267,7 @@ func (c *PeerClient) getPeerRateLimitsBatch(ctx context.Context, r *RateLimitReq // run waits for requests to be queued, when either c.batchWait time // has elapsed or the queue reaches c.batchLimit. Send what is in the queue. 
func (c *PeerClient) run() { - var interval = NewInterval(c.conf.BatchWait) + var interval = NewInterval(c.conf.Behavior.BatchWait) defer interval.Stop() var queue []*request @@ -277,7 +286,7 @@ func (c *PeerClient) run() { queue = append(queue, r) // Send the queue if we reached our batch limit - if len(queue) == c.conf.BatchLimit { + if len(queue) == c.conf.Behavior.BatchLimit { c.sendQueue(queue) queue = nil continue @@ -307,7 +316,7 @@ func (c *PeerClient) sendQueue(queue []*request) { req.Requests = append(req.Requests, r.request) } - ctx, cancel := context.WithTimeout(context.Background(), c.conf.BatchTimeout) + ctx, cancel := context.WithTimeout(context.Background(), c.conf.Behavior.BatchTimeout) resp, err := c.client.GetPeerRateLimits(ctx, &req) cancel() diff --git a/peer_client_test.go b/peer_client_test.go index 18d45688..b4591464 100644 --- a/peer_client_test.go +++ b/peer_client_test.go @@ -40,7 +40,10 @@ func TestPeerClientShutdown(t *testing.T) { c := cases[i] t.Run(c.Name, func(t *testing.T) { - client := gubernator.NewPeerClient(config, cluster.GetRandomPeer()) + client := gubernator.NewPeerClient(gubernator.PeerConfig{ + Info: cluster.GetRandomPeer(), + Behavior: config, + }) wg := sync.WaitGroup{} wg.Add(threads) diff --git a/proto/gubernator.proto b/proto/gubernator.proto index 22dcc721..4563b4d5 100644 --- a/proto/gubernator.proto +++ b/proto/gubernator.proto @@ -122,9 +122,9 @@ enum Behavior { // cache value. For `LEAKY_BUCKET` it sets the `Remaining` to `Limit`. RESET_REMAINING = 8; - // Enables rate limits to be pushed to other regions. Currently this is only implemented on memberlist - // pools. Also requires GUBER_DATA_CENTER to be set to different values on at least 2 instances of - // Gubernator. + // Enables rate limits to be pushed to other regions. Currently this is only implemented when using + // 'member-list' peer discovery. Also requires GUBER_DATA_CENTER to be set to different values on at + // least 2 instances of Gubernator. 
MULTI_REGION = 16; // TODO: Add support for LOCAL. Which would force the rate limit to be handled by the local instance diff --git a/region_picker.go b/region_picker.go index dc9f3158..f723a004 100644 --- a/region_picker.go +++ b/region_picker.go @@ -86,10 +86,10 @@ func (rp *RegionPicker) Peers() []*PeerClient { } func (rp *RegionPicker) Add(peer *PeerClient) { - picker, ok := rp.regions[peer.info.DataCenter] + picker, ok := rp.regions[peer.Info().DataCenter] if !ok { picker = rp.ReplicatedConsistentHash.New() - rp.regions[peer.info.DataCenter] = picker + rp.regions[peer.Info().DataCenter] = picker } picker.Add(peer) } diff --git a/replicated_hash.go b/replicated_hash.go index ed02042b..43c35f22 100644 --- a/replicated_hash.go +++ b/replicated_hash.go @@ -76,9 +76,9 @@ func (ch *ReplicatedConsistentHash) Peers() []*PeerClient { // Adds a peer to the hash func (ch *ReplicatedConsistentHash) Add(peer *PeerClient) { - ch.peers[peer.info.GRPCAddress] = peer + ch.peers[peer.Info().GRPCAddress] = peer - key := fmt.Sprintf("%x", md5.Sum([]byte(peer.info.GRPCAddress))) + key := fmt.Sprintf("%x", md5.Sum([]byte(peer.Info().GRPCAddress))) for i := 0; i < ch.replicas; i++ { hash := ch.hashFunc(strToBytesUnsafe(strconv.Itoa(i) + key)) ch.peerKeys = append(ch.peerKeys, peerInfo{ diff --git a/replicated_hash_test.go b/replicated_hash_test.go index d297ed29..ecf77106 100644 --- a/replicated_hash_test.go +++ b/replicated_hash_test.go @@ -18,7 +18,7 @@ func TestReplicatedConsistantHash(t *testing.T) { hash := NewReplicatedConsistentHash(nil, DefaultReplicas) for _, h := range hosts { - hash.Add(&PeerClient{info: PeerInfo{GRPCAddress: h}}) + hash.Add(&PeerClient{conf: PeerConfig{Info: PeerInfo{GRPCAddress: h}}}) } assert.Equal(t, len(hosts), hash.Size()) @@ -29,7 +29,7 @@ func TestReplicatedConsistantHash(t *testing.T) { hostMap := map[string]*PeerClient{} for _, h := range hosts { - peer := &PeerClient{info: PeerInfo{GRPCAddress: h}} + peer := &PeerClient{conf: PeerConfig{Info: 
PeerInfo{GRPCAddress: h}}} hash.Add(peer) hostMap[h] = peer } @@ -62,13 +62,13 @@ func TestReplicatedConsistantHash(t *testing.T) { hostMap := map[string]int{} for _, h := range hosts { - hash.Add(&PeerClient{info: PeerInfo{GRPCAddress: h}}) + hash.Add(&PeerClient{conf: PeerConfig{Info: PeerInfo{GRPCAddress: h}}}) hostMap[h] = 0 } for i := range strings { peer, _ := hash.Get(strings[i]) - hostMap[peer.info.GRPCAddress]++ + hostMap[peer.Info().GRPCAddress]++ } for host, a := range hostMap { @@ -96,7 +96,7 @@ func BenchmarkReplicatedConsistantHash(b *testing.B) { hash := NewReplicatedConsistentHash(hashFunc, DefaultReplicas) hosts := []string{"a.svc.local", "b.svc.local", "c.svc.local"} for _, h := range hosts { - hash.Add(&PeerClient{info: PeerInfo{GRPCAddress: h}}) + hash.Add(&PeerClient{conf: PeerConfig{Info: PeerInfo{GRPCAddress: h}}}) } b.ResetTimer() diff --git a/store_test.go b/store_test.go index 416074b7..d6339e6a 100644 --- a/store_test.go +++ b/store_test.go @@ -36,14 +36,14 @@ type v1Server struct { } func (s *v1Server) Close() { - s.conf.GRPCServer.GracefulStop() + s.conf.GRPCServers[0].GracefulStop() s.srv.Close() } // Start a single instance of V1Server with the provided config and listening address. 
func newV1Server(t *testing.T, address string, conf gubernator.Config) *v1Server { t.Helper() - conf.GRPCServer = grpc.NewServer() + conf.GRPCServers = append(conf.GRPCServers, grpc.NewServer()) srv, err := gubernator.NewV1Instance(conf) require.NoError(t, err) @@ -52,7 +52,7 @@ func newV1Server(t *testing.T, address string, conf gubernator.Config) *v1Server require.NoError(t, err) go func() { - if err := conf.GRPCServer.Serve(listener); err != nil { + if err := conf.GRPCServers[0].Serve(listener); err != nil { fmt.Printf("while serving: %s\n", err) } }() @@ -87,7 +87,7 @@ func TestLoader(t *testing.T) { assert.Equal(t, 1, loader.Called["Load()"]) assert.Equal(t, 0, loader.Called["Save()"]) - client, err := gubernator.DialV1Server(srv.listener.Addr().String()) + client, err := gubernator.DialV1Server(srv.listener.Addr().String(), nil) assert.Nil(t, err) resp, err := client.GetRateLimits(context.Background(), &gubernator.GetRateLimitsReq{ @@ -219,7 +219,7 @@ func TestStore(t *testing.T) { assert.Equal(t, 0, store.Called["OnChange()"]) assert.Equal(t, 0, store.Called["Get()"]) - client, err := gubernator.DialV1Server(srv.listener.Addr().String()) + client, err := gubernator.DialV1Server(srv.listener.Addr().String(), nil) assert.Nil(t, err) req := gubernator.RateLimitReq{ diff --git a/tls.go b/tls.go new file mode 100644 index 00000000..d149dd8b --- /dev/null +++ b/tls.go @@ -0,0 +1,416 @@ +package gubernator + +import ( + "bytes" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "io/ioutil" + "math/big" + "net" + "strings" + "time" + + "github.com/mailgun/holster/v3/setter" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +const ( + blockTypeEC = "EC PRIVATE KEY" + blockTypeRSA = "RSA PRIVATE KEY" + blockTypePriv = "PRIVATE KEY" + blockTypeCert = "CERTIFICATE" +) + +type TLSConfig struct { + // (Optional) The path to the Trusted Certificate Authority. 
+ CaFile string + + // (Optional) The path to the Trusted Certificate Authority private key. + CaKeyFile string + + // (Optional) The path to the un-encrypted key for the server certificate. + KeyFile string + + // (Optional) The path to the server certificate. + CertFile string + + // (Optional) If true gubernator will generate self-signed certificates. If CaFile and CaKeyFile + // is set but no KeyFile or CertFile is set then gubernator will generate a self-signed key using + // the CaFile provided. + AutoTLS bool + + // (Optional) Sets the Client Authentication type as defined in the 'tls' package. + // Defaults to tls.NoClientCert.See the standard library tls.ClientAuthType for valid values. + // If set to anything but tls.NoClientCert then SetupTLS() attempts to load ClientAuthCaFile, + // ClientAuthKeyFile and ClientAuthCertFile and sets those certs into the ClientTLS struct. If + // none of the ClientXXXFile's are set, uses KeyFile and CertFile for client authentication. + ClientAuth tls.ClientAuthType + + // (Optional) The path to the Trusted Certificate Authority used for client auth. If ClientAuth is + // set and this field is empty, then CaFile is used to auth clients. + ClientAuthCaFile string + + // (Optional) The path to the client private key, which is used to create the ClientTLS config. If + // ClientAuth is set and this field is empty then KeyFile is used to create the ClientTLS. + ClientAuthKeyFile string + + // (Optional) The path to the client cert key, which is used to create the ClientTLS config. If + // ClientAuth is set and this field is empty then KeyFile is used to create the ClientTLS. + ClientAuthCertFile string + + // (Optional) If InsecureSkipVerify is true, TLS clients will accept any certificate + // presented by the server and any host name in that certificate. 
+ InsecureSkipVerify bool + + // (Optional) A Logger which implements the declared logger interface (typically *logrus.Entry) + Logger logrus.FieldLogger + + // (Optional) The CA Certificate in PEM format. Used if CaFile is unset + CaPEM *bytes.Buffer + + // (Optional) The CA Private Key in PEM format. Used if CaKeyFile is unset + CaKeyPEM *bytes.Buffer + + // (Optional) The Certificate Key in PEM format. Used if KeyFile is unset. + KeyPEM *bytes.Buffer + + // (Optional) The Certificate in PEM format. Used if CertFile is unset. + CertPEM *bytes.Buffer + + // (Optional) The client auth CA Certificate in PEM format. Used if ClientAuthCaFile is unset. + ClientAuthCaPEM *bytes.Buffer + + // (Optional) The client auth private key in PEM format. Used if ClientAuthKeyFile is unset. + ClientAuthKeyPEM *bytes.Buffer + + // (Optional) The client auth Certificate in PEM format. Used if ClientAuthCertFile is unset. + ClientAuthCertPEM *bytes.Buffer + + // (Optional) The config created for use by the gubernator server. If set, all other + // fields in this struct are ignored and this config is used. If unset, gubernator.SetupTLS() + // will create a config using the above fields. + ServerTLS *tls.Config + + // (Optional) The config created for use by gubernator clients and peer communication. If set, all other + // fields in this struct are ignored and this config is used. If unset, gubernator.SetupTLS() + // will create a config using the above fields. + ClientTLS *tls.Config +} + +func fromFile(name string) (*bytes.Buffer, error) { + if name == "" { + return nil, nil + } + + b, err := ioutil.ReadFile(name) + if err != nil { + return nil, errors.Wrapf(err, "while reading file '%s'", name) + } + return bytes.NewBuffer(b), nil +} + +func SetupTLS(conf *TLSConfig) error { + var err error + + if conf == nil { + return nil + } + + // If both client and server tls configs provided, nothing to do! 
+ if conf.ServerTLS != nil && conf.ClientTLS != nil { + return nil + } + + setter.SetDefault(&conf.Logger, logrus.WithField("category", "gubernator")) + conf.Logger.Info("Detected TLS Configuration") + + // Basic config with reasonably secure defaults + setter.SetDefault(&conf.ServerTLS, &tls.Config{ + CipherSuites: []uint16{ + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, + tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, + tls.TLS_RSA_WITH_AES_128_GCM_SHA256, + tls.TLS_RSA_WITH_AES_256_GCM_SHA384, + tls.TLS_RSA_WITH_AES_128_CBC_SHA256, + tls.TLS_RSA_WITH_AES_128_CBC_SHA, + tls.TLS_RSA_WITH_AES_256_CBC_SHA, + }, + ClientAuth: conf.ClientAuth, + MinVersion: tls.VersionTLS10, + NextProtos: []string{ + "h2", "http/1.1", // enable HTTP/2 + }, + }) + setter.SetDefault(&conf.ClientTLS, &tls.Config{}) + + // Attempt to load any files provided + conf.CaPEM, err = fromFile(conf.CaFile) + if err != nil { + return err + } + + conf.CaKeyPEM, err = fromFile(conf.CaKeyFile) + if err != nil { + return err + } + + conf.KeyPEM, err = fromFile(conf.KeyFile) + if err != nil { + return err + } + + conf.CertPEM, err = fromFile(conf.CertFile) + if err != nil { + return err + } + + conf.ClientAuthCaPEM, err = fromFile(conf.ClientAuthCaFile) + if err != nil { + return err + } + + conf.ClientAuthKeyPEM, err = fromFile(conf.ClientAuthKeyFile) + if err != nil { + return err + } + + conf.ClientAuthCertPEM, err = fromFile(conf.ClientAuthCertFile) + if err != nil { + return err + } + + // If generated TLS certs requested + if conf.AutoTLS { + 
conf.Logger.Info("AutoTLS Enabled") + // Generate CA Cert and Private Key + if err := selfCA(conf); err != nil { + return errors.Wrap(err, "while generating self signed CA certs") + } + + // Generate Server Cert and Private Key + if err := selfCert(conf); err != nil { + return errors.Wrap(err, "while generating self signed server certs") + } + } + + if conf.CaPEM != nil { + rootPool, err := x509.SystemCertPool() + if err != nil { + conf.Logger.Warnf("while loading system CA Certs '%s'; using provided pool instead", err) + rootPool = x509.NewCertPool() + } + rootPool.AppendCertsFromPEM(conf.CaPEM.Bytes()) + conf.ServerTLS.RootCAs = rootPool + conf.ClientTLS.RootCAs = rootPool + } + + if conf.KeyPEM != nil && conf.CertPEM != nil { + serverCert, err := tls.X509KeyPair(conf.CertPEM.Bytes(), conf.KeyPEM.Bytes()) + if err != nil { + return errors.Wrap(err, "while parsing server certificate and private key") + } + conf.ServerTLS.Certificates = []tls.Certificate{serverCert} + conf.ClientTLS.Certificates = []tls.Certificate{serverCert} + } + + // If user asked for client auth + if conf.ClientAuth != tls.NoClientCert { + clientPool := x509.NewCertPool() + if conf.ClientAuthCaPEM != nil { + // If client auth CA was provided + clientPool.AppendCertsFromPEM(conf.ClientAuthCaPEM.Bytes()) + + } else if conf.CaPEM != nil { + // else use the servers CA + clientPool.AppendCertsFromPEM(conf.CaPEM.Bytes()) + } + + // error if neither was provided + if len(clientPool.Subjects()) == 0 { + return errors.New("client auth enabled, but no CA's provided") + } + + conf.ServerTLS.ClientCAs = clientPool + + // If client auth key/cert was provided + if conf.ClientAuthKeyPEM != nil && conf.ClientAuthCertPEM != nil { + clientCert, err := tls.X509KeyPair(conf.ClientAuthCertPEM.Bytes(), conf.ClientAuthKeyPEM.Bytes()) + if err != nil { + return errors.Wrap(err, "while parsing client certificate and private key") + } + conf.ClientTLS.Certificates = []tls.Certificate{clientCert} + } + } + + 
conf.ClientTLS.InsecureSkipVerify = conf.InsecureSkipVerify + return nil +} + +func selfCert(conf *TLSConfig) error { + if conf.CertPEM != nil && conf.KeyPEM != nil { + return nil + } + + network, err := discoverNetwork() + if err != nil { + return errors.Wrap(err, "while detecting ip and host names") + } + + cert := x509.Certificate{ + ExtKeyUsage: []x509.ExtKeyUsage{ + x509.ExtKeyUsageClientAuth, + x509.ExtKeyUsageServerAuth, + }, + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + Subject: pkix.Name{Organization: []string{"gubernator"}}, + NotAfter: time.Now().Add(365 * (24 * time.Hour)), + DNSNames: []string{"localhost"}, + IPAddresses: []net.IP{net.ParseIP("127.0.0.1")}, + SerialNumber: big.NewInt(0xC0FFEE), + NotBefore: time.Now(), + BasicConstraintsValid: true, + } + + // Ensure all our names and ip addresses are included in the Certificate + for _, dnsNames := range network.DNSNames { + cert.DNSNames = append(cert.DNSNames, dnsNames) + } + + for _, ipStr := range network.IPAddresses { + if ip := net.ParseIP(ipStr); ip != nil { + cert.IPAddresses = append(cert.IPAddresses, ip) + } + } + + conf.Logger.Info("Generating Server Private Key and Certificate....") + conf.Logger.Infof("Cert DNS names: (%s)", strings.Join(cert.DNSNames, ",")) + conf.Logger.Infof("Cert IPs: (%s)", func() string { + var r []string + for i := range cert.IPAddresses { + r = append(r, cert.IPAddresses[i].String()) + } + return strings.Join(r, ",") + }()) + + // Generate a public / private key + privKey, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader) + if err != nil { + return errors.Wrap(err, "while generating pubic/private key pair") + } + + // Attempt to sign the generated certs with the provided CaFile + if conf.CaPEM == nil && conf.CaKeyPEM == nil { + return errors.New("unable to generate server certs without a signing CA") + } + + keyPair, err := tls.X509KeyPair(conf.CaPEM.Bytes(), conf.CaKeyPEM.Bytes()) + if err != nil { + return errors.Wrap(err, 
"while reading generated PEMs") + } + + if len(keyPair.Certificate) == 0 { + return errors.New("no certificates found in CA PEM") + } + + caCert, err := x509.ParseCertificate(keyPair.Certificate[0]) + if err != nil { + return errors.Wrap(err, "while parsing CA Cert") + } + + signedBytes, err := x509.CreateCertificate(rand.Reader, &cert, caCert, &privKey.PublicKey, keyPair.PrivateKey) + if err != nil { + return errors.Wrap(err, "while self signing server cert") + } + + conf.CertPEM = new(bytes.Buffer) + if err := pem.Encode(conf.CertPEM, &pem.Block{ + Type: "CERTIFICATE", + Bytes: signedBytes, + }); err != nil { + return errors.Wrap(err, "while encoding CERTIFICATE PEM") + } + + b, err := x509.MarshalECPrivateKey(privKey) + if err != nil { + return errors.Wrap(err, "while encoding EC Marshalling") + } + + conf.KeyPEM = new(bytes.Buffer) + if err := pem.Encode(conf.KeyPEM, &pem.Block{ + Type: blockTypeEC, + Bytes: b, + }); err != nil { + return errors.Wrap(err, "while encoding EC KEY PEM") + } + return nil +} + +func selfCA(conf *TLSConfig) error { + ca := x509.Certificate{ + SerialNumber: big.NewInt(2319), + Subject: pkix.Name{Organization: []string{"gubernator"}}, + NotBefore: time.Now(), + NotAfter: time.Now().AddDate(10, 0, 0), + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth}, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + BasicConstraintsValid: true, + IsCA: true, + } + + var privKey *ecdsa.PrivateKey + var err error + var b []byte + + if conf.CaPEM != nil && conf.CaKeyPEM != nil { + return nil + } + + conf.Logger.Info("Generating CA Certificates....") + privKey, err = ecdsa.GenerateKey(elliptic.P521(), rand.Reader) + if err != nil { + return errors.Wrap(err, "while generating pubic/private key pair") + } + + b, err = x509.CreateCertificate(rand.Reader, &ca, &ca, &privKey.PublicKey, privKey) + if err != nil { + return errors.Wrap(err, "while self signing CA certificate") + } + + conf.CaPEM = 
new(bytes.Buffer) + if err := pem.Encode(conf.CaPEM, &pem.Block{ + Type: blockTypeCert, + Bytes: b, + }); err != nil { + return errors.Wrap(err, "while encoding CERTIFICATE PEM") + } + + b, err = x509.MarshalECPrivateKey(privKey) + if err != nil { + return errors.Wrap(err, "while marshalling EC private key") + } + + conf.CaKeyPEM = new(bytes.Buffer) + if err := pem.Encode(conf.CaKeyPEM, &pem.Block{ + Type: blockTypeEC, + Bytes: b, + }); err != nil { + return errors.Wrap(err, "while encoding EC private key into PEM") + } + return nil +} diff --git a/tls_test.go b/tls_test.go new file mode 100644 index 00000000..c005bb94 --- /dev/null +++ b/tls_test.go @@ -0,0 +1,290 @@ +package gubernator_test + +import ( + "context" + "crypto/tls" + "fmt" + "io/ioutil" + "net/http" + "testing" + + "github.com/mailgun/gubernator" + "github.com/mailgun/holster/v3/clock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "golang.org/x/net/http2" +) + +func spawnDaemon(t *testing.T, conf gubernator.DaemonConfig) *gubernator.Daemon { + t.Helper() + + ctx, cancel := context.WithTimeout(context.Background(), clock.Second*10) + d, err := gubernator.SpawnDaemon(ctx, conf) + cancel() + require.NoError(t, err) + d.SetPeers([]gubernator.PeerInfo{{GRPCAddress: conf.GRPCListenAddress, IsOwner: true}}) + return d +} + +func makeRequest(t *testing.T, conf gubernator.DaemonConfig) error { + t.Helper() + + client, err := gubernator.DialV1Server(conf.GRPCListenAddress, conf.TLS.ClientTLS) + require.NoError(t, err) + + resp, err := client.GetRateLimits(context.Background(), &gubernator.GetRateLimitsReq{ + Requests: []*gubernator.RateLimitReq{ + { + Name: "test_tls", + UniqueKey: "account:995", + Algorithm: gubernator.Algorithm_TOKEN_BUCKET, + Duration: gubernator.Second * 30, + Limit: 100, + Hits: 1, + }, + }, + }) + + if err != nil { + return err + } + rl := resp.Responses[0] + assert.Equal(t, "", rl.Error) + return nil +} + +func TestSetupTLS(t *testing.T) { + tests := 
[]struct { + tls *gubernator.TLSConfig + name string + }{ + { + name: "user provided certificates", + tls: &gubernator.TLSConfig{ + CaFile: "certs/ca.pem", + CertFile: "certs/gubernator.pem", + KeyFile: "certs/gubernator.key", + }, + }, + { + name: "auto tls", + tls: &gubernator.TLSConfig{ + AutoTLS: true, + }, + }, + { + name: "generate server certs with user provided ca", + tls: &gubernator.TLSConfig{ + CaFile: "certs/ca.pem", + CaKeyFile: "certs/ca.key", + AutoTLS: true, + }, + }, + { + name: "client auth enabled", + tls: &gubernator.TLSConfig{ + CaFile: "certs/ca.pem", + CaKeyFile: "certs/ca.key", + AutoTLS: true, + ClientAuth: tls.RequireAndVerifyClientCert, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + conf := gubernator.DaemonConfig{ + GRPCListenAddress: "127.0.0.1:9695", + HTTPListenAddress: "127.0.0.1:9685", + TLS: tt.tls, + } + + d := spawnDaemon(t, conf) + + client, err := gubernator.DialV1Server(conf.GRPCListenAddress, tt.tls.ServerTLS) + require.NoError(t, err) + + resp, err := client.GetRateLimits(context.Background(), &gubernator.GetRateLimitsReq{ + Requests: []*gubernator.RateLimitReq{ + { + Name: "test_tls", + UniqueKey: "account:995", + Algorithm: gubernator.Algorithm_TOKEN_BUCKET, + Duration: gubernator.Second * 30, + Limit: 100, + Hits: 1, + }, + }, + }) + require.NoError(t, err) + + rl := resp.Responses[0] + assert.Equal(t, "", rl.Error) + assert.Equal(t, gubernator.Status_UNDER_LIMIT, rl.Status) + assert.Equal(t, int64(99), rl.Remaining) + d.Close() + }) + } +} + +func TestSetupTLSSkipVerify(t *testing.T) { + conf := gubernator.DaemonConfig{ + GRPCListenAddress: "127.0.0.1:9695", + HTTPListenAddress: "127.0.0.1:9685", + TLS: &gubernator.TLSConfig{ + CaFile: "certs/ca.pem", + CertFile: "certs/gubernator.pem", + KeyFile: "certs/gubernator.key", + }, + } + + d := spawnDaemon(t, conf) + defer d.Close() + + tls := &gubernator.TLSConfig{ + AutoTLS: true, + InsecureSkipVerify: true, + } + + err := 
gubernator.SetupTLS(tls) + require.NoError(t, err) + conf.TLS = tls + + err = makeRequest(t, conf) + require.NoError(t, err) +} + +func TestSetupTLSClientAuth(t *testing.T) { + serverTLS := gubernator.TLSConfig{ + CaFile: "certs/ca.pem", + CertFile: "certs/gubernator.pem", + KeyFile: "certs/gubernator.key", + ClientAuth: tls.RequireAndVerifyClientCert, + ClientAuthCaFile: "certs/client-auth-ca.pem", + } + + conf := gubernator.DaemonConfig{ + GRPCListenAddress: "127.0.0.1:9695", + HTTPListenAddress: "127.0.0.1:9685", + TLS: &serverTLS, + } + + d := spawnDaemon(t, conf) + defer d.Close() + + // Given generated client certs + tls := &gubernator.TLSConfig{ + AutoTLS: true, + InsecureSkipVerify: true, + } + + err := gubernator.SetupTLS(tls) + require.NoError(t, err) + conf.TLS = tls + + // Should not be allowed without a cert signed by the client CA + err = makeRequest(t, conf) + require.Error(t, err) + assert.Contains(t, err.Error(), "code = Unavailable desc") + + // Given the client auth certs + tls = &gubernator.TLSConfig{ + CertFile: "certs/client-auth.pem", + KeyFile: "certs/client-auth.key", + InsecureSkipVerify: true, + } + + err = gubernator.SetupTLS(tls) + require.NoError(t, err) + conf.TLS = tls + + // Should be allowed to connect and make requests + err = makeRequest(t, conf) + require.NoError(t, err) +} + +func TestTLSClusterWithClientAuthentication(t *testing.T) { + serverTLS := gubernator.TLSConfig{ + CaFile: "certs/ca.pem", + CertFile: "certs/gubernator.pem", + KeyFile: "certs/gubernator.key", + ClientAuth: tls.RequireAndVerifyClientCert, + } + + d1 := spawnDaemon(t, gubernator.DaemonConfig{ + GRPCListenAddress: "127.0.0.1:9695", + HTTPListenAddress: "127.0.0.1:9685", + TLS: &serverTLS, + }) + defer d1.Close() + + d2 := spawnDaemon(t, gubernator.DaemonConfig{ + GRPCListenAddress: "127.0.0.1:9696", + HTTPListenAddress: "127.0.0.1:9686", + TLS: &serverTLS, + }) + defer d2.Close() + + peers := []gubernator.PeerInfo{ + { + GRPCAddress: 
d1.GRPCListeners[0].Addr().String(), + HTTPAddress: d1.HTTPListener.Addr().String(), + }, + { + GRPCAddress: d2.GRPCListeners[0].Addr().String(), + HTTPAddress: d2.HTTPListener.Addr().String(), + }, + } + d1.SetPeers(peers) + d2.SetPeers(peers) + + // Should result in a remote call to d2 + err := makeRequest(t, d1.Config()) + require.NoError(t, err) + + config := d2.Config() + client := &http.Client{ + Transport: &http2.Transport{ + TLSClientConfig: config.ClientTLS(), + }, + } + + resp, err := client.Get(fmt.Sprintf("https://%s/metrics", config.HTTPListenAddress)) + require.NoError(t, err) + defer resp.Body.Close() + + b, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + // Should have called GetPeerRateLimits on d2 + assert.Contains(t, string(b), `{method="/pb.gubernator.PeersV1/GetPeerRateLimits"} 1`) +} + +func TestHTTPSClientAuth(t *testing.T) { + conf := gubernator.DaemonConfig{ + GRPCListenAddress: "127.0.0.1:9695", + HTTPListenAddress: "127.0.0.1:9685", + TLS: &gubernator.TLSConfig{ + CaFile: "certs/ca.pem", + CertFile: "certs/gubernator.pem", + KeyFile: "certs/gubernator.key", + ClientAuth: tls.RequireAndVerifyClientCert, + }, + } + + d := spawnDaemon(t, conf) + defer d.Close() + + client := &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: conf.TLS.ClientTLS, + }, + } + + req, err := http.NewRequest(http.MethodGet, fmt.Sprintf("https://%s/v1/HealthCheck", conf.HTTPListenAddress), nil) + require.NoError(t, err) + resp, err := client.Do(req) + require.NoError(t, err) + b, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + assert.Equal(t, `{"status":"healthy"}`, string(b)) +} diff --git a/version b/version index 9c218192..3c029ddf 100644 --- a/version +++ b/version @@ -1 +1 @@ -1.0.0-rc.1 +1.0.0-rc.3