From da40b2f26a540af8f4282a80579a2a1116eb6f22 Mon Sep 17 00:00:00 2001
From: Ian Streeter
Date: Fri, 11 Sep 2020 13:25:50 +0100
Subject: [PATCH] Config file should contain camel case field names (closes
 #30)

---
 CHANGELOG              |  3 +-
 example/micro.conf     | 42 +++++++++----------
 .../ConfigHelper.scala | 10 ++++-
 3 files changed, 31 insertions(+), 24 deletions(-)

diff --git a/CHANGELOG b/CHANGELOG
index 92621b4..3b96703 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -1,5 +1,6 @@
-Version 1.0.0 (2020-09-10)
+Version 1.0.0 (2020-09-11)
 --------------------------
+Config file should contain camel case field names (#30)
 Return good events in the canonical event format (#28)
 Fix filtering on event_type and schema (#24)
 Use EnrichmentManager.enrichEvent to validate events (#23)

diff --git a/example/micro.conf b/example/micro.conf
index b35cd8e..51fa0a1 100644
--- a/example/micro.conf
+++ b/example/micro.conf
@@ -30,15 +30,15 @@ collector {
   }
 
   # Configure the P3P policy header.
-  p-3p {
-    policy-ref = "/w3c/p3p.xml"
-    cp = "NOI DSP COR NID PSA OUR IND COM NAV STA"
+  p3p {
+    policyRef = "/w3c/p3p.xml"
+    CP = "NOI DSP COR NID PSA OUR IND COM NAV STA"
   }
 
   # Cross domain policy configuration.
   # If "enabled" is set to "false", the collector will respond with a 404 to the /crossdomain.xml
   # route.
-  cross-domain {
+  crossDomain {
     enabled = false
     # Domains that are granted access, *.acme.com will match http://acme.com and http://sub.acme.com
     domains = [ "*" ]
@@ -74,13 +74,13 @@ collector {
     # cookie domains configured above. (For example, if there is no Origin header.)
     # fallback-domain = "{{fallbackDomain}}"
     secure = false
-    http-only = false
+    httpOnly = false
     # The sameSite is optional. You can choose to not specify the attribute, or you can use `Strict`,
     # `Lax` or `None` to limit the cookie sent context.
     # Strict: the cookie will only be sent along with "same-site" requests.
     # Lax: the cookie will be sent with same-site requests, and with cross-site top-level navigation.
     # None: the cookie will be sent with same-site and cross-site requests.
-    # same-site = "{{cookieSameSite}}"
+    # sameSite = "{{cookieSameSite}}"
   }
 
   # If you have a do not track cookie in place, the Scala Stream Collector can respect it by
@@ -88,7 +88,7 @@ collector {
   # will simply reply by a 200 saying "do not track".
   # The cookie name and value must match the configuration below, where the names of the cookies must
   # match entirely and the value could be a regular expression.
-  do-not-track-cookie {
+  doNotTrackCookie {
     enabled = false
     name = "foo"
     value = "bar"
@@ -97,28 +97,28 @@ collector {
   # When enabled and the cookie specified above is missing, performs a redirect to itself to check
   # if third-party cookies are blocked using the specified name. If they are indeed blocked,
   # fallbackNetworkId is used instead of generating a new random one.
-  cookie-bounce {
+  cookieBounce {
     enabled = false
     # The name of the request parameter which will be used on redirects checking that third-party
     # cookies work.
     name = "n3pc"
     # Network user id to fallback to when third-party cookies are blocked.
-    fallback-network-user-id = "00000000-0000-4000-A000-000000000000"
+    fallbackNetworkUserId = "00000000-0000-4000-A000-000000000000"
    # Optionally, specify the name of the header containing the originating protocol for use in the
     # bounce redirect location. Use this if behind a load balancer that performs SSL termination.
     # The value of this header must be http or https. Example, if behind an AWS Classic ELB.
-    forwarded-protocol-header = "X-Forwarded-Proto"
+    # forwardedProtocolHeader = "X-Forwarded-Proto"
   }
 
   # When enabled, redirect prefix `r/` will be enabled and its query parameters resolved.
   # Otherwise the request prefixed with `r/` will be dropped with `404 Not Found`
   # Custom redirects configured in `paths` can still be used.
-  enable-default-redirect = true
+  # enableDefaultRedirect = true
 
   # When enabled, the redirect url passed via the `u` query parameter is scanned for a placeholder
   # token. All instances of that token are replaced withe the network ID. If the placeholder isn't
   # specified, the default value is `${SP_NUID}`.
-  redirect-macro {
+  redirectMacro {
     enabled = false
     # Optional custom placeholder token (defaults to the literal `${SP_NUID}`)
     placeholder = "[TOKEN]"
@@ -126,9 +126,9 @@ collector {
   # Customize response handling for requests for the root path ("/").
   # Useful if you need to redirect to web content or privacy policies regarding the use of this collector.
-  root-response {
+  rootResponse {
     enabled = false
-    status-code = 302
+    statusCode = 302
     # Optional, defaults to empty map
     headers = {
       Location = "https://127.0.0.1/",
@@ -142,11 +142,11 @@ collector {
   cors {
     # The Access-Control-Max-Age response header indicates how long the results of a preflight
     # request can be cached. -1 seconds disables the cache. Chromium max is 10m, Firefox is 24h.
-    access-control-max-age = 5 seconds
+    accessControlMaxAge = 5 seconds
   }
 
   # Configuration of prometheus http metrics
-  prometheus-metrics {
+  prometheusMetrics {
     # If metrics are enabled then all requests will be logged as prometheus metrics
     # and '/metrics' endpoint will return the report about the requests
     enabled = false
@@ -163,14 +163,14 @@ collector {
     # Whether to use the incoming event's ip as the partition key for the good stream/topic
     # Note: Nsq does not make use of partition key.
-    use-ip-address-as-partition-key = false
+    useIpAddressAsPartitionKey = false
 
     # Enable the chosen sink by uncommenting the appropriate configuration
     sink {
       # Choose between kinesis, googlepubsub, kafka, nsq, or stdout.
       # To use stdout, comment or remove everything in the "collector.streams.sink" section except
       # "enabled" which should be set to "stdout".
-      type = stdout
+      enabled = stdout
     }
@@ -181,9 +181,9 @@ collector {
     # - the combined size of the stored records reaches byte-limit or
     # - the time in milliseconds since the buffer was last emptied reaches time-limit
     buffer {
-      byte-limit = 100000
-      record-limit = 40
-      time-limit = 1000
+      byteLimit = 100000
+      recordLimit = 40
+      timeLimit = 1000
     }
   }
 }

diff --git a/src/main/scala/com.snowplowanalytics.snowplow.micro/ConfigHelper.scala b/src/main/scala/com.snowplowanalytics.snowplow.micro/ConfigHelper.scala
index a11aa9e..08902fe 100644
--- a/src/main/scala/com.snowplowanalytics.snowplow.micro/ConfigHelper.scala
+++ b/src/main/scala/com.snowplowanalytics.snowplow.micro/ConfigHelper.scala
@@ -14,7 +14,8 @@ package com.snowplowanalytics.snowplow.micro
 
 import com.typesafe.config.{Config, ConfigFactory}
 
-import pureconfig.loadConfigOrThrow
+import pureconfig.{loadConfigOrThrow, ConfigFieldMapping, CamelCase}
+import pureconfig.generic.{ProductHint, FieldCoproductHint}
 import pureconfig.generic.auto._
 
 import cats.Id
@@ -28,7 +29,7 @@ import scala.io.Source
 import java.io.File
 
 import com.snowplowanalytics.iglu.client.Client
-import com.snowplowanalytics.snowplow.collectors.scalastream.model.CollectorConfig
+import com.snowplowanalytics.snowplow.collectors.scalastream.model.{CollectorConfig, SinkConfig}
 
 /** Contain functions to parse the command line arguments,
   * to parse the configuration for the collector, Akka HTTP and Iglu
@@ -36,6 +37,11 @@ import com.snowplowanalytics.snowplow.collectors.scalastream.model.CollectorConf
   */
 private[micro] object ConfigHelper {
 
+  implicit def hint[T] =
+    ProductHint[T](ConfigFieldMapping(CamelCase, CamelCase))
+
+  implicit val sinkConfigHint = new FieldCoproductHint[SinkConfig]("enabled")
+
   /** Parse the command line arguments and the configuration files. */
   def parseConfig(args: Array[String]): (CollectorConfig, Client[Id, Json], Config) = {
     case class MicroConfig(
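
Note on the two implicits added to ConfigHelper.scala, which drive the renames above. ProductHint[T](ConfigFieldMapping(CamelCase, CamelCase)) tells pureconfig to read camelCase config keys directly into camelCase case-class fields, overriding the library's default kebab-case mapping (which is why the old example file used names like fallback-network-user-id). Below is a minimal, self-contained sketch of that behaviour; the CookieBounce case class is a hypothetical stand-in for illustration, not a class from this repo:

    import com.typesafe.config.ConfigFactory
    import pureconfig.{loadConfigOrThrow, CamelCase, ConfigFieldMapping}
    import pureconfig.generic.ProductHint
    import pureconfig.generic.auto._

    object CamelCaseExample {
      // Hypothetical config class, for illustration only.
      final case class CookieBounce(enabled: Boolean, fallbackNetworkUserId: String)

      // Same hint as in ConfigHelper: map camelCase config keys to camelCase
      // fields. Without it, pureconfig would expect kebab-case keys such as
      // fallback-network-user-id.
      implicit def hint[T]: ProductHint[T] =
        ProductHint[T](ConfigFieldMapping(CamelCase, CamelCase))

      def main(args: Array[String]): Unit = {
        val conf = ConfigFactory.parseString(
          """
            |enabled = false
            |fallbackNetworkUserId = "00000000-0000-4000-A000-000000000000"
            |""".stripMargin)
        // Prints: CookieBounce(false,00000000-0000-4000-A000-000000000000)
        println(loadConfigOrThrow[CookieBounce](conf))
      }
    }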
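
The second implicit, new FieldCoproductHint[SinkConfig]("enabled"), is what makes the sink section read `enabled = stdout` instead of `type = stdout`: pureconfig inspects the named field to decide which subtype of the sealed SinkConfig family to decode. A sketch under the same caveat, with a cut-down hypothetical Sink hierarchy standing in for the collector's real SinkConfig:

    import com.typesafe.config.ConfigFactory
    import pureconfig.loadConfigOrThrow
    import pureconfig.generic.FieldCoproductHint
    import pureconfig.generic.auto._

    object SinkHintExample {
      // Hypothetical stand-in for the collector's SinkConfig family.
      sealed trait Sink
      final case class Stdout() extends Sink
      final case class Kinesis(region: String) extends Sink

      // Like sinkConfigHint in ConfigHelper: dispatch on the "enabled" field
      // rather than pureconfig's default "type" field.
      implicit val sinkHint: FieldCoproductHint[Sink] =
        new FieldCoproductHint[Sink]("enabled")

      def main(args: Array[String]): Unit = {
        val conf = ConfigFactory.parseString("sink { enabled = stdout }")
        // Prints: Stdout()
        println(loadConfigOrThrow[Sink](conf.getConfig("sink")))
      }
    }

With this hint in scope, switching sinks in micro.conf is just a matter of changing the `enabled` value (plus the chosen sink's own fields), which is what the comment in the example file means by '"enabled" which should be set to "stdout"'.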