Skip to content

Commit

Permalink
Config file should contain camel case field names (closes #30)
Browse files Browse the repository at this point in the history
  • Loading branch information
Ian Streeter authored and istreeter committed Sep 14, 2020
1 parent 0e6cfe4 commit da40b2f
Show file tree
Hide file tree
Showing 3 changed files with 31 additions and 24 deletions.
3 changes: 2 additions & 1 deletion CHANGELOG
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
Version 1.0.0 (2020-09-10)
Version 1.0.0 (2020-09-11)
--------------------------
Config file should contain camel case field names (#30)
Return good events in the canonical event format (#28)
Fix filtering on event_type and schema (#24)
Use EnrichmentManager.enrichEvent to validate events (#23)
Expand Down
42 changes: 21 additions & 21 deletions example/micro.conf
Original file line number Diff line number Diff line change
Expand Up @@ -30,15 +30,15 @@ collector {
}

# Configure the P3P policy header.
p-3p {
policy-ref = "/w3c/p3p.xml"
cp = "NOI DSP COR NID PSA OUR IND COM NAV STA"
p3p {
policyRef = "/w3c/p3p.xml"
CP = "NOI DSP COR NID PSA OUR IND COM NAV STA"
}

# Cross domain policy configuration.
# If "enabled" is set to "false", the collector will respond with a 404 to the /crossdomain.xml
# route.
cross-domain {
crossDomain {
enabled = false
# Domains that are granted access, *.acme.com will match http://acme.com and http://sub.acme.com
domains = [ "*" ]
Expand Down Expand Up @@ -74,21 +74,21 @@ collector {
# cookie domains configured above. (For example, if there is no Origin header.)
# fallback-domain = "{{fallbackDomain}}"
secure = false
http-only = false
httpOnly = false
# The sameSite is optional. You can choose to not specify the attribute, or you can use `Strict`,
# `Lax` or `None` to limit the contexts in which the cookie is sent.
# Strict: the cookie will only be sent along with "same-site" requests.
# Lax: the cookie will be sent with same-site requests, and with cross-site top-level navigation.
# None: the cookie will be sent with same-site and cross-site requests.
# same-site = "{{cookieSameSite}}"
# sameSite = "{{cookieSameSite}}"
}

# If you have a do not track cookie in place, the Scala Stream Collector can respect it by
# completely bypassing the processing of an incoming request carrying this cookie, the collector
# will simply reply by a 200 saying "do not track".
# The cookie name and value must match the configuration below, where the names of the cookies must
# match entirely and the value could be a regular expression.
do-not-track-cookie {
doNotTrackCookie {
enabled = false
name = "foo"
value = "bar"
Expand All @@ -97,38 +97,38 @@ collector {
# When enabled and the cookie specified above is missing, performs a redirect to itself to check
# if third-party cookies are blocked using the specified name. If they are indeed blocked,
# fallbackNetworkId is used instead of generating a new random one.
cookie-bounce {
cookieBounce {
enabled = false
# The name of the request parameter which will be used on redirects checking that third-party
# cookies work.
name = "n3pc"
# Network user id to fallback to when third-party cookies are blocked.
fallback-network-user-id = "00000000-0000-4000-A000-000000000000"
fallbackNetworkUserId = "00000000-0000-4000-A000-000000000000"
# Optionally, specify the name of the header containing the originating protocol for use in the
# bounce redirect location. Use this if behind a load balancer that performs SSL termination.
# The value of this header must be http or https. Example, if behind an AWS Classic ELB.
forwarded-protocol-header = "X-Forwarded-Proto"
# forwardedProtocolHeader = "X-Forwarded-Proto"
}

# When enabled, redirect prefix `r/` will be enabled and its query parameters resolved.
# Otherwise the request prefixed with `r/` will be dropped with `404 Not Found`
# Custom redirects configured in `paths` can still be used.
enable-default-redirect = true
# enableDefaultRedirect = true

# When enabled, the redirect url passed via the `u` query parameter is scanned for a placeholder
# token. All instances of that token are replaced with the network ID. If the placeholder isn't
# specified, the default value is `${SP_NUID}`.
redirect-macro {
redirectMacro {
enabled = false
# Optional custom placeholder token (defaults to the literal `${SP_NUID}`)
placeholder = "[TOKEN]"
}

# Customize response handling for requests for the root path ("/").
# Useful if you need to redirect to web content or privacy policies regarding the use of this collector.
root-response {
rootResponse {
enabled = false
status-code = 302
statusCode = 302
# Optional, defaults to empty map
headers = {
Location = "https://127.0.0.1/",
Expand All @@ -142,11 +142,11 @@ collector {
cors {
# The Access-Control-Max-Age response header indicates how long the results of a preflight
# request can be cached. -1 seconds disables the cache. Chromium max is 10m, Firefox is 24h.
access-control-max-age = 5 seconds
accessControlMaxAge = 5 seconds
}

# Configuration of prometheus http metrics
prometheus-metrics {
prometheusMetrics {
# If metrics are enabled then all requests will be logged as prometheus metrics
# and '/metrics' endpoint will return the report about the requests
enabled = false
Expand All @@ -163,14 +163,14 @@ collector {

# Whether to use the incoming event's ip as the partition key for the good stream/topic
# Note: Nsq does not make use of partition key.
use-ip-address-as-partition-key = false
useIpAddressAsPartitionKey = false

# Enable the chosen sink by uncommenting the appropriate configuration
sink {
# Choose between kinesis, googlepubsub, kafka, nsq, or stdout.
# To use stdout, comment or remove everything in the "collector.streams.sink" section except
# "enabled" which should be set to "stdout".
type = stdout
enabled = stdout

}

Expand All @@ -181,9 +181,9 @@ collector {
# - the combined size of the stored records reaches byte-limit or
# - the time in milliseconds since the buffer was last emptied reaches time-limit
buffer {
byte-limit = 100000
record-limit = 40
time-limit = 1000
byteLimit = 100000
recordLimit = 40
timeLimit = 1000
}
}
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,8 @@ package com.snowplowanalytics.snowplow.micro

import com.typesafe.config.{Config, ConfigFactory}

import pureconfig.loadConfigOrThrow
import pureconfig.{loadConfigOrThrow, ConfigFieldMapping, CamelCase}
import pureconfig.generic.{ProductHint, FieldCoproductHint}
import pureconfig.generic.auto._

import cats.Id
Expand All @@ -28,14 +29,19 @@ import scala.io.Source
import java.io.File

import com.snowplowanalytics.iglu.client.Client
import com.snowplowanalytics.snowplow.collectors.scalastream.model.CollectorConfig
import com.snowplowanalytics.snowplow.collectors.scalastream.model.{CollectorConfig, SinkConfig}

/** Contain functions to parse the command line arguments,
* to parse the configuration for the collector, Akka HTTP and Iglu
* and to instantiate Iglu client.
*/
private[micro] object ConfigHelper {

implicit def hint[T] =
ProductHint[T](ConfigFieldMapping(CamelCase, CamelCase))

implicit val sinkConfigHint = new FieldCoproductHint[SinkConfig]("enabled")

/** Parse the command line arguments and the configuration files. */
def parseConfig(args: Array[String]): (CollectorConfig, Client[Id, Json], Config) = {
case class MicroConfig(
Expand Down

0 comments on commit da40b2f

Please sign in to comment.