Skip to content
This repository has been archived by the owner on Oct 4, 2021. It is now read-only.

Update scalafmt-core to 3.0.3 #76

Open
wants to merge 2 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .scalafmt.conf
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
version = 2.7.5
version = 3.0.3

maxColumn = 120

Expand Down
3 changes: 1 addition & 2 deletions build.sbt
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import java.nio.charset.StandardCharsets

import sbt.{IntegrationTest => SbtIntegrationTest}

val finagleVersion = "21.6.0"
Expand Down Expand Up @@ -47,13 +48,11 @@ lazy val finaglePostgresql = Project(id = "finagle-postgresql", base = file("fin
libraryDependencies ++= Seq(
"com.twitter" %% "finagle-netty4" % finagleVersion,
"com.twitter" %% "util-stats" % finagleVersion,

"org.specs2" %% "specs2-core" % specs2Version % Test,
"org.specs2" %% "specs2-scalacheck" % specs2Version % Test,
"org.specs2" %% "specs2-matcher-extra" % specs2Version % Test,
"org.typelevel" %% "jawn-parser" % "1.2.0" % Test,
"org.typelevel" %% "jawn-ast" % "1.2.0" % Test,

"org.postgresql" % "postgresql" % "42.2.23" % IntegrationTest,
"com.whisk" %% "docker-testkit-core-shaded" % dockerItVersion % IntegrationTest,
"ch.qos.logback" % "logback-classic" % "1.2.3" % IntegrationTest,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -7,8 +7,8 @@ trait ResourceFileSpec { self: PgSqlSpec =>
/**
* Converts a resource file to a local temp file that an embedded pgsql instance can read.
*
* Permissions are set such that only the current user can read/write the file, this is necessary
* for server certificates for example.
* Permissions are set such that only the current user can read/write the file, this is necessary for server
* certificates for example.
*/
def toTmpFile(name: String): java.io.File =
using(getClass.getResourceAsStream(name)) { is =>
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -18,23 +18,22 @@ class TlsSpec extends PgSqlIntegrationSpec with ResourceFileSpec {
/**
* Here be dragons.
*
* A Docker mount of type "bind" will have the uid:gid of the host user inside the container.
* For example, if the host user running `docker run` is `1001:116`, the mounted file in the container will be owned by `1001:116`.
* A Docker mount of type "bind" will have the uid:gid of the host user inside the container. For example, if the host
* user running `docker run` is `1001:116`, the mounted file in the container will be owned by `1001:116`.
*
* For the TLS private key file, postgres will only accept reading it if it is owned by root or the user running postgres.
* Furthermore, it will check that the permissions are not "world readable".
* For the TLS private key file, postgres will only accept reading it if it is owned by root or the user running
* postgres. Furthermore, it will check that the permissions are not "world readable".
*
* The 2 statements above makes it difficult to provide a private key to postgres: the host user does not exist in the container
* yet, it must run own the secret key AND run postgres.
* The 2 statements above make it difficult to provide a private key to postgres: the host user does not exist in the
* container yet, it must own the secret key AND run postgres.
*
* The solution used is to run postgres as the host user, but this requires the following workarounds:
*
* * run the container as the host's `uid:gid`
* * mount the host `/etc/passwd` as `/etc/passwd` in the container so the host user exists
* * use a subdirectory of the default `PGDATA` value so `initdb` can successfully do its thing
*   - run the container as the host's `uid:gid`
*   - mount the host `/etc/passwd` as `/etc/passwd` in the container so the host user exists
*   - use a subdirectory of the default `PGDATA` value so `initdb` can successfully do its thing
*
* When necessary, the host user's `uid` and `gid` must be provided using the `CI_UID_GID` environment variable and should
* be formatted as `"uid:gid"` (without the double quotes).
* When necessary, the host user's `uid` and `gid` must be provided using the `CI_UID_GID` environment variable and
* should be formatted as `"uid:gid"` (without the double quotes).
*
* NOTE: on OSX none of this is necessary, for some reason, the mounted files are owned by root.
*/
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -19,31 +19,31 @@ import org.scalacheck.Gen
import org.specs2.matcher.describe.Diffable

/**
* The strategy used here is to use the Postgres' ability to produce wire bytes from a SQL statement.
* We then read those bytes and send them through ValueReads implementation to confirm that it is able
* to deserialize the values correctly, without any of the client's machinery.
* The strategy used here is to use the Postgres' ability to produce wire bytes from a SQL statement. We then read those
* bytes and send them through ValueReads implementation to confirm that it is able to deserialize the values correctly,
* without any of the client's machinery.
*
* For example, to produce the bytes for the `Int4` type:
*
* {{{
* postgres=# SELECT int4send(1234::"int4");
* int4send
* ------------
* \x000004d2
* \x000004d2
* (1 row)
* }}}
*
* The resulting value (`\x000004d2`) is a hexadecimal string representation of the bytes that will be present on the wire.
* We use jdbc to execute the statement, extract the bytes and then we send those bytes into `ValueReads`
* and confirm that we read back the original value.
* The resulting value (`\x000004d2`) is a hexadecimal string representation of the bytes that will be present on the
* wire. We use jdbc to execute the statement, extract the bytes and then we send those bytes into `ValueReads` and
* confirm that we read back the original value.
*
* NOTE: the double quotes around the type name is required due to the "char" (OID 18) type which conflicts
* with the "bpchar" type alias, i.e.: char(n). https://stackoverflow.com/a/42484838
* NOTE: the double quotes around the type name are required due to the "char" (OID 18) type which conflicts with the
* "bpchar" type alias, i.e.: char(n). https://stackoverflow.com/a/42484838
*
* NOTE: because of the type cast from string, there are a few caveats:
*
* - the string representation must escape single quotes, e.g.: "My name's Bob" -> "My name''s Bob"
* - the `ToSqlString` trait is necessary to handle types that require finer control than `.toString`
* - the string representation must escape single quotes, e.g.: "My name's Bob" -> "My name''s Bob"
* - the `ToSqlString` trait is necessary to handle types that require finer control than `.toString`
*/
class ValueReadsSpec extends PgSqlIntegrationSpec with PropertiesSpec {

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -20,18 +20,15 @@ import org.specs2.matcher.describe.Diffable
*
* And supply the value as a statement parameter and then read the value back.
*
* NOTE: the double quotes around the type name is required due to the "char" (OID 18) type which conflicts
* with the "bpchar" type alias, i.e.: char(n). https://stackoverflow.com/a/42484838
* NOTE: the double quotes around the type name are required due to the "char" (OID 18) type which conflicts with the
* "bpchar" type alias, i.e.: char(n). https://stackoverflow.com/a/42484838
*
* Unfortunately, this relies on a lot of other machinery to work correctly, namely:
*
* * rich client
* * prepared statements
* * [[ValueReads]] must also exist for the corresponding T
*   - rich client
*   - prepared statements
*   - [[ValueReads]] must also exist for the corresponding T
*
* Another approach would be to write the value to a table and read it back using JDBC.
* But this would make it difficult to compare the read value since we'd have to go through
* Java types.
* Another approach would be to write the value to a table and read it back using JDBC. But this would make it difficult
* to compare the read value since we'd have to go through Java types.
*/
class ValueWritesSpec extends PgSqlIntegrationSpec with PropertiesSpec {

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -36,23 +36,24 @@ import com.twitter.util.Throw
/**
* Handles transforming the Postgres protocol to an RPC style.
*
* The Postgres protocol is not of the style `request => Future[Response]`.
* Instead, it uses a stateful protocol where each connection is in a particular state and streams of requests / responses
* take place to move the connection from one state to another.
* The Postgres protocol is not of the style `request => Future[Response]`. Instead, it uses a stateful protocol where
* each connection is in a particular state and streams of requests / responses take place to move the connection from
* one state to another.
*
* The dispatcher is responsible for managing this connection state and transforming the stream of request / response to
* a single request / response style that conforms to Finagle's request / response style.
*
* The dispatcher uses state machines to handle the connection state management.
*
* When a connection is established, the [[HandshakeMachine]] is immediately executed and takes care of authentication.
* Subsequent machines to execute are based on the client's query. For example, if the client submits a [[Request.Query]],
* then the [[SimpleQueryMachine]] will be dispatched to manage the connection's state.
* Subsequent machines to execute are based on the client's query. For example, if the client submits a
* [[Request.Query]], then the [[SimpleQueryMachine]] will be dispatched to manage the connection's state.
*
* Any unexpected error from the state machine will lead to tearing down the connection to make sure we don't
* reuse a connection in an unknown / bad state.
* Any unexpected error from the state machine will lead to tearing down the connection to make sure we don't reuse a
* connection in an unknown / bad state.
*
* @see [[StateMachine]]
* @see
* [[StateMachine]]
*/
class ClientDispatcher(
transport: Transport[Packet, Packet],
Expand Down Expand Up @@ -134,10 +135,9 @@ object ClientDispatcher {
}

/**
* Caches statements that have been successfully prepared over the connection
* managed by the underlying service (a ClientDispatcher). This decreases
* the chances of leaking prepared statements and can simplify the
* implementation of prepared statements in the presence of a connection pool.
* Caches statements that have been successfully prepared over the connection managed by the underlying service (a
* ClientDispatcher). This decreases the chances of leaking prepared statements and can simplify the implementation of
* prepared statements in the presence of a connection pool.
*/
case class PrepareCache(
svc: Service[Request, Response],
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -20,9 +20,8 @@ object Params {
}

/**
* A class eligible for configuring the maximum number of prepare
* statements. After creating `num` prepare statements, we'll start purging
* old ones.
* A class eligible for configuring the maximum number of prepare statements. After creating `num` prepare statements,
* we'll start purging old ones.
*/
case class MaxConcurrentPrepareStatements(num: Int) {
assert(num > 0, s"$num must be positive")
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -10,13 +10,11 @@ object Request {
/**
* Synthetic request to extract the current connection's parameters.
*
* During connection establishment (i.e.: before any request is sent) the backend sends a set
* of parameter status values to the client. These are accumulated in the dispatcher which isn't
* accessible by the client.
* During connection establishment (i.e.: before any request is sent) the backend sends a set of parameter status
* values to the client. These are accumulated in the dispatcher which isn't accessible by the client.
*
* This request can be used to obtain those parameters.
* Note that in order for these to be meaningful for subsequent requests, those
* must be made on the same connection. For example:
* This request can be used to obtain those parameters. Note that in order for these to be meaningful for subsequent
* requests, those must be made on the same connection. For example:
*
* {{{
* val sf: ServiceFactory[Request, Response]
Expand All @@ -30,7 +28,8 @@ object Request {
* }
* }}}
*
* @see [[Response.ConnectionParameters]]
* @see
* [[Response.ConnectionParameters]]
*/
case object ConnectionParameters extends Request
case object Sync extends Request
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -24,14 +24,14 @@ import io.netty.channel.Channel
*
* https://www.postgresql.org/docs/9.3/protocol-flow.html#AEN100021
*
* The flow is that the client should request TLS using the [[FrontendMessage.SslRequest]] message.
* The backend responds with a single, unframed byte: either 'S' or 'N'.
* The flow is that the client should request TLS using the [[FrontendMessage.SslRequest]] message. The backend responds
* with a single, unframed byte: either 'S' or 'N'.
*
* * 'S' means that the backend is willing to continue with TLS negotiation
* * 'N' means that the backend doesn't support TLS
*   - 'S' means that the backend is willing to continue with TLS negotiation
*   - 'N' means that the backend doesn't support TLS
*
* Once TLS negotiation is successful, this transport will insert the provided [[Framer]] into the netty pipeline,
* where it would have been inserted by [[Netty4ClientChannelInitializer]].
* Once TLS negotiation is successful, this transport will insert the provided [[Framer]] into the netty pipeline, where
* it would have been inserted by [[Netty4ClientChannelInitializer]].
*
* This unfortunately requires reaching behind Finagle's abstractions a little bit.
*/
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -72,13 +72,14 @@ object Types {
/**
* Postgres Inet type wrapper.
*
* Postgres Inet type (https://www.postgresql.org/docs/current/datatype-net-types.html#DATATYPE-INET)
* is a tuple made of an address and a subnet (or "network mask").
* Postgres Inet type (https://www.postgresql.org/docs/current/datatype-net-types.html#DATATYPE-INET) is a tuple made
* of an address and a subnet (or "network mask").
*
* @param ipAddress the IpAddress part, e.g.: 192.168.0.1
* @param netmask the netmask, or number of bits to consider in `ipAddress`.
* This is 0 to 32 for IPv4 and 0 to 128 for IPv6.
* This is an unsigned byte value, so using a `Short`.
* @param ipAddress
* the IpAddress part, e.g.: 192.168.0.1
* @param netmask
* the netmask, or number of bits to consider in `ipAddress`. This is 0 to 32 for IPv4 and 0 to 128 for IPv6. This
* is an unsigned byte value, so using a `Short`.
*/
case class Inet(ipAddress: java.net.InetAddress, netmask: Short)

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -38,15 +38,16 @@ import com.twitter.util.Throw
import com.twitter.util.Try

/**
* Implements part of the "Extended Query" message flow described here https://www.postgresql.org/docs/current/protocol-flow.html#PROTOCOL-FLOW-EXT-QUERY.
* Implements part of the "Extended Query" message flow described here
* https://www.postgresql.org/docs/current/protocol-flow.html#PROTOCOL-FLOW-EXT-QUERY.
*
* This machine is used in combination with [[PrepareMachine]]. That is, before executing this machine, a prior
* execution of [[PrepareMachine]] must have taken place.
*
* NOTE: this machine is slightly different from other ones in that it will send multiple messages on start and then
* a [[Flush]]. The reason is because the message flow is different in this case and the backend does not send
* individual responses until the [[Flush]] is received. The machine expects responses to come back in order,
* but it's not entirely clear if the backend is allowed to send them in a different order.
* NOTE: this machine is slightly different from other ones in that it will send multiple messages on start and then a
* [[Flush]]. The reason is because the message flow is different in this case and the backend does not send individual
* responses until the [[Flush]] is received. The machine expects responses to come back in order, but it's not entirely
* clear if the backend is allowed to send them in a different order.
*
* Also note that this machine is used for both executing a portal as well as resuming a previously executed one.
*/
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -26,12 +26,12 @@ import com.twitter.util.Return
import com.twitter.util.Throw

/**
* Implements the "Start-up" message flow described here https://www.postgresql.org/docs/current/protocol-flow.html#id-1.10.5.7.3
* Implements the "Start-up" message flow described here
* https://www.postgresql.org/docs/current/protocol-flow.html#id-1.10.5.7.3
*
* This process involves authenticating the client and accumulating parameters about the server's configuration for this connection.
* Failure to authenticate will produce an exception.
* A successful response [[Response.ConnectionParameters]] which includes the connection's parameters
* such as character encoding and timezone.
* This process involves authenticating the client and accumulating parameters about the server's configuration for this
* connection. Failure to authenticate will produce an exception. A successful response is
* [[Response.ConnectionParameters]], which includes the connection's parameters such as character encoding and timezone.
*/
case class HandshakeMachine(credentials: Params.Credentials, database: Params.Database)
extends StateMachine[Response.ConnectionParameters] {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -21,13 +21,16 @@ import com.twitter.util.Return
import com.twitter.util.Throw

/**
* Implements part of the "Extended Query" message flow described here https://www.postgresql.org/docs/current/protocol-flow.html#PROTOCOL-FLOW-EXT-QUERY.
* Implements part of the "Extended Query" message flow described here
* https://www.postgresql.org/docs/current/protocol-flow.html#PROTOCOL-FLOW-EXT-QUERY.
*
* This machine is used in combination with [[ExecuteMachine]]. That is, after executing this machine,
* an execution of [[ExecuteMachine]] is required to obtain the results.
* This machine is used in combination with [[ExecuteMachine]]. That is, after executing this machine, an execution of
* [[ExecuteMachine]] is required to obtain the results.
*
* @param name the portal's name to create or overwrite.
* @param statement the statement to prepare.
* @param name
* the portal's name to create or overwrite.
* @param statement
* the statement to prepare.
*/
class PrepareMachine(name: Name, statement: String) extends StateMachine[Response.ParseComplete] {

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -13,8 +13,8 @@ import com.twitter.util.Throw
/**
* An abstraction of a connection to a backend implementing the Postgres protocol.
*
* Although this isn't a particular goal of this client, it allows decoupling the protocol implementation
* from finagle. It could, in theory, be used to implement the protocol on a different transport mechanism.
* Although this isn't a particular goal of this client, it allows decoupling the protocol implementation from finagle.
* It could, in theory, be used to implement the protocol on a different transport mechanism.
*/
trait Connection {

Expand All @@ -33,7 +33,8 @@ trait Connection {
/**
* The runner connects state machines to a connection and allows dispatching machines on the connection.
*
* @param connection the connection to dispatch machines onto.
* @param connection
* the connection to dispatch machines onto.
*/
class Runner(connection: Connection) {

Expand Down Expand Up @@ -81,7 +82,8 @@ class Runner(connection: Connection) {
/**
* Runs a state machine to completion and fulfills the client response.
*
* @return a `Future` which is fulfilled when the connection is available to dispatch another machine.
* @return
* a `Future` which is fulfilled when the connection is available to dispatch another machine.
*/
def dispatch[R <: Response](machine: StateMachine[R], promise: Promise[R]): Future[Unit] =
run(machine, promise)
Expand Down
Loading