diff --git a/Dockerfile b/Dockerfile
index 27b59336b1552b0100b0aced6a27c79248fbedeb..a4fa1142fec4c0bdcf6db4e9ff8a475784daa6be 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -17,8 +17,14 @@ FROM hbpmip/java-base:11.0.1-1
 RUN apt-get update && apt-get install -y --no-install-recommends curl \
     && rm -rf /var/lib/apt/lists/* /tmp/*

-COPY docker/config/application.tmpl /config/application.tmpl
-COPY docker/README.md docker/run.sh /
+COPY docker/config/application.tmpl /opt/portal/config/application.tmpl
+COPY docker/README.md docker/run.sh /opt/portal/
+
+RUN addgroup portal \
+    && adduser --system --disabled-password --uid 1000 --ingroup portal portal \
+    && chmod +x /opt/portal/run.sh \
+    && ln -s /opt/portal/run.sh /run.sh \
+    && chown -R portal:portal /opt/portal

 COPY --from=java-build-env /project/target/portal-backend.jar /usr/share/jars/

@@ -26,6 +32,7 @@ ARG BUILD_DATE
 ARG VCS_REF
 ARG VERSION

+USER portal
 ENV APP_NAME="Portal backend" \
     APP_TYPE="Spring" \
     VERSION=$VERSION \
@@ -33,6 +40,7 @@ ENV APP_NAME="Portal backend" \
     CONTEXT_PATH="/services" \
     BUGSNAG_KEY="dff301aa15eb795a6d8b22b600586f77"

+WORKDIR /home/portal
 ENTRYPOINT ["/run.sh"]

 # 8080: Web service API, health checks on http://host:8080$CONTEXT_PATH/health
diff --git a/docker/config/application.tmpl b/docker/config/application.tmpl
index d37bc35132a6ed2d2df285fb8a39342515803072..0856eacbe4e8bbb2f9137d8df71c30c64f7bc342 100644
--- a/docker/config/application.tmpl
+++ b/docker/config/application.tmpl
@@ -61,7 +61,7 @@ frontend:

 logging:
   level:
-    root: INFO
+    root: {{ default .Env.LOG_LEVEL "INFO" }}
     org:
       springframework:
         web: {{ default .Env.LOGGING_LEVEL_WEB "WARN" }}
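The Dockerfile hunks above switch the image to an unprivileged portal user (uid 1000) and relocate run.sh and the config template under /opt/portal, keeping a /run.sh symlink so the ENTRYPOINT stays valid. A minimal smoke test for the new layout is sketched below; the hbpmip/portal-backend image tag is an assumption for illustration, not taken from this diff:

    # Build the image, then confirm the container no longer runs as root
    docker build -t hbpmip/portal-backend .
    docker run --rm --entrypoint id hbpmip/portal-backend
    # expected: uid=1000(portal), with the portal group

    # Confirm the ENTRYPOINT still resolves through the symlink
    docker run --rm --entrypoint ls hbpmip/portal-backend -l /run.sh
    # expected: /run.sh -> /opt/portal/run.sh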
-z "$HTTPS_PROXY_PORT" ]; then fi JAVA_OPTIONS="$JAVA_OPTIONS -Daeron.term.buffer.length=100m" +export SPRING_CONFIG_LOCATION=file:/opt/portal/config/application.yml + +cd /opt/portal + dockerize $OPTS java ${JAVA_OPTIONS} -jar /usr/share/jars/portal-backend.jar diff --git a/pom.xml b/pom.xml index 7f2738f75eb701450e00aa4345828b7b8533355c..b595612cca0b5d6bbb4790c4c66929d4787719d2 100644 --- a/pom.xml +++ b/pom.xml @@ -47,7 +47,7 @@ <spring-data-jpa.version>1.10.11.RELEASE</spring-data-jpa.version> <spring-boot-starter-actuator.version>1.4.7.RELEASE</spring-boot-starter-actuator.version> <aspectjweaver.version>1.8.9</aspectjweaver.version> - <woken-messages.version>3.0.11</woken-messages.version> + <woken-messages.version>3.0.14</woken-messages.version> <javax-inject.version>1</javax-inject.version> <akka.version>2.5.22</akka.version> <spring-context.version>4.3.4.RELEASE</spring-context.version> @@ -91,6 +91,12 @@ <groupId>org.springframework.boot</groupId> <artifactId>spring-boot-starter-actuator</artifactId> <version>${spring-boot-starter-actuator.version}</version> + <exclusions> + <exclusion> + <groupId>org.springframework.boot</groupId> + <artifactId>spring-boot-starter-logging</artifactId> + </exclusion> + </exclusions> </dependency> <dependency> <groupId>org.springframework.data</groupId> @@ -100,10 +106,22 @@ <dependency> <groupId>org.springframework.boot</groupId> <artifactId>spring-boot-starter-security</artifactId> + <exclusions> + <exclusion> + <groupId>org.springframework.boot</groupId> + <artifactId>spring-boot-starter-logging</artifactId> + </exclusion> + </exclusions> </dependency> <dependency> <groupId>org.springframework.boot</groupId> <artifactId>spring-boot-starter-web</artifactId> + <exclusions> + <exclusion> + <groupId>org.springframework.boot</groupId> + <artifactId>spring-boot-starter-logging</artifactId> + </exclusion> + </exclusions> </dependency> <dependency> <groupId>org.springframework.security.oauth</groupId> @@ -135,6 +153,12 @@ <dependency> <groupId>org.springframework.boot</groupId> <artifactId>spring-boot-starter-test</artifactId> + <exclusions> + <exclusion> + <groupId>org.springframework.boot</groupId> + <artifactId>spring-boot-starter-logging</artifactId> + </exclusion> + </exclusions> <scope>test</scope> </dependency> <dependency> diff --git a/src/main/java/eu/hbp/mip/configuration/AkkaConfiguration.java b/src/main/java/eu/hbp/mip/configuration/AkkaConfiguration.java index 081d55b370612c0ddcf3666ccd532200685a9670..4531d4d3070a9a34eb2dbbb617e6edccf225706e 100644 --- a/src/main/java/eu/hbp/mip/configuration/AkkaConfiguration.java +++ b/src/main/java/eu/hbp/mip/configuration/AkkaConfiguration.java @@ -38,7 +38,7 @@ public class AkkaConfiguration { private final Config config; { - Config appConfig = ConfigFactory.parseResourcesAnySyntax("akka.conf") + Config appConfig = ConfigFactory.parseResourcesAnySyntax("application.conf") .withFallback(ConfigFactory.parseResourcesAnySyntax("kamon.conf")); config = ConfigurationLoader.appendClusterConfiguration(appConfig).resolve(); } diff --git a/src/main/resources/akka.conf b/src/main/resources/akka.conf deleted file mode 100644 index 908aa45009d3f0d1a81bb73440ed90d385773891..0000000000000000000000000000000000000000 --- a/src/main/resources/akka.conf +++ /dev/null @@ -1,10 +0,0 @@ -# Merged with defaults in woken-messages/reference.conf -akka { - actor { - # provider = "cluster" - } - - cluster { - roles = ["portal"] - } -} diff --git a/src/main/resources/application.conf b/src/main/resources/application.conf index 
diff --git a/src/main/resources/application.conf b/src/main/resources/application.conf
index 08fc3d01d7950697001783de437b8d88cbdd068b..e2abfd5ae7f2a4e0e3ceb4abc888cdb065dbc79f 100644
--- a/src/main/resources/application.conf
+++ b/src/main/resources/application.conf
@@ -12,3 +12,9 @@ clustering {
   port = 4489
   port = ${?CLUSTER_PORT}
 }
+
+akka {
+  cluster {
+    roles = ["portal"]
+  }
+}
diff --git a/src/main/resources/reference.conf b/src/main/resources/reference.conf
deleted file mode 100644
index 64657b681283ae7495a774d2773ecc87f002ba16..0000000000000000000000000000000000000000
--- a/src/main/resources/reference.conf
+++ /dev/null
@@ -1,4862 +0,0 @@
[4,862 deleted lines, truncated in this excerpt: the file was a vendored concatenation of the reference.conf defaults shipped with kamon-system-metrics, kamon-scala-future, kamon-executors, akka-management, akka-cluster, kamon-core, kamon-akka-remote, akka-coordination, akka-http-core and akka-actor.]
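With the vendored reference.conf gone, the defaults are expected to come from the reference.conf files shipped inside the dependency jars (the deleted akka.conf comment points at woken-messages/reference.conf), with application.conf, which now also carries the akka.cluster.roles setting and is loaded by AkkaConfiguration above, layered on top. A quick check that the packaged application no longer ships its own copy; the BOOT-INF layout is an assumption based on the Spring Boot 1.4.x line referenced in pom.xml:

    # List the HOCON files packaged as application resources
    jar tf target/portal-backend.jar | grep 'BOOT-INF/classes/.*\.conf$'
    # expected: application.conf (and kamon.conf), but no reference.conf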
- # This logger prints the log messages to stdout (System.out). - # Options: OFF, ERROR, WARNING, INFO, DEBUG - stdout-loglevel = "WARNING" - - # Log the complete configuration at INFO level when the actor system is started. - # This is useful when you are uncertain of what configuration is used. - log-config-on-start = off - - # Log at info level when messages are sent to dead letters. - # Possible values: - # on: all dead letters are logged - # off: no logging of dead letters - # n: positive integer, number of dead letters that will be logged - log-dead-letters = 10 - - # Possibility to turn off logging of dead letters while the actor system - # is shutting down. Logging is only done when enabled by 'log-dead-letters' - # setting. - log-dead-letters-during-shutdown = on - - # List FQCN of extensions which shall be loaded at actor system startup. - # Library extensions are regular extensions that are loaded at startup and are - # available for third party library authors to enable auto-loading of extensions when - # present on the classpath. This is done by appending entries: - # 'library-extensions += "Extension"' in the library `reference.conf`. - # - # Should not be set by end user applications in 'application.conf', use the extensions property for that - # - library-extensions = ${?akka.library-extensions} ["akka.serialization.SerializationExtension"] - - # List FQCN of extensions which shall be loaded at actor system startup. - # Should be on the format: 'extensions = ["foo", "bar"]' etc. - # See the Akka Documentation for more info about Extensions - extensions = [] - - # Toggles whether threads created by this ActorSystem should be daemons or not - daemonic = off - - # JVM shutdown, System.exit(-1), in case of a fatal error, - # such as OutOfMemoryError - jvm-exit-on-fatal-error = on - - # Akka installs JVM shutdown hooks by default, e.g. in CoordinatedShutdown and Artery. This property will - # not disable user-provided hooks registered using `CoordinatedShutdown#addCancellableJvmShutdownHook`. - # This property is related to `akka.coordinated-shutdown.run-by-jvm-shutdown-hook` below. - # This property makes it possible to disable all such hooks if the application itself - # or a higher level framework such as Play prefers to install the JVM shutdown hook and - # terminate the ActorSystem itself, with or without using CoordinatedShutdown. - jvm-shutdown-hooks = on - - actor { - - # Either one of "local", "remote" or "cluster" or the - # FQCN of the ActorRefProvider to be used; the below is the built-in default, - # note that "remote" and "cluster" requires the akka-remote and akka-cluster - # artifacts to be on the classpath. - provider = "local" - - # The guardian "/user" will use this class to obtain its supervisorStrategy. - # It needs to be a subclass of akka.actor.SupervisorStrategyConfigurator. - # In addition to the default there is akka.actor.StoppingSupervisorStrategy. - guardian-supervisor-strategy = "akka.actor.DefaultSupervisorStrategy" - - # Timeout for ActorSystem.actorOf - creation-timeout = 20s - - # Serializes and deserializes (non-primitive) messages to ensure immutability, - # this is only intended for testing. - serialize-messages = off - - # Additional serialization bindings which are enabled automatically when allow-java-serialization is disabled. 
-    java-serialization-disabled-additional-serialization-bindings = {}
-
-    # Serializes and deserializes creators (in Props) to ensure that they can be
-    # sent over the network; this is only intended for testing. Purely local deployments
-    # as marked with deploy.scope == LocalScope are exempt from verification.
-    serialize-creators = off
-
-    # Timeout for send operations to top-level actors which are in the process
-    # of being started. This is only relevant if using a bounded mailbox or the
-    # CallingThreadDispatcher for a top-level actor.
-    unstarted-push-timeout = 10s
-
-    typed {
-      # Default timeout for typed actor methods with non-void return type
-      timeout = 5s
-    }
-
-    # Mapping between 'deployment.router' short names and fully qualified class names
-    router.type-mapping {
-      from-code = "akka.routing.NoRouter"
-      round-robin-pool = "akka.routing.RoundRobinPool"
-      round-robin-group = "akka.routing.RoundRobinGroup"
-      random-pool = "akka.routing.RandomPool"
-      random-group = "akka.routing.RandomGroup"
-      balancing-pool = "akka.routing.BalancingPool"
-      smallest-mailbox-pool = "akka.routing.SmallestMailboxPool"
-      broadcast-pool = "akka.routing.BroadcastPool"
-      broadcast-group = "akka.routing.BroadcastGroup"
-      scatter-gather-pool = "akka.routing.ScatterGatherFirstCompletedPool"
-      scatter-gather-group = "akka.routing.ScatterGatherFirstCompletedGroup"
-      tail-chopping-pool = "akka.routing.TailChoppingPool"
-      tail-chopping-group = "akka.routing.TailChoppingGroup"
-      consistent-hashing-pool = "akka.routing.ConsistentHashingPool"
-      consistent-hashing-group = "akka.routing.ConsistentHashingGroup"
-    }
-
-    deployment {
-
-      # deployment id pattern - in the format: /parent/child etc.
-      default {
-
-        # The id of the dispatcher to use for this actor.
-        # If undefined or empty the dispatcher specified in code
-        # (Props.withDispatcher) is used, or default-dispatcher if not
-        # specified at all.
-        dispatcher = ""
-
-        # The id of the mailbox to use for this actor.
-        # If undefined or empty the default mailbox of the configured dispatcher
-        # is used or if there is no mailbox configuration the mailbox specified
-        # in code (Props.withMailbox) is used.
-        # If there is a mailbox defined in the configured dispatcher then that
-        # overrides this setting.
-        mailbox = ""
-
-        # routing (load-balance) scheme to use
-        # - available: "from-code", "round-robin", "random", "smallest-mailbox",
-        #              "scatter-gather", "broadcast"
-        # - or:        Fully qualified class name of the router class.
-        #              The class must extend akka.routing.CustomRouterConfig and
-        #              have a public constructor with com.typesafe.config.Config
-        #              and optional akka.actor.DynamicAccess parameter.
-        # - default is "from-code";
-        # Whether or not an actor is transformed to a Router is decided in code
-        # only (Props.withRouter). The type of router can be overridden in the
-        # configuration; specifying "from-code" means that the values specified
-        # in the code shall be used.
-        # In case of routing, the actors to be routed to can be specified
-        # in several ways:
-        # - nr-of-instances: will create that many children
-        # - routees.paths: will route messages to these paths using ActorSelection,
-        #   i.e.
will not create children - # - resizer: dynamically resizable number of routees as specified in - # resizer below - router = "from-code" - - # number of children to create in case of a router; - # this setting is ignored if routees.paths is given - nr-of-instances = 1 - - # within is the timeout used for routers containing future calls - within = 5 seconds - - # number of virtual nodes per node for consistent-hashing router - virtual-nodes-factor = 10 - - tail-chopping-router { - # interval is duration between sending message to next routee - interval = 10 milliseconds - } - - routees { - # Alternatively to giving nr-of-instances you can specify the full - # paths of those actors which should be routed to. This setting takes - # precedence over nr-of-instances - paths = [] - } - - # To use a dedicated dispatcher for the routees of the pool you can - # define the dispatcher configuration inline with the property name - # 'pool-dispatcher' in the deployment section of the router. - # For example: - # pool-dispatcher { - # fork-join-executor.parallelism-min = 5 - # fork-join-executor.parallelism-max = 5 - # } - - # Routers with dynamically resizable number of routees; this feature is - # enabled by including (parts of) this section in the deployment - resizer { - - enabled = off - - # The fewest number of routees the router should ever have. - lower-bound = 1 - - # The most number of routees the router should ever have. - # Must be greater than or equal to lower-bound. - upper-bound = 10 - - # Threshold used to evaluate if a routee is considered to be busy - # (under pressure). Implementation depends on this value (default is 1). - # 0: number of routees currently processing a message. - # 1: number of routees currently processing a message has - # some messages in mailbox. - # > 1: number of routees with at least the configured pressure-threshold - # messages in their mailbox. Note that estimating mailbox size of - # default UnboundedMailbox is O(N) operation. - pressure-threshold = 1 - - # Percentage to increase capacity whenever all routees are busy. - # For example, 0.2 would increase 20% (rounded up), i.e. if current - # capacity is 6 it will request an increase of 2 more routees. - rampup-rate = 0.2 - - # Minimum fraction of busy routees before backing off. - # For example, if this is 0.3, then we'll remove some routees only when - # less than 30% of routees are busy, i.e. if current capacity is 10 and - # 3 are busy then the capacity is unchanged, but if 2 or less are busy - # the capacity is decreased. - # Use 0.0 or negative to avoid removal of routees. - backoff-threshold = 0.3 - - # Fraction of routees to be removed when the resizer reaches the - # backoffThreshold. - # For example, 0.1 would decrease 10% (rounded up), i.e. if current - # capacity is 9 it will request an decrease of 1 routee. - backoff-rate = 0.1 - - # Number of messages between resize operation. - # Use 1 to resize before each message. - messages-per-resize = 10 - } - - # Routers with dynamically resizable number of routees based on - # performance metrics. - # This feature is enabled by including (parts of) this section in - # the deployment, cannot be enabled together with default resizer. - optimal-size-exploring-resizer { - - enabled = off - - # The fewest number of routees the router should ever have. - lower-bound = 1 - - # The most number of routees the router should ever have. - # Must be greater than or equal to lower-bound. 
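-          # (Illustration only: this resizer would typically be switched on for
-          #  a pool router under akka.actor.deployment, e.g.
-          #    /my-pool {
-          #      router = round-robin-pool
-          #      optimal-size-exploring-resizer { enabled = on }
-          #    }
-          #  where "/my-pool" is a hypothetical deployment path.)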
-          upper-bound = 10
-
-          # Probability of ramping down when all routees are busy
-          # during exploration.
-          chance-of-ramping-down-when-full = 0.2
-
-          # Interval between each resize attempt
-          action-interval = 5s
-
-          # If the routees have not been fully utilized (i.e. all routees busy)
-          # for that length of time, the resizer will downsize the pool.
-          downsize-after-underutilized-for = 72h
-
-          # During exploration, the ratio between the largest step size and
-          # current pool size. E.g. if the current pool size is 50, and the
-          # explore-step-size is 0.1, the maximum pool size change during
-          # exploration will be +- 5
-          explore-step-size = 0.1
-
-          # Probability of doing an exploration vs. optimization.
-          chance-of-exploration = 0.4
-
-          # When downsizing after a long streak of underutilization, the resizer
-          # will downsize the pool to the highest utilization multiplied by
-          # a downsize ratio. This downsize ratio determines the new pool's size
-          # in comparison to the highest utilization.
-          # E.g. if the highest utilization is 10, and the downsize ratio
-          # is 0.8, the pool will be downsized to 8
-          downsize-ratio = 0.8
-
-          # When optimizing, the resizer only considers the sizes adjacent to the
-          # current size. This number indicates how many adjacent sizes to consider.
-          optimization-range = 16
-
-          # The weight of the latest metric over old metrics when collecting
-          # performance metrics.
-          # E.g. if the last processing speed is 10 millis per message at pool
-          # size 5, and the new processing speed collected is 6 millis per
-          # message at pool size 5, then given a weight of 0.3 the metric
-          # representing pool size 5 will be 6 * 0.3 + 10 * 0.7, i.e. 8.8 millis.
-          # Obviously, this number should be between 0 and 1.
-          weight-of-latest-metric = 0.5
-        }
-      }
-
-      "/IO-DNS/inet-address" {
-        mailbox = "unbounded"
-        router = "consistent-hashing-pool"
-        nr-of-instances = 4
-      }
-
-      "/IO-DNS/inet-address/*" {
-        dispatcher = "akka.actor.default-blocking-io-dispatcher"
-      }
-
-      "/IO-DNS/async-dns" {
-        mailbox = "unbounded"
-        router = "round-robin-pool"
-        nr-of-instances = 1
-      }
-    }
-
-    default-dispatcher {
-      # Must be one of the following
-      # Dispatcher, PinnedDispatcher, or a FQCN to a class inheriting
-      # MessageDispatcherConfigurator with a public constructor with
-      # both com.typesafe.config.Config parameter and
-      # akka.dispatch.DispatcherPrerequisites parameters.
-      # PinnedDispatcher must be used together with executor=thread-pool-executor.
-      type = "Dispatcher"
-
-      # Which kind of ExecutorService to use for this dispatcher
-      # Valid options:
-      #  - "default-executor" requires a "default-executor" section
-      #  - "fork-join-executor" requires a "fork-join-executor" section
-      #  - "thread-pool-executor" requires a "thread-pool-executor" section
-      #  - "affinity-pool-executor" requires an "affinity-pool-executor" section
-      #  - A FQCN of a class extending ExecutorServiceConfigurator
-      executor = "default-executor"
-
-      # This will be used if you have set "executor = "default-executor"".
-      # If an ActorSystem is created with a given ExecutionContext, this
-      # ExecutionContext will be used as the default executor for all
-      # dispatchers in the ActorSystem configured with
-      # executor = "default-executor". Note that "default-executor"
-      # is the default value for executor, and therefore used if not
-      # specified otherwise. If no ExecutionContext is given,
-      # the executor configured in "fallback" will be used.
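-      # (Hypothetical override, for illustration only: forcing the default
-      #  dispatcher onto a plain thread pool would be done with
-      #    akka.actor.default-dispatcher.executor = "thread-pool-executor"
-      #  in application.conf.)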
-      default-executor {
-        fallback = "fork-join-executor"
-      }
-
-      # This will be used if you have set "executor = "affinity-pool-executor""
-      # Underlying thread pool implementation is akka.dispatch.affinity.AffinityPool.
-      # This executor is classified as "ApiMayChange".
-      affinity-pool-executor {
-        # Min number of threads to cap factor-based parallelism number to
-        parallelism-min = 4
-
-        # The parallelism factor is used to determine thread pool size using the
-        # following formula: ceil(available processors * factor). Resulting size
-        # is then bounded by the parallelism-min and parallelism-max values.
-        parallelism-factor = 0.8
-
-        # Max number of threads to cap factor-based parallelism number to.
-        parallelism-max = 64
-
-        # Each worker in the pool uses a separate bounded MPSC queue. This value
-        # indicates the upper bound of the queue. Whenever an attempt to enqueue
-        # a task is made and the queue does not have capacity to accommodate
-        # the task, the rejection handler created by the factory specified
-        # in "rejection-handler" is invoked.
-        task-queue-size = 512
-
-        # FQCN of the Rejection handler used in the pool.
-        # Must have an empty public constructor and must
-        # implement akka.actor.affinity.RejectionHandlerFactory.
-        rejection-handler = "akka.dispatch.affinity.ThrowOnOverflowRejectionHandler"
-
-        # Level of CPU time used, on a scale between 1 and 10, during backoff/idle.
-        # The tradeoff is that to have low latency more CPU time must be used to be
-        # able to react quickly on incoming messages or send as fast as possible after
-        # backoff backpressure.
-        # Level 1 strongly prefers low CPU consumption over low latency.
-        # Level 10 strongly prefers low latency over low CPU consumption.
-        idle-cpu-level = 5
-
-        # FQCN of the akka.dispatch.affinity.QueueSelectorFactory.
-        # The Class of the FQCN must have a public constructor with a
-        # (com.typesafe.config.Config) parameter.
-        # A QueueSelectorFactory creates instances of akka.dispatch.affinity.QueueSelector,
-        # which is responsible for determining which task queue a Runnable should be enqueued in.
-        queue-selector = "akka.dispatch.affinity.FairDistributionHashCache"
-
-        # When using the "akka.dispatch.affinity.FairDistributionHashCache" queue selector,
-        # internally the AffinityPool uses two methods to determine which task
-        # queue to allocate a Runnable to:
-        # - map based - maintains a round robin counter and a map of Runnable
-        #   hashcodes to queues that they have been associated with. This ensures
-        #   maximum fairness in terms of work distribution, meaning that each worker
-        #   will get an approximately equal number of mailboxes to execute. This is suitable
-        #   in cases where we have a small number of actors that will be scheduled on
-        #   the pool and we want to ensure the maximum possible utilization of the
-        #   available threads.
-        # - hash based - the task queue in which the runnable should go is determined
-        #   by using a uniformly distributed int to int hash function which uses the
-        #   hash code of the Runnable as an input. This is preferred in situations where we
-        #   have a large enough number of distinct actors to ensure statistically uniform
-        #   distribution of work across threads or we are ready to sacrifice the
-        #   former for the added benefit of avoiding map look-ups.
-        fair-work-distribution {
-          # The value serves as a threshold which determines the point at which the
-          # pool switches from the first to the second work distribution schemes.
- # For example, if the value is set to 128, the pool can observe up to - # 128 unique actors and schedule their mailboxes using the map based - # approach. Once this number is reached the pool switches to hash based - # task distribution mode. If the value is set to 0, the map based - # work distribution approach is disabled and only the hash based is - # used irrespective of the number of unique actors. Valid range is - # 0 to 2048 (inclusive) - threshold = 128 - } - } - - # This will be used if you have set "executor = "fork-join-executor"" - # Underlying thread pool implementation is akka.dispatch.forkjoin.ForkJoinPool - fork-join-executor { - # Min number of threads to cap factor-based parallelism number to - parallelism-min = 8 - - # The parallelism factor is used to determine thread pool size using the - # following formula: ceil(available processors * factor). Resulting size - # is then bounded by the parallelism-min and parallelism-max values. - parallelism-factor = 3.0 - - # Max number of threads to cap factor-based parallelism number to - parallelism-max = 64 - - # Setting to "FIFO" to use queue like peeking mode which "poll" or "LIFO" to use stack - # like peeking mode which "pop". - task-peeking-mode = "FIFO" - } - - # This will be used if you have set "executor = "thread-pool-executor"" - # Underlying thread pool implementation is java.util.concurrent.ThreadPoolExecutor - thread-pool-executor { - # Keep alive time for threads - keep-alive-time = 60s - - # Define a fixed thread pool size with this property. The corePoolSize - # and the maximumPoolSize of the ThreadPoolExecutor will be set to this - # value, if it is defined. Then the other pool-size properties will not - # be used. - # - # Valid values are: `off` or a positive integer. - fixed-pool-size = off - - # Min number of threads to cap factor-based corePoolSize number to - core-pool-size-min = 8 - - # The core-pool-size-factor is used to determine corePoolSize of the - # ThreadPoolExecutor using the following formula: - # ceil(available processors * factor). - # Resulting size is then bounded by the core-pool-size-min and - # core-pool-size-max values. - core-pool-size-factor = 3.0 - - # Max number of threads to cap factor-based corePoolSize number to - core-pool-size-max = 64 - - # Minimum number of threads to cap factor-based maximumPoolSize number to - max-pool-size-min = 8 - - # The max-pool-size-factor is used to determine maximumPoolSize of the - # ThreadPoolExecutor using the following formula: - # ceil(available processors * factor) - # The maximumPoolSize will not be less than corePoolSize. - # It is only used if using a bounded task queue. - max-pool-size-factor = 3.0 - - # Max number of threads to cap factor-based maximumPoolSize number to - max-pool-size-max = 64 - - # Specifies the bounded capacity of the task queue (< 1 == unbounded) - task-queue-size = -1 - - # Specifies which type of task queue will be used, can be "array" or - # "linked" (default) - task-queue-type = "linked" - - # Allow core threads to time out - allow-core-timeout = on - } - - # How long time the dispatcher will wait for new actors until it shuts down - shutdown-timeout = 1s - - # Throughput defines the number of messages that are processed in a batch - # before the thread is returned to the pool. Set to 1 for as fair as possible. 
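-      # (Illustrative tuning example, not a recommendation: throughput = 20
-      #  would let each actor drain up to 20 messages per scheduling run,
-      #  trading fairness for raw throughput.)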
- throughput = 5 - - # Throughput deadline for Dispatcher, set to 0 or negative for no deadline - throughput-deadline-time = 0ms - - # For BalancingDispatcher: If the balancing dispatcher should attempt to - # schedule idle actors using the same dispatcher when a message comes in, - # and the dispatchers ExecutorService is not fully busy already. - attempt-teamwork = on - - # If this dispatcher requires a specific type of mailbox, specify the - # fully-qualified class name here; the actually created mailbox will - # be a subtype of this type. The empty string signifies no requirement. - mailbox-requirement = "" - } - - default-blocking-io-dispatcher { - type = "Dispatcher" - executor = "thread-pool-executor" - throughput = 1 - - thread-pool-executor { - fixed-pool-size = 16 - } - } - - default-mailbox { - # FQCN of the MailboxType. The Class of the FQCN must have a public - # constructor with - # (akka.actor.ActorSystem.Settings, com.typesafe.config.Config) parameters. - mailbox-type = "akka.dispatch.UnboundedMailbox" - - # If the mailbox is bounded then it uses this setting to determine its - # capacity. The provided value must be positive. - # NOTICE: - # Up to version 2.1 the mailbox type was determined based on this setting; - # this is no longer the case, the type must explicitly be a bounded mailbox. - mailbox-capacity = 1000 - - # If the mailbox is bounded then this is the timeout for enqueueing - # in case the mailbox is full. Negative values signify infinite - # timeout, which should be avoided as it bears the risk of dead-lock. - mailbox-push-timeout-time = 10s - - # For Actor with Stash: The default capacity of the stash. - # If negative (or zero) then an unbounded stash is used (default) - # If positive then a bounded stash is used and the capacity is set using - # the property - stash-capacity = -1 - } - - mailbox { - # Mapping between message queue semantics and mailbox configurations. - # Used by akka.dispatch.RequiresMessageQueue[T] to enforce different - # mailbox types on actors. - # If your Actor implements RequiresMessageQueue[T], then when you create - # an instance of that actor its mailbox type will be decided by looking - # up a mailbox configuration via T in this mapping - requirements { - "akka.dispatch.UnboundedMessageQueueSemantics" = - akka.actor.mailbox.unbounded-queue-based - "akka.dispatch.BoundedMessageQueueSemantics" = - akka.actor.mailbox.bounded-queue-based - "akka.dispatch.DequeBasedMessageQueueSemantics" = - akka.actor.mailbox.unbounded-deque-based - "akka.dispatch.UnboundedDequeBasedMessageQueueSemantics" = - akka.actor.mailbox.unbounded-deque-based - "akka.dispatch.BoundedDequeBasedMessageQueueSemantics" = - akka.actor.mailbox.bounded-deque-based - "akka.dispatch.MultipleConsumerSemantics" = - akka.actor.mailbox.unbounded-queue-based - "akka.dispatch.ControlAwareMessageQueueSemantics" = - akka.actor.mailbox.unbounded-control-aware-queue-based - "akka.dispatch.UnboundedControlAwareMessageQueueSemantics" = - akka.actor.mailbox.unbounded-control-aware-queue-based - "akka.dispatch.BoundedControlAwareMessageQueueSemantics" = - akka.actor.mailbox.bounded-control-aware-queue-based - "akka.event.LoggerMessageQueueSemantics" = - akka.actor.mailbox.logger-queue - } - - unbounded-queue-based { - # FQCN of the MailboxType, The Class of the FQCN must have a public - # constructor with (akka.actor.ActorSystem.Settings, - # com.typesafe.config.Config) parameters. 
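-        # (Illustration: a custom mailbox implementation could be mapped the
-        #  same way by giving its FQCN, e.g. mailbox-type = "com.example.MyMailbox";
-        #  the class name is a placeholder.)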
-        mailbox-type = "akka.dispatch.UnboundedMailbox"
-      }
-
-      bounded-queue-based {
-        # FQCN of the MailboxType. The Class of the FQCN must have a public
-        # constructor with (akka.actor.ActorSystem.Settings,
-        # com.typesafe.config.Config) parameters.
-        mailbox-type = "akka.dispatch.BoundedMailbox"
-      }
-
-      unbounded-deque-based {
-        # FQCN of the MailboxType. The Class of the FQCN must have a public
-        # constructor with (akka.actor.ActorSystem.Settings,
-        # com.typesafe.config.Config) parameters.
-        mailbox-type = "akka.dispatch.UnboundedDequeBasedMailbox"
-      }
-
-      bounded-deque-based {
-        # FQCN of the MailboxType. The Class of the FQCN must have a public
-        # constructor with (akka.actor.ActorSystem.Settings,
-        # com.typesafe.config.Config) parameters.
-        mailbox-type = "akka.dispatch.BoundedDequeBasedMailbox"
-      }
-
-      unbounded-control-aware-queue-based {
-        # FQCN of the MailboxType. The Class of the FQCN must have a public
-        # constructor with (akka.actor.ActorSystem.Settings,
-        # com.typesafe.config.Config) parameters.
-        mailbox-type = "akka.dispatch.UnboundedControlAwareMailbox"
-      }
-
-      bounded-control-aware-queue-based {
-        # FQCN of the MailboxType. The Class of the FQCN must have a public
-        # constructor with (akka.actor.ActorSystem.Settings,
-        # com.typesafe.config.Config) parameters.
-        mailbox-type = "akka.dispatch.BoundedControlAwareMailbox"
-      }
-
-      # The LoggerMailbox will drain all messages in the mailbox
-      # when the system is shut down and deliver them to the StandardOutLogger.
-      # Do not change this unless you know what you are doing.
-      logger-queue {
-        mailbox-type = "akka.event.LoggerMailboxType"
-      }
-    }
-
-    debug {
-      # enable function of Actor.loggable(), which is to log any received message
-      # at DEBUG level, see the "Testing Actor Systems" section of the Akka
-      # Documentation at http://akka.io/docs
-      receive = off
-
-      # enable DEBUG logging of all AutoReceiveMessages (Kill, PoisonPill etc.)
-      autoreceive = off
-
-      # enable DEBUG logging of actor lifecycle changes
-      lifecycle = off
-
-      # enable DEBUG logging of all LoggingFSMs for events, transitions and timers
-      fsm = off
-
-      # enable DEBUG logging of subscription changes on the eventStream
-      event-stream = off
-
-      # enable DEBUG logging of unhandled messages
-      unhandled = off
-
-      # enable WARN logging of misconfigured routers
-      router-misconfiguration = off
-    }
-
-    # SECURITY BEST-PRACTICE is to disable java serialization for its multiple
-    # known attack surfaces.
-    #
-    # This setting is a short-cut to
-    # - using DisabledJavaSerializer instead of JavaSerializer
-    # - enable-additional-serialization-bindings = on
-    #
-    # Completely disable the use of `akka.serialization.JavaSerialization` by the
-    # Akka Serialization extension; instead DisabledJavaSerializer will
-    # be inserted, which will fail explicitly if attempts to use java serialization are made.
-    #
-    # The log messages emitted by such a serializer SHOULD be treated as potential
-    # attacks which the serializer prevented, as they MAY indicate an external operator
-    # attempting to send malicious messages intending to use java serialization as an attack vector.
-    # The attempts are logged with the SECURITY marker.
-    #
-    # Please note that this option does not stop you from manually invoking java serialization.
-    #
-    # The default value for this might be changed to off in future versions of Akka.
-    allow-java-serialization = on
-
-    # Entries for pluggable serializers and their bindings.
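-    # (Hypothetical example of the mechanism described above: a custom
-    #  serializer would be registered as
-    #    serializers { myjson = "com.example.MyJsonSerializer" }
-    #  and bound with
-    #    serialization-bindings { "com.example.MyMessage" = myjson }
-    #  both class names are placeholders.)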
-    serializers {
-      java = "akka.serialization.JavaSerializer"
-      bytes = "akka.serialization.ByteArraySerializer"
-    }
-
-    # Class to Serializer binding. You only need to specify the name of an
-    # interface or abstract base class of the messages. In case of ambiguity it
-    # uses the most specific configured class, or gives a warning and
-    # chooses the "first" one.
-    #
-    # To disable one of the default serializers, assign its class to "none", like
-    # "java.io.Serializable" = none
-    serialization-bindings {
-      "[B" = bytes
-      "java.io.Serializable" = java
-    }
-
-    # Additional serialization-bindings that are replacing Java serialization are
-    # defined in this section for backwards compatibility reasons. They are included
-    # by default but can be excluded for backwards compatibility with Akka 2.4.x.
-    # They can be disabled with enable-additional-serialization-bindings=off.
-    #
-    # This should only be needed for backwards compatibility reasons.
-    enable-additional-serialization-bindings = on
-
-    # Additional serialization-bindings that are replacing Java serialization are
-    # defined in this section for backwards compatibility reasons. They are included
-    # by default but can be excluded for backwards compatibility with Akka 2.4.x.
-    # They can be disabled with enable-additional-serialization-bindings=off.
-    additional-serialization-bindings {
-    }
-
-    # Log warnings when the default Java serialization is used to serialize messages.
-    # The default serializer uses Java serialization which is not very performant and should not
-    # be used in production environments unless you don't care about performance. In that case
-    # you can turn this off.
-    warn-about-java-serializer-usage = on
-
-    # To be used with the above warn-about-java-serializer-usage.
-    # When warn-about-java-serializer-usage = on, and this warn-on-no-serialization-verification = off,
-    # warnings are suppressed for classes extending NoSerializationVerificationNeeded
-    # to reduce noise.
-    warn-on-no-serialization-verification = on
-
-    # Configuration namespace of serialization identifiers.
-    # Each serializer implementation must have an entry in the following format:
-    # `akka.actor.serialization-identifiers."FQCN" = ID`
-    # where `FQCN` is the fully qualified class name of the serializer implementation
-    # and `ID` is a globally unique serializer identifier number.
-    # Identifier values from 0 to 40 are reserved for Akka internal usage.
-    serialization-identifiers {
-      "akka.serialization.JavaSerializer" = 1
-      "akka.serialization.ByteArraySerializer" = 4
-    }
-
-    # Configuration items which are used by the akka.actor.ActorDSL._ methods
-    dsl {
-      # Maximum queue size of the actor created by newInbox(); this protects
-      # against faulty programs which use select() and consistently miss messages
-      inbox-size = 1000
-
-      # Default timeout to assume for operations like Inbox.receive et al
-      default-timeout = 5s
-    }
-  }
-
-  # Used to set the behavior of the scheduler.
-  # Changing the default values may change the system behavior drastically so make
-  # sure you know what you're doing! See the Scheduler section of the Akka
-  # Documentation for more details.
-  scheduler {
-    # The LightArrayRevolverScheduler is used as the default scheduler in the
-    # system. It does not execute the scheduled tasks at the exact time, but on every
-    # tick, it will run everything that is (over)due. You can increase or decrease
-    # the accuracy of the execution timing by specifying smaller or larger tick
-    # duration.
If you are scheduling a lot of tasks you should consider increasing - # the ticks per wheel. - # Note that it might take up to 1 tick to stop the Timer, so setting the - # tick-duration to a high value will make shutting down the actor system - # take longer. - tick-duration = 10ms - - # The timer uses a circular wheel of buckets to store the timer tasks. - # This should be set such that the majority of scheduled timeouts (for high - # scheduling frequency) will be shorter than one rotation of the wheel - # (ticks-per-wheel * ticks-duration) - # THIS MUST BE A POWER OF TWO! - ticks-per-wheel = 512 - - # This setting selects the timer implementation which shall be loaded at - # system start-up. - # The class given here must implement the akka.actor.Scheduler interface - # and offer a public constructor which takes three arguments: - # 1) com.typesafe.config.Config - # 2) akka.event.LoggingAdapter - # 3) java.util.concurrent.ThreadFactory - implementation = akka.actor.LightArrayRevolverScheduler - - # When shutting down the scheduler, there will typically be a thread which - # needs to be stopped, and this timeout determines how long to wait for - # that to happen. In case of timeout the shutdown of the actor system will - # proceed without running possibly still enqueued tasks. - shutdown-timeout = 5s - } - - io { - - # By default the select loops run on dedicated threads, hence using a - # PinnedDispatcher - pinned-dispatcher { - type = "PinnedDispatcher" - executor = "thread-pool-executor" - thread-pool-executor.allow-core-timeout = off - } - - tcp { - - # The number of selectors to stripe the served channels over; each of - # these will use one select loop on the selector-dispatcher. - nr-of-selectors = 1 - - # Maximum number of open channels supported by this TCP module; there is - # no intrinsic general limit, this setting is meant to enable DoS - # protection by limiting the number of concurrently connected clients. - # Also note that this is a "soft" limit; in certain cases the implementation - # will accept a few connections more or a few less than the number configured - # here. Must be an integer > 0 or "unlimited". - max-channels = 256000 - - # When trying to assign a new connection to a selector and the chosen - # selector is at full capacity, retry selector choosing and assignment - # this many times before giving up - selector-association-retries = 10 - - # The maximum number of connection that are accepted in one go, - # higher numbers decrease latency, lower numbers increase fairness on - # the worker-dispatcher - batch-accept-limit = 10 - - # The number of bytes per direct buffer in the pool used to read or write - # network data from the kernel. - direct-buffer-size = 128 KiB - - # The maximal number of direct buffers kept in the direct buffer pool for - # reuse. - direct-buffer-pool-limit = 1000 - - # The duration a connection actor waits for a `Register` message from - # its commander before aborting the connection. - register-timeout = 5s - - # The maximum number of bytes delivered by a `Received` message. Before - # more data is read from the network the connection actor will try to - # do other work. - # The purpose of this setting is to impose a smaller limit than the - # configured receive buffer size. When using value 'unlimited' it will - # try to read all from the receive buffer. - max-received-message-size = unlimited - - # Enable fine grained logging of what goes on inside the implementation. 
- # Be aware that this may log more than once per message sent to the actors - # of the tcp implementation. - trace-logging = off - - # Fully qualified config path which holds the dispatcher configuration - # to be used for running the select() calls in the selectors - selector-dispatcher = "akka.io.pinned-dispatcher" - - # Fully qualified config path which holds the dispatcher configuration - # for the read/write worker actors - worker-dispatcher = "akka.actor.default-dispatcher" - - # Fully qualified config path which holds the dispatcher configuration - # for the selector management actors - management-dispatcher = "akka.actor.default-dispatcher" - - # Fully qualified config path which holds the dispatcher configuration - # on which file IO tasks are scheduled - file-io-dispatcher = "akka.actor.default-blocking-io-dispatcher" - - # The maximum number of bytes (or "unlimited") to transfer in one batch - # when using `WriteFile` command which uses `FileChannel.transferTo` to - # pipe files to a TCP socket. On some OS like Linux `FileChannel.transferTo` - # may block for a long time when network IO is faster than file IO. - # Decreasing the value may improve fairness while increasing may improve - # throughput. - file-io-transferTo-limit = 512 KiB - - # The number of times to retry the `finishConnect` call after being notified about - # OP_CONNECT. Retries are needed if the OP_CONNECT notification doesn't imply that - # `finishConnect` will succeed, which is the case on Android. - finish-connect-retries = 5 - - # On Windows connection aborts are not reliably detected unless an OP_READ is - # registered on the selector _after_ the connection has been reset. This - # workaround enables an OP_CONNECT which forces the abort to be visible on Windows. - # Enabling this setting on other platforms than Windows will cause various failures - # and undefined behavior. - # Possible values of this key are on, off and auto where auto will enable the - # workaround if Windows is detected automatically. - windows-connection-abort-workaround-enabled = off - } - - udp { - - # The number of selectors to stripe the served channels over; each of - # these will use one select loop on the selector-dispatcher. - nr-of-selectors = 1 - - # Maximum number of open channels supported by this UDP module Generally - # UDP does not require a large number of channels, therefore it is - # recommended to keep this setting low. - max-channels = 4096 - - # The select loop can be used in two modes: - # - setting "infinite" will select without a timeout, hogging a thread - # - setting a positive timeout will do a bounded select call, - # enabling sharing of a single thread between multiple selectors - # (in this case you will have to use a different configuration for the - # selector-dispatcher, e.g. using "type=Dispatcher" with size 1) - # - setting it to zero means polling, i.e. calling selectNow() - select-timeout = infinite - - # When trying to assign a new connection to a selector and the chosen - # selector is at full capacity, retry selector choosing and assignment - # this many times before giving up - selector-association-retries = 10 - - # The maximum number of datagrams that are read in one go, - # higher numbers decrease latency, lower numbers increase fairness on - # the worker-dispatcher - receive-throughput = 3 - - # The number of bytes per direct buffer in the pool used to read or write - # network data from the kernel. 
- direct-buffer-size = 128 KiB - - # The maximal number of direct buffers kept in the direct buffer pool for - # reuse. - direct-buffer-pool-limit = 1000 - - # Enable fine grained logging of what goes on inside the implementation. - # Be aware that this may log more than once per message sent to the actors - # of the tcp implementation. - trace-logging = off - - # Fully qualified config path which holds the dispatcher configuration - # to be used for running the select() calls in the selectors - selector-dispatcher = "akka.io.pinned-dispatcher" - - # Fully qualified config path which holds the dispatcher configuration - # for the read/write worker actors - worker-dispatcher = "akka.actor.default-dispatcher" - - # Fully qualified config path which holds the dispatcher configuration - # for the selector management actors - management-dispatcher = "akka.actor.default-dispatcher" - } - - udp-connected { - - # The number of selectors to stripe the served channels over; each of - # these will use one select loop on the selector-dispatcher. - nr-of-selectors = 1 - - # Maximum number of open channels supported by this UDP module Generally - # UDP does not require a large number of channels, therefore it is - # recommended to keep this setting low. - max-channels = 4096 - - # The select loop can be used in two modes: - # - setting "infinite" will select without a timeout, hogging a thread - # - setting a positive timeout will do a bounded select call, - # enabling sharing of a single thread between multiple selectors - # (in this case you will have to use a different configuration for the - # selector-dispatcher, e.g. using "type=Dispatcher" with size 1) - # - setting it to zero means polling, i.e. calling selectNow() - select-timeout = infinite - - # When trying to assign a new connection to a selector and the chosen - # selector is at full capacity, retry selector choosing and assignment - # this many times before giving up - selector-association-retries = 10 - - # The maximum number of datagrams that are read in one go, - # higher numbers decrease latency, lower numbers increase fairness on - # the worker-dispatcher - receive-throughput = 3 - - # The number of bytes per direct buffer in the pool used to read or write - # network data from the kernel. - direct-buffer-size = 128 KiB - - # The maximal number of direct buffers kept in the direct buffer pool for - # reuse. - direct-buffer-pool-limit = 1000 - - # Enable fine grained logging of what goes on inside the implementation. - # Be aware that this may log more than once per message sent to the actors - # of the tcp implementation. - trace-logging = off - - # Fully qualified config path which holds the dispatcher configuration - # to be used for running the select() calls in the selectors - selector-dispatcher = "akka.io.pinned-dispatcher" - - # Fully qualified config path which holds the dispatcher configuration - # for the read/write worker actors - worker-dispatcher = "akka.actor.default-dispatcher" - - # Fully qualified config path which holds the dispatcher configuration - # for the selector management actors - management-dispatcher = "akka.actor.default-dispatcher" - } - - dns { - # Fully qualified config path which holds the dispatcher configuration - # for the manager and resolver router actors. 
- # For actual router configuration see akka.actor.deployment./IO-DNS/* - dispatcher = "akka.actor.default-dispatcher" - - # Name of the subconfig at path akka.io.dns, see inet-address below - # - # Change to `async-dns` to use the new "native" DNS resolver, - # which is also capable of resolving SRV records. - resolver = "inet-address" - - # To-be-deprecated DNS resolver implementation which uses the Java InetAddress to resolve DNS records. - # To be replaced by `akka.io.dns.async` which implements the DNS protocol natively and without blocking (which InetAddress does) - inet-address { - # Must implement akka.io.DnsProvider - provider-object = "akka.io.InetAddressDnsProvider" - - # To set the time to cache name resolutions - # Possible values: - # default: sun.net.InetAddressCachePolicy.get() and getNegative() - # forever: cache forever - # never: no caching - # n [time unit]: positive timeout with unit, for example "30 s" - positive-ttl = default - negative-ttl = default - - # How often to sweep out expired cache entries. - # Note that this interval has nothing to do with TTLs - cache-cleanup-interval = 120s - } - - async-dns { - provider-object = "akka.io.dns.internal.AsyncDnsProvider" - - # Configures nameservers to query during DNS resolution. - # Defaults to the nameservers that would be used by the JVM by default. - # Set to a list of IPs to override the servers, e.g. [ "8.8.8.8", "8.8.4.4" ] for Google's servers - # If multiple are defined then they are tried in order until one responds - nameservers = default - - # The time that a request is allowed to live before being discarded - # given no reply. The lower bound of this should always be the amount - # of time to reasonably expect a DNS server to reply within. - # If multiple name servers are provided then each gets this long to response before trying - # the next one - resolve-timeout = 5s - - # How often to sweep out expired cache entries. - # Note that this interval has nothing to do with TTLs - cache-cleanup-interval = 120s - - # Configures the list of search domains. - # Defaults to a system dependent lookup (on Unix like OSes, will attempt to parse /etc/resolv.conf, on - # other platforms, will not make any attempt to lookup the search domains). Set to a single domain, or - # a list of domains, eg, [ "example.com", "example.net" ]. - search-domains = default - - # Any hosts that have a number of dots less than this will not be looked up directly, instead, a search on - # the search domains will be tried first. This corresponds to the ndots option in /etc/resolv.conf, see - # https://linux.die.net/man/5/resolver for more info. - # Defaults to a system dependent lookup (on Unix like OSes, will attempt to parse /etc/resolv.conf, on - # other platforms, will default to 1). - ndots = default - } - } - } - - - # CoordinatedShutdown is an extension that will perform registered - # tasks in the order that is defined by the phases. It is started - # by calling CoordinatedShutdown(system).run(). This can be triggered - # by different things, for example: - # - JVM shutdown hook will by default run CoordinatedShutdown - # - Cluster node will automatically run CoordinatedShutdown when it - # sees itself as Exiting - # - A management console or other application specific command can - # run CoordinatedShutdown - coordinated-shutdown { - # The timeout that will be used for a phase if not specified with - # 'timeout' in the phase - default-phase-timeout = 5 s - - # Terminate the ActorSystem in the last phase actor-system-terminate. 
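-    # (Illustration only: a deployment that wants the JVM to exit once the
-    #  shutdown phases complete could set
-    #    akka.coordinated-shutdown.exit-jvm = on
-    #  in application.conf; the defaults below leave it off.)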
- terminate-actor-system = on - - # Exit the JVM (System.exit(0)) in the last phase actor-system-terminate - # if this is set to 'on'. It is done after termination of the - # ActorSystem if terminate-actor-system=on, otherwise it is done - # immediately when the last phase is reached. - exit-jvm = off - - # Exit status to use on System.exit(int) when 'exit-jvm' is 'on'. - exit-code = 0 - - # Run the coordinated shutdown when the JVM process exits, e.g. - # via kill SIGTERM signal (SIGINT ctrl-c doesn't work). - # This property is related to `akka.jvm-shutdown-hooks` above. - run-by-jvm-shutdown-hook = on - - # When Coordinated Shutdown is triggered an instance of `Reason` is - # required. That value can be used to override the default settings. - # Only 'exit-jvm', 'exit-code' and 'terminate-actor-system' may be - # overridden depending on the reason. - reason-overrides { - # Overrides are applied using the `reason.getClass.getName`. - # Overrides the `exit-code` when the `Reason` is a cluster - # Downing or a Cluster Join Unsuccessful event - "akka.actor.CoordinatedShutdown$ClusterDowningReason$" { - exit-code = -1 - } - "akka.actor.CoordinatedShutdown$ClusterJoinUnsuccessfulReason$" { - exit-code = -1 - } - } - - #//#coordinated-shutdown-phases - # CoordinatedShutdown is enabled by default and will run the tasks that - # are added to these phases by individual Akka modules and user logic. - # - # The phases are ordered as a DAG by defining the dependencies between the phases - # to make sure shutdown tasks are run in the right order. - # - # In general user tasks belong in the first few phases, but there may be use - # cases where you would want to hook in new phases or register tasks later in - # the DAG. - # - # Each phase is defined as a named config section with the - # following optional properties: - # - timeout=15s: Override the default-phase-timeout for this phase. - # - recover=off: If the phase fails the shutdown is aborted - # and depending phases will not be executed. - # - enabled=off: Skip all tasks registered in this phase. DO NOT use - # this to disable phases unless you are absolutely sure what the - # consequences are. Many of the built in tasks depend on other tasks - # having been executed in earlier phases and may break if those are disabled. - # depends-on=[]: Run the phase after the given phases - phases { - - # The first pre-defined phase that applications can add tasks to. - # Note that more phases can be added in the application's - # configuration by overriding this phase with an additional - # depends-on. - before-service-unbind { - } - - # Stop accepting new incoming connections. - # This is where you can register tasks that makes a server stop accepting new connections. Already - # established connections should be allowed to continue and complete if possible. - service-unbind { - depends-on = [before-service-unbind] - } - - # Wait for requests that are in progress to be completed. - # This is where you register tasks that will wait for already established connections to complete, potentially - # also first telling them that it is time to close down. - service-requests-done { - depends-on = [service-unbind] - } - - # Final shutdown of service endpoints. - # This is where you would add tasks that forcefully kill connections that are still around. - service-stop { - depends-on = [service-requests-done] - } - - # Phase for custom application tasks that are to be run - # after service shutdown and before cluster shutdown. 
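-      # (Illustration: application code would attach work to such a phase via
-      #  the string-based API, e.g.
-      #    CoordinatedShutdown.get(system).addTask("before-cluster-shutdown",
-      #        "drain", () -> ...);
-      #  "drain" is a placeholder task name.)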
- before-cluster-shutdown { - depends-on = [service-stop] - } - - # Graceful shutdown of the Cluster Sharding regions. - # This phase is not meant for users to add tasks to. - cluster-sharding-shutdown-region { - timeout = 10 s - depends-on = [before-cluster-shutdown] - } - - # Emit the leave command for the node that is shutting down. - # This phase is not meant for users to add tasks to. - cluster-leave { - depends-on = [cluster-sharding-shutdown-region] - } - - # Shutdown cluster singletons - # This is done as late as possible to allow the shard region shutdown triggered in - # the "cluster-sharding-shutdown-region" phase to complete before the shard coordinator is shut down. - # This phase is not meant for users to add tasks to. - cluster-exiting { - timeout = 10 s - depends-on = [cluster-leave] - } - - # Wait until exiting has been completed - # This phase is not meant for users to add tasks to. - cluster-exiting-done { - depends-on = [cluster-exiting] - } - - # Shutdown the cluster extension - # This phase is not meant for users to add tasks to. - cluster-shutdown { - depends-on = [cluster-exiting-done] - } - - # Phase for custom application tasks that are to be run - # after cluster shutdown and before ActorSystem termination. - before-actor-system-terminate { - depends-on = [cluster-shutdown] - } - - # Last phase. See terminate-actor-system and exit-jvm above. - # Don't add phases that depends on this phase because the - # dispatcher and scheduler of the ActorSystem have been shutdown. - # This phase is not meant for users to add tasks to. - actor-system-terminate { - timeout = 10 s - depends-on = [before-actor-system-terminate] - } - } - #//#coordinated-shutdown-phases - } - -} - -######################################## -# akka-http-cors Reference Config File # -######################################## - -# This is the reference config file that contains all the default settings. -# Make your edits/overrides in your application.conf. - -akka-http-cors { - - # If enabled, allow generic requests (that are outside the scope of the specification) - # to pass through the directive. Else, strict CORS filtering is applied and any - # invalid request will be rejected. - allow-generic-http-requests = yes - - # Indicates whether the resource supports user credentials. If enabled, the header - # `Access-Control-Allow-Credentials` is set in the response, indicating that the - # actual request can include user credentials. Examples of user credentials are: - # cookies, HTTP authentication or client-side certificates. - allow-credentials = yes - - # List of origins that the CORS filter must allow. Can also be set to `*` to allow - # access to the resource from any origin. Controls the content of the - # `Access-Control-Allow-Origin` response header: if parameter is `*` and credentials - # are not allowed, a `*` is set in `Access-Control-Allow-Origin`. Otherwise, the - # origins given in the `Origin` request header are echoed. - # - # Hostname starting with `*.` will match any sub-domain. - # The scheme and the port are always strictly matched. - # - # The actual or preflight request is rejected if any of the origins from the request - # is not allowed. - allowed-origins = "*" - - # List of request headers that can be used when making an actual request. Controls - # the content of the `Access-Control-Allow-Headers` header in a preflight response: - # if parameter is `*`, the headers from `Access-Control-Request-Headers` are echoed. 
- # Otherwise the parameter list is returned as part of the header. - allowed-headers = "*" - - # List of methods that can be used when making an actual request. The list is - # returned as part of the `Access-Control-Allow-Methods` preflight response header. - # - # The preflight request will be rejected if the `Access-Control-Request-Method` - # header's method is not part of the list. - allowed-methods = ["GET", "POST", "HEAD", "OPTIONS"] - - # List of headers (other than simple response headers) that browsers are allowed to access. - # If not empty, this list is returned as part of the `Access-Control-Expose-Headers` - # header in the actual response. - exposed-headers = [] - - # When set, the number of seconds the browser is allowed to cache the results of a preflight request. - # This value is returned as part of the `Access-Control-Max-Age` preflight response header. - # If `null`, the header is not added to the preflight response. - max-age = 1800 seconds -} -# ======================================= # -# Kamon-Akka-Http Reference Configuration # -# ======================================= # - -kamon.akka-http { - - # Fully qualified name of the implementation of kamon.akka.http.AkkaHttp.OperationNameGenerator that will be used for - # assigning operation names to Server and Client operations generated by the Akka HTTP Instrumentation. If the value - # is "default" a simple generator with the following rules will be used: - # - Client operations will be named after the Host they are targeting. - # - Server operations will be named after the Path in the request. - # - # Note: It is highly recommended to use the `operationName` directive or supply your own name generator to avoid - # cardinality explosion caused by variable sections in the Path. - name-generator = default - - # Add http status codes as metric tags. The default value is false - add-http-status-code-as-metric-tag = false - - not-found-operation-name = "unhandled" - - modules { - kamon-akka-http { - requires-aspectj = yes - } - } -} -kamon { - zipkin { - host = "localhost" - port = 9411 - } -} -###################################### -# Akka Contrib Reference Config File # -###################################### - -# This is the reference config file that contains all the default settings. -# Make your edits/overrides in your application.conf. -################################### -# akka-http Reference Config File # -################################### - -# This is the reference config file that contains all the default settings. -# Make your edits/overrides in your application.conf. - -akka.http { - routing { - # Enables/disables the returning of more detailed error messages to the - # client in the error response - # Should be disabled for browser-facing APIs due to the risk of XSS attacks - # and (probably) enabled for internal or non-browser APIs - # (Note that akka-http will always produce log messages containing the full error details) - verbose-error-messages = off - - # Enables/disables ETag and `If-Modified-Since` support for FileAndResourceDirectives - file-get-conditional = on - - # Enables/disables the rendering of the "rendered by" footer in directory listings - render-vanity-footer = yes - - # The maximum size between two requested ranges. Ranges with less space in between will be coalesced. 
- # - # When multiple ranges are requested, a server may coalesce any of the ranges that overlap or that are separated - # by a gap that is smaller than the overhead of sending multiple parts, regardless of the order in which the - # corresponding byte-range-spec appeared in the received Range header field. Since the typical overhead between - # parts of a multipart/byteranges payload is around 80 bytes, depending on the selected representation's - # media type and the chosen boundary parameter length, it can be less efficient to transfer many small - # disjoint parts than it is to transfer the entire selected representation. - range-coalescing-threshold = 80 - - # The maximum number of allowed ranges per request. - # Requests with more ranges will be rejected due to DOS suspicion. - range-count-limit = 16 - - # The maximum number of bytes per ByteString a decoding directive will produce - # for an entity data stream. - decode-max-bytes-per-chunk = 1m - - # Maximum content length after applying a decoding directive. When the directive - # decompresses, for example, an entity compressed with gzip, the resulting stream can be much - # larger than the max-content-length. Like with max-content-length, this is not necessarily a - # problem when consuming the entity in a streaming fashion, but does risk high memory use - # when the entity is made strict or marshalled into an in-memory object. - # This limit (like max-content-length) can be overridden on a case-by-case basis using the - # withSizeLimit directive. - decode-max-size = 8m - } - - # server-sent events - sse { - # The maximum size for parsing server-sent events. - max-event-size = 8192 - - # The maximum size for parsing lines of a server-sent event. - max-line-size = 4096 - } -} -# ======================================== # -# kamon-prometheus reference configuration # -# ======================================== # - -kamon.prometheus { - - # Enable or disable publishing the Prometheus scraping endpoint using an embedded server. - start-embedded-http-server = yes - - # Enable or disable including tags from kamon.prometheus.environment as labels - include-environment-tags = no - - buckets { - default-buckets = [ - 10, - 30, - 100, - 300, - 1000, - 3000, - 10000, - 30000, - 100000 - ] - - time-buckets = [ - 0.005, - 0.01, - 0.025, - 0.05, - 0.075, - 0.1, - 0.25, - 0.5, - 0.75, - 1, - 2.5, - 5, - 7.5, - 10 - ] - - information-buckets = [ - 512, - 1024, - 2048, - 4096, - 16384, - 65536, - 524288, - 1048576 - ] - - # Per metric overrides are possible by specifying the metric name and the histogram buckets here - custom { - // example: - // "akka.actor.processing-time" = [0.1, 1.0, 10.0] - } - } - - - embedded-server { - - # Hostname and port used by the embedded web server to publish the scraping endpoint. - hostname = 0.0.0.0 - port = 9095 - } -} -# ================================== # -# Kamon-Akka Reference Configuration # -# ================================== # - -kamon { - akka { - # If ask-pattern-timeout-warning is enabled, a WARN level log message will be generated if a future generated by the `ask` - # pattern fails with an `AskTimeoutException` and the log message will contain information depending on the strategy selected. - # strategies: - # - off: nothing to do. - # - lightweight: logs the warning when a timeout is reached using org.aspectj.lang.reflect.SourceLocation. - # - heavyweight: logs the warning when a timeout is reached using a stack trace captured at the moment the future was created. 
- ask-pattern-timeout-warning = off - - # Filter names from which actor groups will be created. Setting up actor groups require two steps: first, define - # a filter under the kamon.util.filters key and second, add that filter to this key. E.g.: for the following config: - # - # kamon.util.filters { - # worker-actors { - # includes = ["my-system/user/application/worker-*", "my-system/user/workers/**"] - # excludes = [ ] - # } - # } - # - # kamon.akka { - # actor-groups = [ "worker-actors" ] - # } - # - # An actor group named "worker-actors" will be created and include all the actors whose path matches the provided - # patterns. - actor-groups = [ ] - } - - util.filters { - "akka.tracked-actor" { - includes = [ ] - excludes = [ "*/system/**", "*/user/IO-**" ] - } - - "akka.tracked-router" { - includes = [ ] - excludes = [ ] - } - - "akka.tracked-dispatcher" { - includes = [ ] - excludes = [ ] - } - - "akka.traced-actor" { - includes = [ ] - excludes = [ ] - } - } - - modules { - kamon-akka { - requires-aspectj = yes - } - } -} -###################################################### -# Akka Management Cluster Reference Config File # -###################################################### - -# This is the reference config file that contains all the default settings. -# Make your edits/overrides in your application.conf. - -akka.management { - - http.routes { - # registers http management to be included in akka-management's http endpoint - cluster-management = "akka.management.cluster.ClusterHttpManagementRouteProvider" - } - - cluster { - health-check { - # Ready health check returns 200 when cluster membership is in the following states. - # Intended to be used to indicate this node is ready for user traffic so Up/WeaklyUp - # Valid values: "Joining", "WeaklyUp", "Up", "Leaving", "Exiting", "Down", "Removed" - ready-states = ["Up", "WeaklyUp"] - } - } - -} - -# registers cluster healthcheck to be included in akka-management's http endpoint -#health -akka.management { - health-checks { - readiness-checks { - # Default health check for cluster. Overwrite the setting to replace it with - # your implementation or set it to "" (empty string) to disable this check. - cluster-membership = "akka.management.cluster.scaladsl.ClusterMembershipCheck" - } - } -} -#health -#//#shared -##################################### -# Akka Remote Reference Config File # -##################################### - -# This is the reference config file that contains all the default settings. -# Make your edits/overrides in your application.conf. - -# comments about akka.actor settings left out where they are already in akka- -# actor.jar, because otherwise they would be repeated in config rendering. -# -# For the configuration of the new remoting implementation (Artery) please look -# at the bottom section of this file as it is listed separately. 
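Every reference block deleted in this file repeats the same instruction: "Make your edits/overrides in your application.conf." Only the overrides themselves need to live in the application's configuration; any key left unset keeps the library default quoted in this diff. A minimal sketch of such an override file, using keys that appear in these reference defaults but with purely illustrative values:

    # Hypothetical application.conf overrides; example values only.
    akka-http-cors {
      allowed-origins = "https://portal.example.org"  # reference default is "*"
    }
    akka.remote.netty.tcp {
      port = 2552    # reference default port for classic remoting
      hostname = ""  # empty: InetAddress.getLocalHost.getHostAddress is used
    }
    kamon.prometheus.embedded-server.port = 9095      # reference default scrape port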
- -akka { - - actor { - - serializers { - akka-containers = "akka.remote.serialization.MessageContainerSerializer" - akka-misc = "akka.remote.serialization.MiscMessageSerializer" - artery = "akka.remote.serialization.ArteryMessageSerializer" - proto = "akka.remote.serialization.ProtobufSerializer" - daemon-create = "akka.remote.serialization.DaemonMsgCreateSerializer" - primitive-long = "akka.remote.serialization.LongSerializer" - primitive-int = "akka.remote.serialization.IntSerializer" - primitive-string = "akka.remote.serialization.StringSerializer" - primitive-bytestring = "akka.remote.serialization.ByteStringSerializer" - akka-system-msg = "akka.remote.serialization.SystemMessageSerializer" - } - - serialization-bindings { - "akka.actor.ActorSelectionMessage" = akka-containers - - "akka.remote.DaemonMsgCreate" = daemon-create - - "akka.remote.artery.ArteryMessage" = artery - - # Since akka.protobuf.Message does not extend Serializable but - # GeneratedMessage does, need to use the more specific one here in order - # to avoid ambiguity. - "akka.protobuf.GeneratedMessage" = proto - - # Since com.google.protobuf.Message does not extend Serializable but - # GeneratedMessage does, need to use the more specific one here in order - # to avoid ambiguity. - # This com.google.protobuf serialization binding is only used if the class can be loaded, - # i.e. com.google.protobuf dependency has been added in the application project. - "com.google.protobuf.GeneratedMessage" = proto - - "java.util.Optional" = akka-misc - - - # The following are handled by the MiscMessageSerializer, but they are not enabled for - # compatibility reasons (it was added in Akka 2.5.[8,9,12]). Enable them by adding: - # akka.actor.serialization-bindings { - # "akka.Done" = akka-misc - # "akka.NotUsed" = akka-misc - # "akka.actor.Address" = akka-misc - # "akka.remote.UniqueAddress" = akka-misc - # } - } - - # Additional serialization-bindings that are replacing Java serialization are - # defined in this section for backwards compatibility reasons. They are included - # by default but can be excluded for backwards compatibility with Akka 2.4.x. - # They can be disabled with enable-additional-serialization-bindings=off. - additional-serialization-bindings { - "akka.actor.Identify" = akka-misc - "akka.actor.ActorIdentity" = akka-misc - "scala.Some" = akka-misc - "scala.None$" = akka-misc - "akka.actor.Status$Success" = akka-misc - "akka.actor.Status$Failure" = akka-misc - "akka.actor.ActorRef" = akka-misc - "akka.actor.PoisonPill$" = akka-misc - "akka.actor.Kill$" = akka-misc - "akka.remote.RemoteWatcher$Heartbeat$" = akka-misc - "akka.remote.RemoteWatcher$HeartbeatRsp" = akka-misc - "akka.actor.ActorInitializationException" = akka-misc - - "akka.dispatch.sysmsg.SystemMessage" = akka-system-msg - - "java.lang.String" = primitive-string - "akka.util.ByteString$ByteString1C" = primitive-bytestring - "akka.util.ByteString$ByteString1" = primitive-bytestring - "akka.util.ByteString$ByteStrings" = primitive-bytestring - "java.lang.Long" = primitive-long - "scala.Long" = primitive-long - "java.lang.Integer" = primitive-int - "scala.Int" = primitive-int - - # Java Serializer is by default used for exceptions. - # It's recommended that you implement custom serializer for exceptions that are - # sent remotely, e.g. in akka.actor.Status.Failure for ask replies. 
You can add - # binding to akka-misc (MiscMessageSerializerSpec) for the exceptions that have - # a constructor with single message String or constructor with message String as - # first parameter and cause Throwable as second parameter. Note that it's not - # safe to add this binding for general exceptions such as IllegalArgumentException - # because it may have a subclass without required constructor. - "java.lang.Throwable" = java - "akka.actor.IllegalActorStateException" = akka-misc - "akka.actor.ActorKilledException" = akka-misc - "akka.actor.InvalidActorNameException" = akka-misc - "akka.actor.InvalidMessageException" = akka-misc - - "akka.actor.LocalScope$" = akka-misc - "akka.remote.RemoteScope" = akka-misc - - "com.typesafe.config.impl.SimpleConfig" = akka-misc - "com.typesafe.config.Config" = akka-misc - - "akka.routing.FromConfig" = akka-misc - "akka.routing.DefaultResizer" = akka-misc - "akka.routing.BalancingPool" = akka-misc - "akka.routing.BroadcastGroup" = akka-misc - "akka.routing.BroadcastPool" = akka-misc - "akka.routing.RandomGroup" = akka-misc - "akka.routing.RandomPool" = akka-misc - "akka.routing.RoundRobinGroup" = akka-misc - "akka.routing.RoundRobinPool" = akka-misc - "akka.routing.ScatterGatherFirstCompletedGroup" = akka-misc - "akka.routing.ScatterGatherFirstCompletedPool" = akka-misc - "akka.routing.SmallestMailboxPool" = akka-misc - "akka.routing.TailChoppingGroup" = akka-misc - "akka.routing.TailChoppingPool" = akka-misc - "akka.remote.routing.RemoteRouterConfig" = akka-misc - } - - # Additional serialization bindings which are enabled automatically when allow-java-serialization is disabled. - java-serialization-disabled-additional-serialization-bindings = { - "akka.Done" = akka-misc - "akka.NotUsed" = akka-misc - "akka.actor.Address" = akka-misc - "akka.remote.UniqueAddress" = akka-misc - } - - serialization-identifiers { - "akka.remote.serialization.ProtobufSerializer" = 2 - "akka.remote.serialization.DaemonMsgCreateSerializer" = 3 - "akka.remote.serialization.MessageContainerSerializer" = 6 - "akka.remote.serialization.MiscMessageSerializer" = 16 - "akka.remote.serialization.ArteryMessageSerializer" = 17 - "akka.remote.serialization.LongSerializer" = 18 - "akka.remote.serialization.IntSerializer" = 19 - "akka.remote.serialization.StringSerializer" = 20 - "akka.remote.serialization.ByteStringSerializer" = 21 - "akka.remote.serialization.SystemMessageSerializer" = 22 - } - - deployment { - - default { - - # if this is set to a valid remote address, the named actor will be - # deployed at that node e.g. "akka.tcp://sys@host:port" - remote = "" - - target { - - # A list of hostnames and ports for instantiating the children of a - # router - # The format should be on "akka.tcp://sys@host:port", where: - # - sys is the remote actor system name - # - hostname can be either hostname or IP address the remote actor - # should connect to - # - port should be the port for the remote server on the other node - # The number of actor instances to be spawned is still taken from the - # nr-of-instances setting as for local routers; the instances will be - # distributed round-robin among the given nodes. - nodes = [] - - } - } - } - } - - remote { - ### Settings shared by classic remoting and Artery (the new implementation of remoting) - - # If set to a nonempty string remoting will use the given dispatcher for - # its internal actors otherwise the default dispatcher is used. 
Please note - # that since remoting can load arbitrary 3rd party drivers (see - # "enabled-transport" and "adapters" entries) it is not guaranteed that - # every module will respect this setting. - use-dispatcher = "akka.remote.default-remote-dispatcher" - - # Settings for the failure detector to monitor connections. - # For TCP it is not important to have fast failure detection, since - # most connection failures are captured by TCP itself. - # The default DeadlineFailureDetector will trigger if there are no heartbeats within - # the duration heartbeat-interval + acceptable-heartbeat-pause, i.e. 124 seconds - # with the default settings. - transport-failure-detector { - - # FQCN of the failure detector implementation. - # It must implement akka.remote.FailureDetector and have - # a public constructor with a com.typesafe.config.Config and - # akka.actor.EventStream parameter. - implementation-class = "akka.remote.DeadlineFailureDetector" - - # How often keep-alive heartbeat messages should be sent to each connection. - heartbeat-interval = 4 s - - # Number of potentially lost/delayed heartbeats that will be - # accepted before considering it to be an anomaly. - # A margin to the `heartbeat-interval` is important to be able to survive sudden, - # occasional, pauses in heartbeat arrivals, due to for example garbage collection or - # network drop. - acceptable-heartbeat-pause = 120 s - } - - # Settings for the Phi accrual failure detector (http://www.jaist.ac.jp/~defago/files/pdf/IS_RR_2004_010.pdf - # [Hayashibara et al]) used for remote death watch. - # The default PhiAccrualFailureDetector will trigger if there are no heartbeats within - # the duration heartbeat-interval + acceptable-heartbeat-pause + threshold_adjustment, - # i.e. around 12.5 seconds with default settings. - watch-failure-detector { - - # FQCN of the failure detector implementation. - # It must implement akka.remote.FailureDetector and have - # a public constructor with a com.typesafe.config.Config and - # akka.actor.EventStream parameter. - implementation-class = "akka.remote.PhiAccrualFailureDetector" - - # How often keep-alive heartbeat messages should be sent to each connection. - heartbeat-interval = 1 s - - # Defines the failure detector threshold. - # A low threshold is prone to generate many wrong suspicions but ensures - # a quick detection in the event of a real crash. Conversely, a high - # threshold generates fewer mistakes but needs more time to detect - # actual crashes. - threshold = 10.0 - - # Number of the samples of inter-heartbeat arrival times to adaptively - # calculate the failure timeout for connections. - max-sample-size = 200 - - # Minimum standard deviation to use for the normal distribution in - # AccrualFailureDetector. Too low standard deviation might result in - # too much sensitivity for sudden, but normal, deviations in heartbeat - # inter arrival times. - min-std-deviation = 100 ms - - # Number of potentially lost/delayed heartbeats that will be - # accepted before considering it to be an anomaly. - # This margin is important to be able to survive sudden, occasional, - # pauses in heartbeat arrivals, due to for example garbage collection or - # network drop. - acceptable-heartbeat-pause = 10 s - - - # How often to check for nodes marked as unreachable by the failure - # detector - unreachable-nodes-reaper-interval = 1s - - # After the heartbeat request has been sent the first failure detection - # will start after this period, even though no heartbeat message has - # been received. 
- expected-response-after = 1 s - - } - - # remote deployment configuration section - deployment { - # If true, will only allow specific classes to be instantiated on this system via remote deployment - enable-whitelist = off - - whitelist = [] - } - #//#shared - } - -} - -akka { - - remote { - #//#classic - - ### Configuration for classic remoting - - # Timeout after which the startup of the remoting subsystem is considered - # to be failed. Increase this value if your transport drivers (see the - # enabled-transports section) need longer time to be loaded. - startup-timeout = 10 s - - # Timeout after which the graceful shutdown of the remoting subsystem is - # considered to be failed. After the timeout the remoting system is - # forcefully shut down. Increase this value if your transport drivers - # (see the enabled-transports section) need longer time to stop properly. - shutdown-timeout = 10 s - - # Before shutting down the drivers, the remoting subsystem attempts to flush - # all pending writes. This setting controls the maximum time the remoting is - # willing to wait before moving on to shut down the drivers. - flush-wait-on-shutdown = 2 s - - # Reuse inbound connections for outbound messages - use-passive-connections = on - - # Controls the backoff interval after a refused write is reattempted. - # (Transports may refuse writes if their internal buffer is full) - backoff-interval = 5 ms - - # Acknowledgment timeout of management commands sent to the transport stack. - command-ack-timeout = 30 s - - # The timeout for outbound associations to perform the handshake. - # If the transport is akka.remote.netty.tcp or akka.remote.netty.ssl - # the configured connection-timeout for the transport will be used instead. - handshake-timeout = 15 s - - ### Security settings - - # Enable untrusted mode for full security of server managed actors, prevents - # system messages from being sent by clients, e.g. messages like 'Create', - # 'Suspend', 'Resume', 'Terminate', 'Supervise', 'Link' etc. - untrusted-mode = off - - # When 'untrusted-mode=on' inbound actor selections are by default discarded. - # Actors with paths defined in this white list are granted permission to receive actor - # selections messages. - # E.g. trusted-selection-paths = ["/user/receptionist", "/user/namingService"] - trusted-selection-paths = [] - - # Should the remote server require that its peers share the same - # secure-cookie (defined in the 'remote' section)? Secure cookies are passed - # between peers during the initial handshake. Connections are refused if the initial - # message contains a mismatching cookie or the cookie is missing. - require-cookie = off - - # Deprecated since 2.4-M1 - secure-cookie = "" - - ### Logging - - # If this is "on", Akka will log all inbound messages at DEBUG level, - # if off then they are not logged - log-received-messages = off - - # If this is "on", Akka will log all outbound messages at DEBUG level, - # if off then they are not logged - log-sent-messages = off - - # Sets the log granularity level at which Akka logs remoting events. This setting - # can take the values OFF, ERROR, WARNING, INFO, DEBUG, or ON. For compatibility - # reasons the setting "on" will default to "debug" level. Please note that the effective - # logging level is still determined by the global logging level of the actor system: - # for example debug level remoting events will be only logged if the system - # is running with debug level logging. - # Failures to deserialize received messages also fall under this flag. 
- log-remote-lifecycle-events = on - - # Logging of message types with payload size in bytes larger than - # this value. Maximum detected size per message type is logged once, - # with an increase threshold of 10%. - # By default this feature is turned off. Activate it by setting the property to - # a value in bytes, such as 1000b. Note that for all messages larger than this - # limit there will be extra performance and scalability cost. - log-frame-size-exceeding = off - - # Log warning if the number of messages in the backoff buffer in the endpoint - # writer exceeds this limit. It can be disabled by setting the value to off. - log-buffer-size-exceeding = 50000 - - # After failed to establish an outbound connection, the remoting will mark the - # address as failed. This configuration option controls how much time should - # be elapsed before reattempting a new connection. While the address is - # gated, all messages sent to the address are delivered to dead-letters. - # Since this setting limits the rate of reconnects setting it to a - # very short interval (i.e. less than a second) may result in a storm of - # reconnect attempts. - retry-gate-closed-for = 5 s - - # After catastrophic communication failures that result in the loss of system - # messages or after the remote DeathWatch triggers the remote system gets - # quarantined to prevent inconsistent behavior. - # This setting controls how long the Quarantine marker will be kept around - # before being removed to avoid long-term memory leaks. - # WARNING: DO NOT change this to a small value to re-enable communication with - # quarantined nodes. Such feature is not supported and any behavior between - # the affected systems after lifting the quarantine is undefined. - prune-quarantine-marker-after = 5 d - - # If system messages have been exchanged between two systems (i.e. remote death - # watch or remote deployment has been used) a remote system will be marked as - # quarantined after the two system has no active association, and no - # communication happens during the time configured here. - # The only purpose of this setting is to avoid storing system message redelivery - # data (sequence number state, etc.) for an undefined amount of time leading to long - # term memory leak. Instead, if a system has been gone for this period, - # or more exactly - # - there is no association between the two systems (TCP connection, if TCP transport is used) - # - neither side has been attempting to communicate with the other - # - there are no pending system messages to deliver - # for the amount of time configured here, the remote system will be quarantined and all state - # associated with it will be dropped. - # - # Maximum value depends on the scheduler's max limit (default 248 days) and if configured - # to a longer duration this feature will effectively be disabled. Setting the value to - # 'off' will also disable the feature. Note that if disabled there is a risk of a long - # term memory leak. - quarantine-after-silence = 2 d - - # This setting defines the maximum number of unacknowledged system messages - # allowed for a remote system. If this limit is reached the remote system is - # declared to be dead and its UID marked as tainted. - system-message-buffer-size = 20000 - - # This setting defines the maximum idle time after an individual - # acknowledgement for system messages is sent. System message delivery - # is guaranteed by explicit acknowledgement messages. These acks are - # piggybacked on ordinary traffic messages. 
If no traffic is detected - # during the time period configured here, the remoting will send out - # an individual ack. - system-message-ack-piggyback-timeout = 0.3 s - - # This setting defines the time after internal management signals - # between actors (used for DeathWatch and supervision) that have not been - # explicitly acknowledged or negatively acknowledged are resent. - # Messages that were negatively acknowledged are always immediately - # resent. - resend-interval = 2 s - - # Maximum number of unacknowledged system messages that will be resent - # each 'resend-interval'. If you watch many (> 1000) remote actors you can - # increase this value to for example 600, but a too large limit (e.g. 10000) - # may flood the connection and might cause false failure detection to trigger. - # Test such a configuration by watching all actors at the same time and stop - # all watched actors at the same time. - resend-limit = 200 - - # WARNING: this setting should not be not changed unless all of its consequences - # are properly understood which assumes experience with remoting internals - # or expert advice. - # This setting defines the time after redelivery attempts of internal management - # signals are stopped to a remote system that has been not confirmed to be alive by - # this system before. - initial-system-message-delivery-timeout = 3 m - - ### Transports and adapters - - # List of the transport drivers that will be loaded by the remoting. - # A list of fully qualified config paths must be provided where - # the given configuration path contains a transport-class key - # pointing to an implementation class of the Transport interface. - # If multiple transports are provided, the address of the first - # one will be used as a default address. - enabled-transports = ["akka.remote.netty.tcp"] - - # Transport drivers can be augmented with adapters by adding their - # name to the applied-adapters setting in the configuration of a - # transport. The available adapters should be configured in this - # section by providing a name, and the fully qualified name of - # their corresponding implementation. The class given here - # must implement akka.akka.remote.transport.TransportAdapterProvider - # and have public constructor without parameters. - adapters { - gremlin = "akka.remote.transport.FailureInjectorProvider" - trttl = "akka.remote.transport.ThrottlerProvider" - } - - ### Default configuration for the Netty based transport drivers - - netty.tcp { - # The class given here must implement the akka.remote.transport.Transport - # interface and offer a public constructor which takes two arguments: - # 1) akka.actor.ExtendedActorSystem - # 2) com.typesafe.config.Config - transport-class = "akka.remote.transport.netty.NettyTransport" - - # Transport drivers can be augmented with adapters by adding their - # name to the applied-adapters list. The last adapter in the - # list is the adapter immediately above the driver, while - # the first one is the top of the stack below the standard - # Akka protocol - applied-adapters = [] - - transport-protocol = tcp - - # The default remote server port clients should connect to. - # Default is 2552 (AKKA), use 0 if you want a random available port - # This port needs to be unique for each actor system on the same machine. - port = 2552 - - # The hostname or ip clients should connect to. 
- # InetAddress.getLocalHost.getHostAddress is used if empty - hostname = "" - - # Use this setting to bind a network interface to a different port - # than remoting protocol expects messages at. This may be used - # when running akka nodes in a separated networks (under NATs or docker containers). - # Use 0 if you want a random available port. Examples: - # - # akka.remote.netty.tcp.port = 2552 - # akka.remote.netty.tcp.bind-port = 2553 - # Network interface will be bound to the 2553 port, but remoting protocol will - # expect messages sent to port 2552. - # - # akka.remote.netty.tcp.port = 0 - # akka.remote.netty.tcp.bind-port = 0 - # Network interface will be bound to a random port, and remoting protocol will - # expect messages sent to the bound port. - # - # akka.remote.netty.tcp.port = 2552 - # akka.remote.netty.tcp.bind-port = 0 - # Network interface will be bound to a random port, but remoting protocol will - # expect messages sent to port 2552. - # - # akka.remote.netty.tcp.port = 0 - # akka.remote.netty.tcp.bind-port = 2553 - # Network interface will be bound to the 2553 port, and remoting protocol will - # expect messages sent to the bound port. - # - # akka.remote.netty.tcp.port = 2552 - # akka.remote.netty.tcp.bind-port = "" - # Network interface will be bound to the 2552 port, and remoting protocol will - # expect messages sent to the bound port. - # - # akka.remote.netty.tcp.port if empty - bind-port = "" - - # Use this setting to bind a network interface to a different hostname or ip - # than remoting protocol expects messages at. - # Use "0.0.0.0" to bind to all interfaces. - # akka.remote.netty.tcp.hostname if empty - bind-hostname = "" - - # Enables SSL support on this transport - enable-ssl = false - - # Sets the connectTimeoutMillis of all outbound connections, - # i.e. how long a connect may take until it is timed out - connection-timeout = 15 s - - # If set to "<id.of.dispatcher>" then the specified dispatcher - # will be used to accept inbound connections, and perform IO. If "" then - # dedicated threads will be used. - # Please note that the Netty driver only uses this configuration and does - # not read the "akka.remote.use-dispatcher" entry. Instead it has to be - # configured manually to point to the same dispatcher if needed. - use-dispatcher-for-io = "" - - # Sets the high water mark for the in and outbound sockets, - # set to 0b for platform default - write-buffer-high-water-mark = 0b - - # Sets the low water mark for the in and outbound sockets, - # set to 0b for platform default - write-buffer-low-water-mark = 0b - - # Sets the send buffer size of the Sockets, - # set to 0b for platform default - send-buffer-size = 256000b - - # Sets the receive buffer size of the Sockets, - # set to 0b for platform default - receive-buffer-size = 256000b - - # Maximum message size the transport will accept, but at least - # 32000 bytes. - # Please note that UDP does not support arbitrary large datagrams, - # so this setting has to be chosen carefully when using UDP. - # Both send-buffer-size and receive-buffer-size settings has to - # be adjusted to be able to buffer messages of maximum size. - maximum-frame-size = 128000b - - # Sets the size of the connection backlog - backlog = 4096 - - # Enables the TCP_NODELAY flag, i.e. 
disables Nagle's algorithm - tcp-nodelay = on - - # Enables TCP Keepalive, subject to the O/S kernel's configuration - tcp-keepalive = on - - # Enables SO_REUSEADDR, which determines when an ActorSystem can open - # the specified listen port (the meaning differs between *nix and Windows) - # Valid values are "on", "off" and "off-for-windows" - # due to the following Windows bug: http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=4476378 - # "off-for-windows" of course means that it's "on" for all other platforms - tcp-reuse-addr = off-for-windows - - # Used to configure the number of I/O worker threads on server sockets - server-socket-worker-pool { - # Min number of threads to cap factor-based number to - pool-size-min = 2 - - # The pool size factor is used to determine thread pool size - # using the following formula: ceil(available processors * factor). - # Resulting size is then bounded by the pool-size-min and - # pool-size-max values. - pool-size-factor = 1.0 - - # Max number of threads to cap factor-based number to - pool-size-max = 2 - } - - # Used to configure the number of I/O worker threads on client sockets - client-socket-worker-pool { - # Min number of threads to cap factor-based number to - pool-size-min = 2 - - # The pool size factor is used to determine thread pool size - # using the following formula: ceil(available processors * factor). - # Resulting size is then bounded by the pool-size-min and - # pool-size-max values. - pool-size-factor = 1.0 - - # Max number of threads to cap factor-based number to - pool-size-max = 2 - } - - - } - - # DEPRECATED, since 2.5.0 - # The netty.udp transport is deprecated, please use Artery instead. - # See: https://doc.akka.io/docs/akka/current/remoting-artery.html - netty.udp = ${akka.remote.netty.tcp} - netty.udp { - transport-protocol = udp - } - - netty.ssl = ${akka.remote.netty.tcp} - netty.ssl = { - # Enable SSL/TLS encryption. - # This must be enabled on both the client and server to work. - enable-ssl = true - - # Factory of SSLEngine. - # Must implement akka.remote.transport.netty.SSLEngineProvider and have a public - # constructor with an ActorSystem parameter. - # The default ConfigSSLEngineProvider is configured by properties in section - # akka.remote.netty.ssl.security - # - # The SSLEngineProvider can also be defined via ActorSystemSetup with - # SSLEngineProviderSetup when starting the ActorSystem. That is useful when - # the SSLEngineProvider implementation requires other external constructor - # parameters or is created before the ActorSystem is created. - # If such SSLEngineProviderSetup is defined this config property is not used. 
- ssl-engine-provider = akka.remote.transport.netty.ConfigSSLEngineProvider - - security { - # This is the Java Key Store used by the server connection - key-store = "keystore" - - # This password is used for decrypting the key store - key-store-password = "changeme" - - # This password is used for decrypting the key - key-password = "changeme" - - # This is the Java Key Store used by the client connection - trust-store = "truststore" - - # This password is used for decrypting the trust store - trust-store-password = "changeme" - - # Protocol to use for SSL encryption, choose from: - # TLS 1.2 is available since JDK7, and default since JDK8: - # https://blogs.oracle.com/java-platform-group/entry/java_8_will_use_tls - protocol = "TLSv1.2" - - # Example: ["TLS_RSA_WITH_AES_128_CBC_SHA", "TLS_RSA_WITH_AES_256_CBC_SHA"] - # You need to install the JCE Unlimited Strength Jurisdiction Policy - # Files to use AES 256. - # More info here: - # http://docs.oracle.com/javase/7/docs/technotes/guides/security/SunProviders.html#SunJCEProvider - enabled-algorithms = ["TLS_RSA_WITH_AES_128_CBC_SHA"] - - # There are two options, and the default SecureRandom is recommended: - # "" or "SecureRandom" => (default) - # "SHA1PRNG" => Can be slow because of blocking issues on Linux - # - # Setting a value here may require you to supply the appropriate cipher - # suite (see enabled-algorithms section above) - random-number-generator = "" - - # Require mutual authentication between TLS peers - # - # Without mutual authentication only the peer that actively establishes a connection (TLS client side) - # checks if the passive side (TLS server side) sends over a trusted certificate. With the flag turned on, - # the passive side will also request and verify a certificate from the connecting peer. - # - # To prevent man-in-the-middle attacks this setting is enabled by default. - # - # Note: Nodes that are configured with this setting to 'on' might not be able to receive messages from nodes that - # run on older versions of akka-remote. This is because in versions of Akka < 2.4.12 the active side of the remoting - # connection will not send over certificates even if asked. - # - # However, starting with Akka 2.4.12, even with this setting "off", the active side (TLS client side) - # will use the given key-store to send over a certificate if asked. A rolling upgrade from versions of - # Akka < 2.4.12 can therefore work like this: - # - upgrade all nodes to an Akka version >= 2.4.12, in the best case the latest version, but keep this setting at "off" - # - then switch this flag to "on" and do again a rolling upgrade of all nodes - # The first step ensures that all nodes will send over a certificate when asked to. The second - # step will ensure that all nodes finally enforce the secure checking of client certificates. 
- require-mutual-authentication = on - } - } - - ### Default configuration for the failure injector transport adapter - - gremlin { - # Enable debug logging of the failure injector transport adapter - debug = off - } - - ### Default dispatcher for the remoting subsystem - - default-remote-dispatcher { - type = Dispatcher - executor = "fork-join-executor" - fork-join-executor { - parallelism-min = 2 - parallelism-factor = 0.5 - parallelism-max = 16 - } - throughput = 10 - } - - backoff-remote-dispatcher { - type = Dispatcher - executor = "fork-join-executor" - fork-join-executor { - # Min number of threads to cap factor-based parallelism number to - parallelism-min = 2 - parallelism-max = 2 - } - } - } -} -#//#classic - -akka { - - remote { - #//#artery - - ### Configuration for Artery, the new implementation of remoting - artery { - - # Enable the new remoting with this flag - enabled = off - - # Select the underlying transport implementation. - # - # Possible values: aeron-udp, tcp, tls-tcp - # - # The Aeron (UDP) transport is a high performance transport and should be used for systems - # that require high throughput and low latency. It is using more CPU than TCP when the - # system is idle or at low message rates. There is no encryption for Aeron. - # https://github.com/real-logic/aeron - # - # The TCP and TLS transport is implemented using Akka Streams TCP/TLS. This is the choice - # when encryption is needed, but it can also be used with plain TCP without TLS. It's also - # the obvious choice when UDP can't be used. - # It has very good performance (high throughput and low latency) but latency at high throughput - # might not be as good as the Aeron transport. - # It is using less CPU than Aeron when the system is idle or at low message rates. - transport = aeron-udp - - # Canonical address is the address other clients should connect to. - # Artery transport will expect messages to this address. - canonical { - - # The default remote server port clients should connect to. - # Default is 25520, use 0 if you want a random available port - # This port needs to be unique for each actor system on the same machine. - port = 25520 - - # Hostname clients should connect to. Can be set to an ip, hostname - # or one of the following special values: - # "<getHostAddress>" InetAddress.getLocalHost.getHostAddress - # "<getHostName>" InetAddress.getLocalHost.getHostName - # - hostname = "<getHostAddress>" - } - - # Use these settings to bind a network interface to a different address - # than artery expects messages at. This may be used when running Akka - # nodes in a separated networks (under NATs or in containers). If canonical - # and bind addresses are different, then network configuration that relays - # communications from canonical to bind addresses is expected. - bind { - - # Port to bind a network interface to. Can be set to a port number - # of one of the following special values: - # 0 random available port - # "" akka.remote.artery.canonical.port - # - port = "" - - # Hostname to bind a network interface to. Can be set to an ip, hostname - # or one of the following special values: - # "0.0.0.0" all interfaces - # "" akka.remote.artery.canonical.hostname - # "<getHostAddress>" InetAddress.getLocalHost.getHostAddress - # "<getHostName>" InetAddress.getLocalHost.getHostName - # - hostname = "" - - # Time to wait for Aeron/TCP to bind - bind-timeout = 3s - } - - # Periodically log out all Aeron counters. 
See https://github.com/real-logic/aeron/wiki/Monitoring-and-Debugging#counters - # Only used when transport is aeron-udp. - log-aeron-counters = false - - # Actor paths to use the large message stream for when a message - # is sent to them over remoting. The large message stream dedicated - # is separate from "normal" and system messages so that sending a - # large message does not interfere with them. - # Entries should be the full path to the actor. Wildcards in the form of "*" - # can be supplied at any place and matches any name at that segment - - # "/user/supervisor/actor/*" will match any direct child to actor, - # while "/supervisor/*/child" will match any grandchild to "supervisor" that - # has the name "child" - # Entries have to be specified on both the sending and receiving side. - # Messages sent to ActorSelections will not be passed through the large message - # stream, to pass such messages through the large message stream the selections - # but must be resolved to ActorRefs first. - large-message-destinations = [] - - # Enable untrusted mode, which discards inbound system messages, PossiblyHarmful and - # ActorSelection messages. E.g. remote watch and remote deployment will not work. - # ActorSelection messages can be enabled for specific paths with the trusted-selection-paths - untrusted-mode = off - - # When 'untrusted-mode=on' inbound actor selections are by default discarded. - # Actors with paths defined in this white list are granted permission to receive actor - # selections messages. - # E.g. trusted-selection-paths = ["/user/receptionist", "/user/namingService"] - trusted-selection-paths = [] - - # If this is "on", all inbound remote messages will be logged at DEBUG level, - # if off then they are not logged - log-received-messages = off - - # If this is "on", all outbound remote messages will be logged at DEBUG level, - # if off then they are not logged - log-sent-messages = off - - advanced { - - # Maximum serialized message size, including header data. - maximum-frame-size = 256 KiB - - # Direct byte buffers are reused in a pool with this maximum size. - # Each buffer has the size of 'maximum-frame-size'. - # This is not a hard upper limit on number of created buffers. Additional - # buffers will be created if needed, e.g. when using many outbound - # associations at the same time. Such additional buffers will be garbage - # collected, which is not as efficient as reusing buffers in the pool. - buffer-pool-size = 128 - - # Maximum serialized message size for the large messages, including header data. - # It is currently restricted to 1/8th the size of a term buffer that can be - # configured by setting the 'aeron.term.buffer.length' system property. - # See 'large-message-destinations'. - maximum-large-frame-size = 2 MiB - - # Direct byte buffers for the large messages are reused in a pool with this maximum size. - # Each buffer has the size of 'maximum-large-frame-size'. - # See 'large-message-destinations'. - # This is not a hard upper limit on number of created buffers. Additional - # buffers will be created if needed, e.g. when using many outbound - # associations at the same time. Such additional buffers will be garbage - # collected, which is not as efficient as reusing buffers in the pool. - large-buffer-pool-size = 32 - - # For enabling testing features, such as blackhole in akka-remote-testkit. - test-mode = off - - # Settings for the materializer that is used for the remote streams. 
- materializer = ${akka.stream.materializer} - - # If set to a nonempty string artery will use the given dispatcher for - # the ordinary and large message streams, otherwise the default dispatcher is used. - use-dispatcher = "akka.remote.default-remote-dispatcher" - - # If set to a nonempty string remoting will use the given dispatcher for - # the control stream, otherwise the default dispatcher is used. - # It can be good to not use the same dispatcher for the control stream as - # the dispatcher for the ordinary message stream so that heartbeat messages - # are not disturbed. - use-control-stream-dispatcher = "" - - # Controls whether to start the Aeron media driver in the same JVM or use external - # process. Set to 'off' when using external media driver, and then also set the - # 'aeron-dir'. - # Only used when transport is aeron-udp. - embedded-media-driver = on - - # Directory used by the Aeron media driver. It's mandatory to define the 'aeron-dir' - # if using external media driver, i.e. when 'embedded-media-driver = off'. - # Embedded media driver will use a this directory, or a temporary directory if this - # property is not defined (empty). - # Only used when transport is aeron-udp. - aeron-dir = "" - - # Whether to delete aeron embedded driver directory upon driver stop. - # Only used when transport is aeron-udp. - delete-aeron-dir = yes - - # Level of CPU time used, on a scale between 1 and 10, during backoff/idle. - # The tradeoff is that to have low latency more CPU time must be used to be - # able to react quickly on incoming messages or send as fast as possible after - # backoff backpressure. - # Level 1 strongly prefer low CPU consumption over low latency. - # Level 10 strongly prefer low latency over low CPU consumption. - # Only used when transport is aeron-udp. - idle-cpu-level = 5 - - # Total number of inbound lanes, shared among all inbound associations. A value - # greater than 1 means that deserialization can be performed in parallel for - # different destination actors. The selection of lane is based on consistent - # hashing of the recipient ActorRef to preserve message ordering per receiver. - # Lowest latency can be achieved with inbound-lanes=1 because of one less - # asynchronous boundary. - inbound-lanes = 4 - - # Number of outbound lanes for each outbound association. A value greater than 1 - # means that serialization and other work can be performed in parallel for different - # destination actors. The selection of lane is based on consistent hashing of the - # recipient ActorRef to preserve message ordering per receiver. Note that messages - # for different destination systems (hosts) are handled by different streams also - # when outbound-lanes=1. Lowest latency can be achieved with outbound-lanes=1 - # because of one less asynchronous boundary. - outbound-lanes = 1 - - # Size of the send queue for outgoing messages. Messages will be dropped if - # the queue becomes full. This may happen if you send a burst of many messages - # without end-to-end flow control. Note that there is one such queue per - # outbound association. The trade-off of using a larger queue size is that - # it consumes more memory, since the queue is based on preallocated array with - # fixed size. - outbound-message-queue-size = 3072 - - # Size of the send queue for outgoing control messages, such as system messages. - # If this limit is reached the remote system is declared to be dead and its UID - # marked as quarantined. Note that there is one such queue per outbound association. 
- # It is a linked queue so it will not use more memory than needed but by increasing it - # too much you may risk OutOfMemoryError in the worst case. - outbound-control-queue-size = 20000 - - # Size of the send queue for outgoing large messages. Messages will be dropped if - # the queue becomes full. This may happen if you send a burst of many messages - # without end-to-end flow control. Note that there is one such queue per - # outbound association. - # It is a linked queue so it will not use more memory than needed but by increasing it - # too much you may risk OutOfMemoryError, especially since the message payload - # of these messages may be large. - outbound-large-message-queue-size = 256 - - # This setting defines the maximum number of unacknowledged system messages - # allowed for a remote system. If this limit is reached the remote system is - # declared to be dead and its UID marked as quarantined. - system-message-buffer-size = 20000 - - # unacknowledged system messages are re-delivered with this interval - system-message-resend-interval = 1 second - - # Timeout of establishing outbound connections. - # Only used when transport is tcp or tls-tcp. - connection-timeout = 5 seconds - - # The timeout for outbound associations to perform the initial handshake. - # This timeout must be greater than the 'image-liveness-timeout' when - # transport is aeron-udp. - handshake-timeout = 20 seconds - - # incomplete initial handshake attempt is retried with this interval - handshake-retry-interval = 1 second - - # Handshake requests are performed periodically with this interval, - # also after the handshake has been completed to be able to establish - # a new session with a restarted destination system. - inject-handshake-interval = 1 second - - # messages that are not accepted by Aeron are dropped after retrying for this period - # Only used when transport is aeron-udp. - give-up-message-after = 60 seconds - - # System messages that are not acknowledged after re-sending for this period are - # dropped and will trigger quarantine. The value should be longer than the length - # of a network partition that you need to survive. - give-up-system-message-after = 6 hours - - # Outbound streams are stopped when they haven't been used for this duration. - # They are started again when new messages are sent. - stop-idle-outbound-after = 5 minutes - - # Outbound streams are quarantined when they haven't been used for this duration - # to cleanup resources used by the association, such as compression tables. - # This will cleanup association to crashed systems that didn't announce their - # termination. - # The value should be longer than the length of a network partition that you - # need to survive. - # The value must also be greater than stop-idle-outbound-after. - # Once every 1/10 of this duration an extra handshake message will be sent. - # Therefore it's also recommended to use a value that is greater than 10 times - # the stop-idle-outbound-after, since otherwise the idle streams will not be - # stopped. - quarantine-idle-outbound-after = 6 hours - - # Stop outbound stream of a quarantined association after this idle timeout, i.e. - # when not used any more. - stop-quarantined-after-idle = 3 seconds - - # After catastrophic communication failures that could result in the loss of system - # messages or after the remote DeathWatch triggers the remote system gets - # quarantined to prevent inconsistent behavior. 
- # This setting controls how long the quarantined association will be kept around - # before being removed to avoid long-term memory leaks. It must be quarantined - # and also unused for this duration before it's removed. When removed the historical - # information about which UIDs were quarantined for that hostname:port is - # gone which could result in communication with a previously quarantined node - # if it wakes up again. Therefore this shouldn't be set too low. - remove-quarantined-association-after = 1 h - - # during ActorSystem termination the remoting will wait this long for - # an acknowledgment by the destination system that flushing of outstanding - # remote messages has been completed - shutdown-flush-timeout = 1 second - - # See 'inbound-max-restarts' - inbound-restart-timeout = 5 seconds - - # Max number of restarts within 'inbound-restart-timeout' for the inbound streams. - # If more restarts occur the ActorSystem will be terminated. - inbound-max-restarts = 5 - - # Retry outbound connection after this backoff. - # Only used when transport is tcp or tls-tcp. - outbound-restart-backoff = 1 second - - # See 'outbound-max-restarts' - outbound-restart-timeout = 5 seconds - - # Max number of restarts within 'outbound-restart-timeout' for the outbound streams. - # If more restarts occur the ActorSystem will be terminated. - outbound-max-restarts = 5 - - # Timeout after which the aeron driver considers a client dead if it has - # not received keepalive messages from that client. - # Only used when transport is aeron-udp. - client-liveness-timeout = 20 seconds - - # Timeout for each of the INACTIVE and LINGER stages an aeron image - # will be retained for when it is no longer referenced. - # This timeout must be less than the 'handshake-timeout'. - # Only used when transport is aeron-udp. - image-liveness-timeout = 10 seconds - - # Timeout after which the aeron driver is considered dead - # if it does not update its C'n'C timestamp. - # Only used when transport is aeron-udp. - driver-timeout = 20 seconds - - flight-recorder { - // FIXME it should be enabled by default when we have a good solution for naming the files - enabled = off - # Controls where the flight recorder file will be written. There are three options: - # 1. Empty: a file will be generated in the temporary directory of the OS - # 2. A relative or absolute path ending with ".afr": this file will be used - # 3. A relative or absolute path: this directory will be used, the file will get a random file name - destination = "" - } - - # compression of common strings in remoting messages, like actor destinations, serializers etc - compression { - - actor-refs { - # Max number of compressed actor-refs - # Note that compression tables are "rolling" (i.e. a new table replaces the old - # compression table once in a while), and this setting is only about the total number - # of compressions within a single such table. - # Must be a positive natural number. - max = 256 - - # interval between new table compression advertisements. - # this means the time during which we collect heavy-hitter data and then turn it into a compression table. - advertisement-interval = 1 minute - } - manifests { - # Max number of compressed manifests - # Note that compression tables are "rolling" (i.e. a new table replaces the old - # compression table once in a while), and this setting is only about the total number - # of compressions within a single such table. - # Must be a positive natural number. 
- max = 256 - - # interval between new table compression advertisements. - # this means the time during which we collect heavy-hitter data and then turn it into a compression table. - advertisement-interval = 1 minute - } - } - - # List of fully qualified class names of remote instruments which should - # be initialized and used for monitoring of remote messages. - # The class must extend akka.remote.artery.RemoteInstrument and - # have a public constructor with empty parameters or one ExtendedActorSystem - # parameter. - # A new instance of RemoteInstrument will be created for each encoder and decoder. - # It's only called from the stage, so if it doesn't delegate to any shared instance - # it doesn't have to be thread-safe. - # Refer to `akka.remote.artery.RemoteInstrument` for more information. - instruments = ${?akka.remote.artery.advanced.instruments} [] - - } - - # SSL configuration that is used when transport=tls-tcp. - ssl { - # Factory of SSLEngine. - # Must implement akka.remote.artery.tcp.SSLEngineProvider and have a public - # constructor with an ActorSystem parameter. - # The default ConfigSSLEngineProvider is configured by properties in section - # akka.remote.artery.ssl.config-ssl-engine - ssl-engine-provider = akka.remote.artery.tcp.ConfigSSLEngineProvider - - # Config of akka.remote.artery.tcp.ConfigSSLEngineProvider - config-ssl-engine { - - # This is the Java Key Store used by the server connection - key-store = "keystore" - - # This password is used for decrypting the key store - # Use substitution from environment variables for passwords. Don't define - # real passwords in config files. key-store-password=${SSL_KEY_STORE_PASSWORD} - key-store-password = "changeme" - - # This password is used for decrypting the key - # Use substitution from environment variables for passwords. Don't define - # real passwords in config files. key-password=${SSL_KEY_PASSWORD} - key-password = "changeme" - - # This is the Java Key Store used by the client connection - trust-store = "truststore" - - # This password is used for decrypting the trust store - # Use substitution from environment variables for passwords. Don't define - # real passwords in config files. trust-store-password=${SSL_TRUST_STORE_PASSWORD} - trust-store-password = "changeme" - - # Protocol to use for SSL encryption, choose from: - # TLS 1.2 is available since JDK7, and default since JDK8: - # https://blogs.oracle.com/java-platform-group/entry/java_8_will_use_tls - protocol = "TLSv1.2" - - # Example: ["TLS_RSA_WITH_AES_128_CBC_SHA", "TLS_RSA_WITH_AES_256_CBC_SHA"] - # You need to install the JCE Unlimited Strength Jurisdiction Policy - # Files to use AES 256. - # More info here: - # http://docs.oracle.com/javase/7/docs/technotes/guides/security/SunProviders.html#SunJCEProvider - enabled-algorithms = ["TLS_RSA_WITH_AES_128_CBC_SHA"] - - # There are two options, and the default SecureRandom is recommended: - # "" or "SecureRandom" => (default) - # "SHA1PRNG" => Can be slow because of blocking issues on Linux - # - # Setting a value here may require you to supply the appropriate cipher - # suite (see enabled-algorithms section above) - random-number-generator = "" - - # Require mutual authentication between TLS peers - # - # Without mutual authentication only the peer that actively establishes a connection (TLS client side) - # checks if the passive side (TLS server side) sends over a trusted certificate. With the flag turned on, - # the passive side will also request and verify a certificate from the connecting peer. 
-#####################################
-# Akka Stream Reference Config File #
-#####################################
-
-akka {
-  stream {
-
-    # Default materializer settings
-    materializer {
-
-      # Initial size of buffers used in stream elements
-      initial-input-buffer-size = 4
-      # Maximum size of buffers used in stream elements
-      max-input-buffer-size = 16
-
-      # Fully qualified config path which holds the dispatcher configuration
-      # to be used by ActorMaterializer when creating Actors.
-      # When this value is left empty, the default-dispatcher will be used.
-      dispatcher = ""
-
-      blocking-io-dispatcher = "akka.stream.default-blocking-io-dispatcher"
-
-      # Cleanup leaked publishers and subscribers when they are not used within a given
-      # deadline
-      subscription-timeout {
-        # When the subscription timeout is reached, one of the following strategies is
-        # applied to the "stale" publisher:
-        # cancel - cancel it (via `onError` or by subscribing to the publisher and
-        #          `cancel()`ing the subscription right away)
-        # warn   - log a warning statement about the stale element (then drop the
-        #          reference to it)
-        # noop   - do nothing (not recommended)
-        mode = cancel
-
-        # Time after which a subscriber / publisher is considered stale and eligible
-        # for cancellation (see `akka.stream.subscription-timeout.mode`)
-        timeout = 5s
-      }
-
-      # Enable additional troubleshooting logging at DEBUG log level
-      debug-logging = off
-
-      # Maximum number of elements emitted in batch if downstream signals large demand
-      output-burst-limit = 1000
-
-      # Enable automatic fusing of all graphs that are run. For short-lived streams
-      # this may cause an initial runtime overhead, but most of the time fusing is
-      # desirable since it reduces the number of Actors that are created.
-      # Deprecated: since Akka 2.5.0 this setting does not have any effect.
-      auto-fusing = on
-
-      # Those stream elements which have explicit buffers (like mapAsync, mapAsyncUnordered,
-      # buffer, flatMapMerge, Source.actorRef, Source.queue, etc.) will preallocate a fixed
-      # buffer upon stream materialization if the requested buffer size is less than this
-      # configuration parameter. The default is very high because failing early is better
-      # than failing under load.
-      #
-      # Buffers sized larger than this will dynamically grow/shrink and consume more memory
-      # per element than the fixed size buffers.
-      max-fixed-buffer-size = 1000000000
-
-      # Maximum number of sync messages that an actor can process for stream-to-substream
-      # communication. This parameter allows interrupting synchronous processing to get
-      # upstream/downstream messages, which accelerates message processing that happens
-      # within the same actor while keeping the system responsive.
-      sync-processing-limit = 1000
-
-      debug {
-        # Enables the fuzzing mode which increases the chance of race conditions
-        # by aggressively reordering events and making certain operations more
-        # concurrent than usual.
-        # This setting is for testing purposes, NEVER enable this in a production
-        # environment!
-        # To get the best results, try combining this setting with a throughput
-        # of 1 on the corresponding dispatchers.
-        fuzzing-mode = off
-      }
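For reference, the materializer defaults above can also be tuned programmatically when the materializer is created. A hedged Java sketch for this Akka generation; the system name is made up:

import akka.actor.ActorSystem;
import akka.stream.ActorMaterializer;
import akka.stream.ActorMaterializerSettings;

// Illustrative only: mirror initial-input-buffer-size / max-input-buffer-size
// and debug-logging in code instead of configuration.
public final class MaterializerTuning {
    public static void main(String[] args) {
        ActorSystem system = ActorSystem.create("demo");
        ActorMaterializerSettings settings =
                ActorMaterializerSettings.create(system)
                        .withInputBuffer(4, 16)   // initial / max input buffer sizes
                        .withDebugLogging(false); // debug-logging = off
        ActorMaterializer materializer = ActorMaterializer.create(settings, system);
        System.out.println("materializer created: " + materializer);
        system.terminate();
    }
}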
-      io.tcp {
-        # The outgoing bytes are accumulated in a buffer while waiting for acknowledgment
-        # of a pending write. This improves throughput for small messages (frames) without
-        # sacrificing latency. While waiting for the ack the stage will eagerly pull
-        # from upstream until the buffer exceeds this size. That means that the buffer may hold
-        # slightly more bytes than this limit (at most one element more). It can be set to 0
-        # to disable the usage of the buffer.
-        write-buffer-size = 16 KiB
-      }
-
-      //#stream-ref
-      # Configure defaults for SourceRef and SinkRef
-      stream-ref {
-        # Buffer of a SinkRef that is used to batch Request elements from the other side of the stream ref.
-        #
-        # The buffer will be attempted to be filled eagerly even while the local stage did not request elements,
-        # because the delay of requesting over network boundaries is much higher.
-        buffer-capacity = 32
-
-        # Demand is signalled by sending a cumulative demand message ("requesting messages until the n-th sequence number").
-        # Using a cumulative demand model allows us to re-deliver the demand message in case of message loss (which should
-        # be very rare in any case, yet possible -- mostly under connection break-down and re-establishment).
-        #
-        # The semantics of handling and updating the demand however are in line with what Reactive Streams dictates.
-        #
-        # In normal operation, demand is signalled in response to arriving elements; however, if no new elements arrive
-        # within `demand-redelivery-interval` a re-delivery of the demand will be triggered, assuming that it may have gotten lost.
-        demand-redelivery-interval = 1 second
-
-        # Subscription timeout, during which the "remote side" MUST subscribe (materialize) the handed-out stream ref.
-        # This timeout does not have to be very low in normal situations, since the remote side may also need to
-        # prepare things before it is ready to materialize the reference. However, the timeout is needed to avoid leaking
-        # inactive streams which are never subscribed to.
-        subscription-timeout = 30 seconds
-
-        # In order to guard the receiving end of a stream ref from never terminating (since it awaits a Completion or Failed
-        # message) after / before a Terminated is seen, a special timeout is applied once Terminated is received by it.
-        # This allows us to terminate stream refs that have been targeted to other nodes which are Downed, and as such the
-        # other side of the stream ref would never send the "final" terminal message.
-        #
-        # The timeout specifically means the time between the Terminated signal being received and when the local SourceRef
-        # determines to fail itself, assuming there was message loss or a complete partition of the completion signal.
-        final-termination-signal-deadline = 2 seconds
-      }
-      //#stream-ref
-    }
-
-    # Deprecated, use akka.stream.materializer.blocking-io-dispatcher; this setting
-    # was never applied because of bug #24357.
-    # It must still have a valid value because it is used from Akka HTTP.
-    blocking-io-dispatcher = "akka.stream.default-blocking-io-dispatcher"
-
-    default-blocking-io-dispatcher {
-      type = "Dispatcher"
-      executor = "thread-pool-executor"
-      throughput = 1
-
-      thread-pool-executor {
-        fixed-pool-size = 16
-      }
-    }
-
-  }
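The stream-ref settings above govern SourceRef/SinkRef handshakes across the cluster. A rough Java sketch of materializing a SourceRef that these timeouts would apply to; all names are illustrative, and in this Akka series the ref materializes as a CompletionStage:

import java.util.concurrent.CompletionStage;

import akka.NotUsed;
import akka.actor.ActorSystem;
import akka.stream.ActorMaterializer;
import akka.stream.SourceRef;
import akka.stream.javadsl.Source;
import akka.stream.javadsl.StreamRefs;

// Illustrative only: hand one end of a stream over as a SourceRef. In a real
// deployment the ref is sent to a remote actor, which would run
// ref.getSource().runForeach(...) on its side; serialization of the ref
// requires a remoting-enabled ActorSystem.
public final class StreamRefSketch {
    public static void main(String[] args) {
        ActorSystem system = ActorSystem.create("demo");
        ActorMaterializer mat = ActorMaterializer.create(system);

        Source<Integer, NotUsed> data = Source.range(1, 100);
        CompletionStage<SourceRef<Integer>> ref =
                data.runWith(StreamRefs.sourceRef(), mat);

        ref.thenAccept(r -> System.out.println("materialized ref: " + r));
        system.terminate();
    }
}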
-
-  # Configure overrides to ssl-configuration here (to be used by akka-streams, and akka-http – i.e.
-  # when serving https connections)
-  ssl-config {
-    protocol = "TLSv1.2"
-  }
-
-  actor {
-
-    serializers {
-      akka-stream-ref = "akka.stream.serialization.StreamRefSerializer"
-    }
-
-    serialization-bindings {
-      "akka.stream.SinkRef" = akka-stream-ref
-      "akka.stream.SourceRef" = akka-stream-ref
-      "akka.stream.impl.streamref.StreamRefsProtocol" = akka-stream-ref
-    }
-
-    serialization-identifiers {
-      "akka.stream.serialization.StreamRefSerializer" = 30
-    }
-  }
-}
-
-# ssl configuration
-# folded in from the former ssl-config-akka module
-ssl-config {
-  logger = "com.typesafe.sslconfig.akka.util.AkkaLoggerBridge"
-}
-
-############################################
-# Akka Cluster Tools Reference Config File #
-############################################
-
-# This is the reference config file that contains all the default settings.
-# Make your edits/overrides in your application.conf.
-
-# //#pub-sub-ext-config
-# Settings for the DistributedPubSub extension
-akka.cluster.pub-sub {
-  # Actor name of the mediator actor, /system/distributedPubSubMediator
-  name = distributedPubSubMediator
-
-  # Start the mediator on members tagged with this role.
-  # All members are used if undefined or empty.
-  role = ""
-
-  # The routing logic to use for 'Send'.
-  # Possible values: random, round-robin, broadcast
-  routing-logic = random
-
-  # How often the DistributedPubSubMediator should send out gossip information
-  gossip-interval = 1s
-
-  # Removed entries are pruned after this duration
-  removed-time-to-live = 120s
-
-  # Maximum number of elements to transfer in one message when synchronizing the registries.
-  # The next chunk will be transferred in the next round of gossip.
-  max-delta-elements = 3000
-
-  # When a message is published to a topic with no subscribers, send it to the dead letters.
-  send-to-dead-letters-when-no-subscribers = on
-
-  # The id of the dispatcher to use for DistributedPubSubMediator actors.
-  # If not specified, the default dispatcher is used.
-  # If specified, you need to define the settings of the actual dispatcher.
-  use-dispatcher = ""
-}
-# //#pub-sub-ext-config
-
-# Protobuf serializer for cluster DistributedPubSubMediator messages
-akka.actor {
-  serializers {
-    akka-pubsub = "akka.cluster.pubsub.protobuf.DistributedPubSubMessageSerializer"
-  }
-  serialization-bindings {
-    "akka.cluster.pubsub.DistributedPubSubMessage" = akka-pubsub
-  }
-  serialization-identifiers {
-    "akka.cluster.pubsub.protobuf.DistributedPubSubMessageSerializer" = 9
-  }
-  # Adds the protobuf serialization of pub-sub messages to groups
-  additional-serialization-bindings {
-    "akka.cluster.pubsub.DistributedPubSubMediator$Internal$SendToOneSubscriber" = akka-pubsub
-  }
-}
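A minimal single-node Java sketch of the DistributedPubSub mediator these settings configure: one local subscriber plus a publish. The topic name, system name, and actor names are invented for the example, and the node joins itself so the mediator can run:

import akka.actor.AbstractActor;
import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import akka.actor.Props;
import akka.cluster.Cluster;
import akka.cluster.pubsub.DistributedPubSub;
import akka.cluster.pubsub.DistributedPubSubMediator;
import com.typesafe.config.ConfigFactory;

// Illustrative only: subscribe to a topic via the mediator configured above
// (/system/distributedPubSubMediator), then publish to it once subscribed.
public class PubSubSketch extends AbstractActor {
    private final ActorRef mediator =
            DistributedPubSub.get(getContext().getSystem()).mediator();

    @Override
    public void preStart() {
        mediator.tell(new DistributedPubSubMediator.Subscribe("experiments", getSelf()), getSelf());
    }

    @Override
    public Receive createReceive() {
        return receiveBuilder()
                .match(DistributedPubSubMediator.SubscribeAck.class, ack ->
                        mediator.tell(new DistributedPubSubMediator.Publish("experiments", "hello"), getSelf()))
                .match(String.class, msg -> {
                    System.out.println("received: " + msg);
                    getContext().getSystem().terminate();
                })
                .build();
    }

    public static void main(String[] args) {
        ActorSystem system = ActorSystem.create("demo", ConfigFactory.parseString(
                "akka.actor.provider = cluster\n" +
                "akka.remote.netty.tcp.port = 0"));
        // A one-node cluster is enough for a local demonstration.
        Cluster.get(system).join(Cluster.get(system).selfAddress());
        system.actorOf(Props.create(PubSubSketch.class), "subscriber");
    }
}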
-
-# //#receptionist-ext-config
-# Settings for the ClusterClientReceptionist extension
-akka.cluster.client.receptionist {
-  # Actor name of the ClusterReceptionist actor, /system/receptionist
-  name = receptionist
-
-  # Start the receptionist on members tagged with this role.
-  # All members are used if undefined or empty.
-  role = ""
-
-  # The receptionist will send this number of contact points to the client
-  number-of-contacts = 3
-
-  # The actor that tunnels response messages to the client will be stopped
-  # after this time of inactivity.
-  response-tunnel-receive-timeout = 30s
-
-  # The id of the dispatcher to use for ClusterReceptionist actors.
-  # If not specified, the default dispatcher is used.
-  # If specified, you need to define the settings of the actual dispatcher.
-  use-dispatcher = ""
-
-  # How often failure detection heartbeat messages should be received for
-  # each ClusterClient
-  heartbeat-interval = 2s
-
-  # Number of potentially lost/delayed heartbeats that will be
-  # accepted before considering it to be an anomaly.
-  # The ClusterReceptionist is using the akka.remote.DeadlineFailureDetector, which
-  # will trigger if there are no heartbeats within the duration
-  # heartbeat-interval + acceptable-heartbeat-pause, i.e. 15 seconds with
-  # the default settings.
-  acceptable-heartbeat-pause = 13s
-
-  # Failure detection checking interval for checking all ClusterClients
-  failure-detection-interval = 2s
-}
-# //#receptionist-ext-config
-
-# //#cluster-client-config
-# Settings for the ClusterClient
-akka.cluster.client {
-  # Actor paths of the ClusterReceptionist actors on the servers (cluster nodes)
-  # that the client will try to contact initially. It is mandatory to specify
-  # at least one initial contact.
-  # Comma-separated full actor paths defined by a string on the form of
-  # "akka.tcp://system@hostname:port/system/receptionist"
-  initial-contacts = []
-
-  # Interval at which the client retries to establish contact with one of the
-  # ClusterReceptionists on the servers (cluster nodes)
-  establishing-get-contacts-interval = 3s
-
-  # Interval at which the client will ask the ClusterReceptionist for
-  # new contact points to be used for the next reconnect.
-  refresh-contacts-interval = 60s
-
-  # How often failure detection heartbeat messages should be sent
-  heartbeat-interval = 2s
-
-  # Number of potentially lost/delayed heartbeats that will be
-  # accepted before considering it to be an anomaly.
-  # The ClusterClient is using the akka.remote.DeadlineFailureDetector, which
-  # will trigger if there are no heartbeats within the duration
-  # heartbeat-interval + acceptable-heartbeat-pause, i.e. 15 seconds with
-  # the default settings.
-  acceptable-heartbeat-pause = 13s
-
-  # If connection to the receptionist is not established, the client will buffer
-  # this number of messages and deliver them when the connection is established.
-  # When the buffer is full, old messages will be dropped when new messages are sent
-  # via the client. Use 0 to disable buffering, i.e. messages will be dropped
-  # immediately if the location of the singleton is unknown.
-  # Maximum allowed buffer size is 10000.
-  buffer-size = 1000
-
-  # If connection to the receptionist is lost and the client has not been
-  # able to acquire a new connection for this long, the client will stop itself.
-  # This duration makes it possible to watch the cluster client and react to a more permanent
-  # loss of connection with the cluster, for example by accessing some kind of
-  # service registry for an updated set of initial contacts to start a new cluster client with.
-  # If this is not wanted, it can be set to "off" to disable the timeout and retry
-  # forever.
-  reconnect-timeout = off
-}
-# //#cluster-client-config
-
-# Protobuf serializer for ClusterClient messages
-akka.actor {
-  serializers {
-    akka-cluster-client = "akka.cluster.client.protobuf.ClusterClientMessageSerializer"
-  }
-  serialization-bindings {
-    "akka.cluster.client.ClusterClientMessage" = akka-cluster-client
-  }
-  serialization-identifiers {
-    "akka.cluster.client.protobuf.ClusterClientMessageSerializer" = 15
-  }
-}
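A hypothetical client-side counterpart to the receptionist settings above, in Java. The host, port, and destination actor path are placeholders, and the client system is assumed to run with remoting enabled:

import static java.util.Collections.singleton;

import java.util.Set;

import akka.actor.ActorPath;
import akka.actor.ActorPaths;
import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import akka.cluster.client.ClusterClient;
import akka.cluster.client.ClusterClientSettings;

// Illustrative only: contact a cluster through its ClusterClientReceptionist.
public final class ClientSketch {
    public static void main(String[] args) {
        ActorSystem system = ActorSystem.create("client");

        // One initial contact is mandatory (see initial-contacts above).
        Set<ActorPath> initialContacts = singleton(
                ActorPaths.fromString("akka.tcp://woken@127.0.0.1:8088/system/receptionist"));

        ActorRef client = system.actorOf(
                ClusterClient.props(
                        ClusterClientSettings.create(system)
                                .withInitialContacts(initialContacts)),
                "client");

        // Route a message to an actor registered with the receptionist under
        // /user/entrypoint (a made-up path) on any node of the cluster.
        client.tell(new ClusterClient.Send("/user/entrypoint", "ping", false),
                ActorRef.noSender());
    }
}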
-
-# //#singleton-config
-akka.cluster.singleton {
-  # The actor name of the child singleton actor.
-  singleton-name = "singleton"
-
-  # Singleton among the nodes tagged with the specified role.
-  # If the role is not specified, it's a singleton among all nodes in the cluster.
-  role = ""
-
-  # When a node is becoming oldest it sends a hand-over request to the previous oldest,
-  # which might be leaving the cluster. This is retried with this interval until
-  # the previous oldest confirms that the hand-over has started or the previous
-  # oldest member is removed from the cluster (+ akka.cluster.down-removal-margin).
-  hand-over-retry-interval = 1s
-
-  # The number of retries is derived from hand-over-retry-interval and
-  # akka.cluster.down-removal-margin (or ClusterSingletonManagerSettings.removalMargin),
-  # but it will never be less than this property.
-  # If, after these hand-over retries, it is still not able to exchange the hand-over
-  # messages with the previous oldest, it will restart itself by throwing
-  # ClusterSingletonManagerIsStuck, to start from a clean state. After that it will
-  # still not start the singleton instance until the previous oldest node has been
-  # removed from the cluster.
-  # On the other side, on the previous oldest node, the same number of retries minus 3
-  # is used, and after that the singleton instance is stopped.
-  # For large clusters it might be necessary to increase this to avoid too-early timeouts
-  # while gossip dissemination of the Leaving-to-Exiting phase occurs. For normal leaving
-  # scenarios, reducing this value will not make the hand-over quicker, but in extreme
-  # failure scenarios the recovery might be faster.
-  min-number-of-hand-over-retries = 15
-
-  # Config path of the lease to be taken before creating the singleton actor.
-  # If the lease is lost, the actor is restarted and it will need to re-acquire the lease.
-  # The default is no lease.
-  use-lease = ""
-
-  # The interval between retries for acquiring the lease
-  lease-retry-interval = 5s
-}
-# //#singleton-config
-
-# //#singleton-proxy-config
-akka.cluster.singleton-proxy {
-  # The actor name of the singleton actor that is started by the ClusterSingletonManager
-  singleton-name = ${akka.cluster.singleton.singleton-name}
-
-  # The role of the cluster nodes where the singleton can be deployed.
-  # If the role is not specified, then any node will do.
-  role = ""
-
-  # Interval at which the proxy will try to resolve the singleton instance.
-  singleton-identification-interval = 1s
-
-  # If the location of the singleton is unknown, the proxy will buffer this
-  # number of messages and deliver them when the singleton is identified.
-  # When the buffer is full, old messages will be dropped when new messages are
-  # sent via the proxy.
-  # Use 0 to disable buffering, i.e. messages will be dropped immediately if
-  # the location of the singleton is unknown.
-  # Maximum allowed buffer size is 10000.
-  buffer-size = 1000
-}
-# //#singleton-proxy-config
-
-# Serializer for cluster ClusterSingleton messages
-akka.actor {
-  serializers {
-    akka-singleton = "akka.cluster.singleton.protobuf.ClusterSingletonMessageSerializer"
-  }
-  serialization-bindings {
-    "akka.cluster.singleton.ClusterSingletonMessage" = akka-singleton
-  }
-  serialization-identifiers {
-    "akka.cluster.singleton.protobuf.ClusterSingletonMessageSerializer" = 14
-  }
-}
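A Java sketch of the manager/proxy pair that the two singleton sections above drive. The Props, actor names, and the "portal" role are assumptions for illustration, not this project's actual wiring:

import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import akka.actor.PoisonPill;
import akka.actor.Props;
import akka.cluster.singleton.ClusterSingletonManager;
import akka.cluster.singleton.ClusterSingletonManagerSettings;
import akka.cluster.singleton.ClusterSingletonProxy;
import akka.cluster.singleton.ClusterSingletonProxySettings;

// Illustrative only: start the manager on every node (it elects one host for
// the actor) and talk to it through a proxy that buffers while it moves.
public final class SingletonWiring {
    public static ActorRef wire(ActorSystem system, Props singletonProps) {
        system.actorOf(
                ClusterSingletonManager.props(
                        singletonProps,
                        PoisonPill.getInstance(), // termination message on hand-over
                        ClusterSingletonManagerSettings.create(system).withRole("portal")),
                "mainSingleton");

        // The proxy buffers up to buffer-size messages while the singleton is unknown.
        ActorRef proxy = system.actorOf(
                ClusterSingletonProxy.props(
                        "/user/mainSingleton",
                        ClusterSingletonProxySettings.create(system).withRole("portal")),
                "mainSingletonProxy");

        proxy.tell("hello singleton", ActorRef.noSender());
        return proxy;
    }
}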
-# Copyright (C) 2015 - 2018 Lightbend Inc. <https://www.lightbend.com>
-
-# ssl configuration
-ssl-config {
-
-  logger = "com.typesafe.sslconfig.util.NoopLogger"
-
-  # Whether we should use the default JVM SSL configuration or not.
-  # When false, additional configuration will be applied on the context (as configured in ssl-config).
-  default = false
-
-  # The ssl protocol to use
-  protocol = "TLSv1.2"
-
-  # Whether revocation lists should be checked; if null, defaults to the platform default setting.
-  checkRevocation = null
-
-  # A sequence of URLs for obtaining revocation lists
-  revocationLists = []
-
-  # The enabled cipher suites. If empty, uses the platform default.
-  enabledCipherSuites = []
-
-  # The enabled protocols. If empty, uses the platform default.
-  enabledProtocols = ["TLSv1.2", "TLSv1.1", "TLSv1"]
-
-  # The disabled signature algorithms
-  disabledSignatureAlgorithms = ["MD2", "MD4", "MD5"]
-
-  # The disabled key algorithms
-  disabledKeyAlgorithms = ["RSA keySize < 2048", "DSA keySize < 2048", "EC keySize < 224"]
-
-  # The debug configuration
-  debug = []
-
-  # The hostname verifier class.
-  # If non-null, this should be the fully qualified class name of a class that implements
-  # HostnameVerifier, otherwise the default will be used.
-  hostnameVerifierClass = null
-
-  sslParameters {
-    # Translates to setNeedClientAuth / setWantClientAuth calls:
-    # "default" – leaves the platform defaults in place (for JDK8 this means
-    #             wantClientAuth and needClientAuth are both set to false)
-    # "none"    – `setNeedClientAuth(false)`
-    # "want"    – `setWantClientAuth(true)`
-    # "need"    – `setNeedClientAuth(true)`
-    clientAuth = "default"
-
-    # protocols (names)
-    protocols = []
-  }
-
-  # Configuration for the key manager
-  keyManager {
-    # The key manager algorithm. If empty, uses the platform default.
-    algorithm = null
-
-    # The key stores
-    stores = [
-    ]
-    # The key stores should look like this
-    prototype.stores {
-      # The store type. If null, defaults to the platform default store type, i.e. JKS.
-      type = null
-
-      # The path to the keystore file. Either this must be non-null, or data must be non-null.
-      path = null
-
-      # The data for the keystore. Either this must be non-null, or path must be non-null.
-      data = null
-
-      # The password for loading the keystore. If null, uses no password.
-      # It's recommended to load the password from an environment variable.
-      password = null
-    }
-  }
-
-  trustManager {
-    # The trust manager algorithm. If empty, uses the platform default.
-    algorithm = null
-
-    # The trust stores
-    stores = [
-    ]
-    # The trust stores should look like this
-    prototype.stores {
-      # The store type. If null, defaults to the platform default store type, i.e. JKS.
-      type = null
-
-      # The path to the truststore file. Either this must be non-null, or data must be non-null.
-      path = null
-
-      # The data for the truststore. Either this must be non-null, or path must be non-null.
-      data = null
-
-      # The password for loading the truststore. If null, uses no password.
-      # It's recommended to load the password from an environment variable.
-      password = null
-    }
-  }
-
-  # The loose ssl options. These allow configuring ssl to be more loose about what it accepts,
-  # at the cost of introducing potential security issues.
-  loose {
-
-    # Whether weak protocols should be allowed
-    allowWeakProtocols = false
-
-    # Whether weak ciphers should be allowed
-    allowWeakCiphers = false
-
-    # If non-null, overrides the platform default for whether legacy hello messages should be allowed.
-    allowLegacyHelloMessages = null
-
-    # If non-null, overrides the platform default for whether unsafe renegotiation should be allowed.
-    allowUnsafeRenegotiation = null
-
-    # Whether hostname verification should be disabled
-    disableHostnameVerification = false
-
-    # Whether the SNI (Server Name Indication) TLS extension should be disabled.
-    # This setting MAY be respected by client libraries.
-    #
-    # https://tools.ietf.org/html/rfc3546#section-3.1
-    disableSNI = false
-
-    # Whether any certificate should be accepted or not
-    acceptAnyCertificate = false
-  }
-
-  # Debug configuration
-  debug {
-
-    # Turn on all debugging
-    all = false
-
-    # Turn on ssl debugging
-    ssl = false
-
-    # Turn certpath debugging on
-    certpath = false
-
-    # Turn ocsp debugging on
-    ocsp = false
-
-    # Enable per-record tracing
-    record = false
-
-    # Hex dump of record plaintext, requires record to be true
-    plaintext = false
-
-    # Print raw SSL/TLS packets, requires record to be true
-    packet = false
-
-    # Print each handshake message
-    handshake = false
-
-    # Print hex dump of each handshake message, requires handshake to be true
-    data = false
-
-    # Enable verbose handshake message printing, requires handshake to be true
-    verbose = false
-
-    # Print key generation data
-    keygen = false
-
-    # Print session activity
-    session = false
-
-    # Print default SSL initialization
-    defaultctx = false
-
-    # Print SSLContext tracing
-    sslctx = false
-
-    # Print session cache tracing
-    sessioncache = false
-
-    # Print key manager tracing
-    keymanager = false
-
-    # Print trust manager tracing
-    trustmanager = false
-
-    # Turn pluggability debugging on
-    pluggability = false
-
-  }
-}
-
-# ========================================= #
-#   Shared Woken Reference Configuration    #
-# ========================================= #
-
-app {
-  # Name of the application
-  name = ""
-  name = ${?APP_NAME}
-  # Type of the application
-  type = "Scala"
-  type = ${?APP_TYPE}
-  # Version of the application
-  version = ""
-  version = ${?VERSION}
-  # Date when this application was built
-  buildDate = ""
-  buildDate = ${?BUILD_DATE}
-}
-
-datacenter {
-  # Location of the datacenter
-  location = "dev"
-  location = ${?DATA_CENTER_LOCATION}
-  host = ""
-  host = ${?HOST}
-  host = ${?DATA_CENTER_SERVER}
-  # Container orchestration
-  containerOrchestration = "mesos"
-  containerOrchestration = ${?CONTAINER_ORCHESTRATION}
-  # Mesos properties
-  mesos {
-    containerName = ""
-    containerName = ${?MESOS_CONTAINER_NAME}
-    dockerImage = ""
-    dockerImage = ${?MARATHON_APP_DOCKER_IMAGE}
-    resourceCpu = ""
-    resourceCpu = ${?MARATHON_APP_RESOURCE_CPUS}
-    resourceMem = ""
-    resourceMem = ${?MARATHON_APP_RESOURCE_MEM}
-    labels = ""
-    labels = ${?MARATHON_APP_LABELS}
-  }
-}
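The `key = default` followed by `key = ${?ENV_VAR}` idiom used throughout the blocks above relies on optional substitutions vanishing when they cannot be resolved, so the second line only overrides the first when the environment variable is actually set. A small self-contained Java demonstration:

import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;

// Demonstrates HOCON's optional substitution: ${?APP_NAME} disappears if
// APP_NAME is unset, leaving the earlier default in place.
public final class EnvOverrideDemo {
    public static void main(String[] args) {
        Config config = ConfigFactory.parseString(
                "app {\n" +
                "  name = \"\"\n" +
                "  name = ${?APP_NAME}\n" +
                "}")
                .resolve(); // environment variables are consulted during resolution

        // Prints the APP_NAME environment variable if set, otherwise "".
        System.out.println("app.name = '" + config.getString("app.name") + "'");
    }
}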
-
-bugsnag {
-  apiKey = ""
-  apiKey = ${?BUGSNAG_KEY}
-  # Release stage used when reporting errors. Values are dev, staging, production.
-  releaseStage = "dev"
-  releaseStage = ${?RELEASE_STAGE}
-}
-
-# Common settings for Akka
-
-akka {
-  loglevel = "WARNING"
-  loglevel = ${?AKKA_LOG_LEVEL}
-  stdout-loglevel = "WARNING"
-  stdout-loglevel = ${?AKKA_LOG_LEVEL}
-  loggers = ["akka.event.slf4j.Slf4jLogger"]
-  logging-filter = "akka.event.slf4j.Slf4jLoggingFilter"
-
-  log-config-on-start = off
-  log-config-on-start = ${?AKKA_LOG_CONFIG}
-
-  log-dead-letters = 10
-  log-dead-letters-during-shutdown = off
-
-  coordinated-shutdown.terminate-actor-system = on
-
-  actor {
-    # provider = "cluster"
-
-    debug {
-      receive = on
-      autoreceive = off
-      lifecycle = on
-      fsm = off
-      unhandled = on
-      event-stream = off
-    }
-
-    serializers {
-      woken-messages-serializer = "ch.chuv.lren.woken.messages.AkkaSerializer"
-    }
-
-    serialization-bindings {
-      "ch.chuv.lren.woken.messages.Ping" = woken-messages-serializer
-      "ch.chuv.lren.woken.messages.Pong" = woken-messages-serializer
-      "ch.chuv.lren.woken.messages.ComponentQuery" = woken-messages-serializer
-      "ch.chuv.lren.woken.messages.VersionQuery" = woken-messages-serializer
-      "ch.chuv.lren.woken.messages.VersionResponse" = woken-messages-serializer
-      "ch.chuv.lren.woken.messages.datasets.DatasetsQuery" = woken-messages-serializer
-      "ch.chuv.lren.woken.messages.datasets.DatasetsResponse" = woken-messages-serializer
-      "ch.chuv.lren.woken.messages.datasets.TablesQuery" = woken-messages-serializer
-      "ch.chuv.lren.woken.messages.datasets.TablesResponse" = woken-messages-serializer
-      "ch.chuv.lren.woken.messages.query.MethodsQuery$" = woken-messages-serializer
-      "ch.chuv.lren.woken.messages.query.MethodsResponse" = woken-messages-serializer
-      "ch.chuv.lren.woken.messages.query.MiningQuery" = woken-messages-serializer
-      "ch.chuv.lren.woken.messages.query.ExperimentQuery" = woken-messages-serializer
-      "ch.chuv.lren.woken.messages.query.QueryResult" = woken-messages-serializer
-      "ch.chuv.lren.woken.messages.validation.ValidationQuery" = woken-messages-serializer
-      "ch.chuv.lren.woken.messages.validation.ValidationResult" = woken-messages-serializer
-      "ch.chuv.lren.woken.messages.validation.ScoringQuery" = woken-messages-serializer
-      "ch.chuv.lren.woken.messages.validation.ScoringResult" = woken-messages-serializer
-      "ch.chuv.lren.woken.messages.variables.VariablesForDatasetsQuery" = woken-messages-serializer
-      "ch.chuv.lren.woken.messages.variables.VariablesForDatasetsResponse" = woken-messages-serializer
-    }
-
-    enable-additional-serialization-bindings = on
-    allow-java-serialization = off
-
-  }
-
-  remote {
-    log-sent-messages = on
-    log-received-messages = on
-    log-remote-lifecycle-events = on
-
-    watch-failure-detector {
-      acceptable-heartbeat-pause = 20 s
-    }
-
-  }
-
-  http {
-    server {
-      idle-timeout = 300s
-      request-timeout = 180s
-      ssl-encryption = off
-      ssl-tracing = on
-    }
-
-    client {
-      idle-timeout = 300s
-      request-timeout = 20 s
-    }
-
-    host-connection-pool {
-      max-connections = 128
-      max-open-requests = 128
-    }
-  }
-
-}
-
-remoting {
-
-  implementation = "artery"
-  # The alternative option is 'netty'
-  implementation = ${?AKKA_REMOTING}
-
-}
-
-clustering {
-  # IP address advertised by the Akka server
-  ip = "127.0.0.1"
-  ip = ${?CLUSTER_IP}
-  # Define the default Akka port for your app
-  port = 8088
-  port = ${?CLUSTER_PORT}
-
-  # Definition of the seed used to bootstrap the cluster
-  seed-ip = "127.0.0.1"
-  seed-ip = ${?CLUSTER_IP}
-  seed-ip = ${?WOKEN_PORT_8088_TCP_ADDR}
-  seed-port = 8088
-  seed-port = ${?WOKEN_PORT_8088_TCP_PORT}
-
-  # Name of the Akka cluster
-  cluster.name = "woken"
-  cluster.name = ${?CLUSTER_NAME}
-}
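A hedged sketch, in Java, of how these clustering.* keys can be folded into Akka's artery settings before the ActorSystem starts. This is not woken's actual ConfigurationLoader; class and key mappings are illustrative, and it assumes this reference configuration is on the classpath:

import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;

// Illustrative only: derive the artery canonical address and the seed node
// list from the clustering block above.
public final class ClusterBootstrap {
    public static Config remotingFrom(Config config) {
        String ip = config.getString("clustering.ip");
        int port = config.getInt("clustering.port");
        String seedIp = config.getString("clustering.seed-ip");
        int seedPort = config.getInt("clustering.seed-port");
        String clusterName = config.getString("clustering.cluster.name");

        // Artery addresses use the akka:// scheme (akka.tcp:// is classic remoting).
        return ConfigFactory.parseString(
                "akka.remote.artery.canonical.hostname = \"" + ip + "\"\n" +
                "akka.remote.artery.canonical.port = " + port + "\n" +
                "akka.cluster.seed-nodes = [\"akka://" + clusterName
                        + "@" + seedIp + ":" + seedPort + "\"]")
                .withFallback(config);
    }

    public static void main(String[] args) {
        Config config = remotingFrom(ConfigFactory.load());
        System.out.println(config.getString("akka.remote.artery.canonical.hostname"));
    }
}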