From 54a76939de12c92fc6e3659b4760815081368d36 Mon Sep 17 00:00:00 2001 From: Gideon de Kok Date: Thu, 27 Mar 2014 11:03:22 +0100 Subject: [PATCH 01/54] Fixes a problem which resulted in not correctly reconnecting the client after disconnection --- README.md | 2 +- project/Build.scala | 2 +- .../scala/nl/gideondk/sentinel/Client.scala | 13 ++++++++--- .../gideondk/sentinel/RequestResponse.scala | 23 ++++++++++++++++++- .../gideondk/sentinel/ServerRequestSpec.scala | 2 +- .../nl/gideondk/sentinel/StreamingSpec.scala | 2 +- 6 files changed, 36 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index 9e52e1a..71060c8 100644 --- a/README.md +++ b/README.md @@ -47,7 +47,7 @@ Or by adding the repo: to your SBT configuration and adding the `SNAPSHOT` to your library dependencies:
libraryDependencies ++= Seq(
-  "nl.gideondk" %% "sentinel" % "0.6.0"
+  "nl.gideondk" %% "sentinel" % "0.6.1"
 )
 
diff --git a/project/Build.scala b/project/Build.scala index 8778933..53d15ae 100755 --- a/project/Build.scala +++ b/project/Build.scala @@ -5,7 +5,7 @@ object ApplicationBuild extends Build { override lazy val settings = super.settings ++ Seq( name := "sentinel", - version := "0.6.0", + version := "0.6.1", organization := "nl.gideondk", scalaVersion := "2.10.2", parallelExecution in Test := false, diff --git a/src/main/scala/nl/gideondk/sentinel/Client.scala b/src/main/scala/nl/gideondk/sentinel/Client.scala index 7a04bc2..e9d2cb9 100644 --- a/src/main/scala/nl/gideondk/sentinel/Client.scala +++ b/src/main/scala/nl/gideondk/sentinel/Client.scala @@ -84,6 +84,10 @@ class ClientAntennaManager[Cmd, Evt](address: InetSocketAddress, stages: ⇒ Pip def connected(antenna: ActorRef): Receive = { case x: Command[Cmd, Evt] ⇒ antenna forward x + + case x: Terminated ⇒ + context.stop(self) + } def disconnected: Receive = { @@ -139,21 +143,24 @@ class ClientCore[Cmd, Evt](routerConfig: RouterConfig, description: String, reco def receive = { case x: Client.ConnectToServer ⇒ + log.debug("Connecting to: " + x.addr) if (!addresses.map(_._1).contains(x)) { val router = routerProto(x.addr) context.watch(router) addresses = addresses ++ List(x.addr -> Some(router)) coreRouter = Some(context.system.actorOf(Props.empty.withRouter(RoundRobinRouter(routees = addresses.map(_._2).flatten)))) + } else { + log.debug("Client is already connected to: " + x.addr) } case Terminated(actor) ⇒ /* If router died, restart after a period of time */ - val terminatedRouter = addresses.find(_._2 == actor) + val terminatedRouter = addresses.find(_._2 == Some(actor)) terminatedRouter match { case Some(r) ⇒ - addresses = addresses diff addresses.find(_._2 == actor).toList + addresses = addresses diff addresses.find(_._2 == Some(actor)).toList coreRouter = Some(context.system.actorOf(Props.empty.withRouter(RoundRobinRouter(routees = addresses.map(_._2).flatten)))) - log.debug("Router for: " + r._1 + " died, restarting in: " + reconnectDuration.toString()) + log.error("Router for: " + r._1 + " died, restarting in: " + reconnectDuration.toString()) context.system.scheduler.scheduleOnce(reconnectDuration, self, Client.ConnectToServer(r._1)) case None ⇒ } diff --git a/src/test/scala/nl/gideondk/sentinel/RequestResponse.scala b/src/test/scala/nl/gideondk/sentinel/RequestResponse.scala index 627963d..fbc7edf 100644 --- a/src/test/scala/nl/gideondk/sentinel/RequestResponse.scala +++ b/src/test/scala/nl/gideondk/sentinel/RequestResponse.scala @@ -23,7 +23,7 @@ class RequestResponseSpec extends WordSpec with Matchers { implicit val duration = Duration(5, SECONDS) - def client(portNumber: Int)(implicit system: ActorSystem) = Client.randomRouting("localhost", portNumber, 16, "Worker", SimpleMessage.stages, 5 seconds, SimpleServerHandler)(system) + def client(portNumber: Int)(implicit system: ActorSystem) = Client.randomRouting("localhost", portNumber, 16, "Worker", SimpleMessage.stages, 0.5 seconds, SimpleServerHandler)(system) def server(portNumber: Int)(implicit system: ActorSystem) = { val s = Server(portNumber, SimpleServerHandler, stages = SimpleMessage.stages)(system) @@ -70,5 +70,26 @@ class RequestResponseSpec extends WordSpec with Matchers { result.map(_.payload) should equal(items) } + + "should automatically reconnect" in new TestKitSpec { + val portNumber = TestHelpers.portNumber.getAndIncrement() + val s = server(portNumber) + val c = client(portNumber) + Thread.sleep(500) + + val action = c ? 
SimpleCommand(PING_PONG, "") + val result = action.run + + result.isSuccess should equal(true) + + system.stop(s.actor) + Thread.sleep(1000) + val ss = server(portNumber) + + val secAction = c ? SimpleCommand(PING_PONG, "") + val endResult = secAction.run + + endResult.isSuccess should equal(true) + } } } \ No newline at end of file diff --git a/src/test/scala/nl/gideondk/sentinel/ServerRequestSpec.scala b/src/test/scala/nl/gideondk/sentinel/ServerRequestSpec.scala index ee9edb8..44c4df1 100644 --- a/src/test/scala/nl/gideondk/sentinel/ServerRequestSpec.scala +++ b/src/test/scala/nl/gideondk/sentinel/ServerRequestSpec.scala @@ -24,7 +24,7 @@ class ServerRequestSpec extends WordSpec with ShouldMatchers { val numberOfConnections = 16 - def client(portNumber: Int)(implicit system: ActorSystem) = Client.randomRouting("localhost", portNumber, numberOfConnections, "Worker", SimpleMessage.stages, 5 seconds, SimpleServerHandler)(system) + def client(portNumber: Int)(implicit system: ActorSystem) = Client.randomRouting("localhost", portNumber, numberOfConnections, "Worker", SimpleMessage.stages, 0.5 seconds, SimpleServerHandler)(system) def server(portNumber: Int)(implicit system: ActorSystem) = { val s = Server(portNumber, SimpleServerHandler, stages = SimpleMessage.stages)(system) diff --git a/src/test/scala/nl/gideondk/sentinel/StreamingSpec.scala b/src/test/scala/nl/gideondk/sentinel/StreamingSpec.scala index 587d611..3e282b9 100644 --- a/src/test/scala/nl/gideondk/sentinel/StreamingSpec.scala +++ b/src/test/scala/nl/gideondk/sentinel/StreamingSpec.scala @@ -23,7 +23,7 @@ class StreamingSpec extends WordSpec with ShouldMatchers { implicit val duration = Duration(5, SECONDS) - def client(portNumber: Int)(implicit system: ActorSystem) = Client.randomRouting("localhost", portNumber, 2, "Worker", SimpleMessage.stages, 5 seconds, SimpleServerHandler)(system) + def client(portNumber: Int)(implicit system: ActorSystem) = Client.randomRouting("localhost", portNumber, 2, "Worker", SimpleMessage.stages, 0.5 seconds, SimpleServerHandler)(system) def server(portNumber: Int)(implicit system: ActorSystem) = { val s = Server(portNumber, SimpleServerHandler, stages = SimpleMessage.stages)(system) From 05bb75f5dc1946dcc84cfebf2f031cbade47227e Mon Sep 17 00:00:00 2001 From: Gideon de Kok Date: Mon, 7 Apr 2014 17:13:52 +0100 Subject: [PATCH 02/54] =?UTF-8?q?Make=20default=20print=20output=20of=20Co?= =?UTF-8?q?nsumerExceptions=20a=20bit=20more=20sane=E2=80=A6?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../scala/nl/gideondk/sentinel/processors/Consumer.scala | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/main/scala/nl/gideondk/sentinel/processors/Consumer.scala b/src/main/scala/nl/gideondk/sentinel/processors/Consumer.scala index 215ad6e..fe4fbdc 100644 --- a/src/main/scala/nl/gideondk/sentinel/processors/Consumer.scala +++ b/src/main/scala/nl/gideondk/sentinel/processors/Consumer.scala @@ -29,7 +29,9 @@ object Consumer { trait ConsumerData[Evt] - case class ConsumerException[Evt](cause: Evt) extends Exception + case class ConsumerException[Evt](cause: Evt) extends Exception { + override def toString() = "ConsumerException(" + cause + ")" + } case class DataChunk[Evt](c: Evt) extends ConsumerData[Evt] @@ -149,4 +151,4 @@ class Consumer[Cmd, Evt](init: Init[WithinActorContext, Cmd, Evt], streamChunkTi } def receive = behavior -} \ No newline at end of file +} From 32919252179e381b64f4e7fc3b39ebe4043fd469 Mon Sep 17 00:00:00 2001 From: 
Gideon de Kok Date: Mon, 7 Apr 2014 17:14:00 +0100 Subject: [PATCH 03/54] Add recovery to Tasks --- src/main/scala/nl/gideondk/sentinel/Task.scala | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/main/scala/nl/gideondk/sentinel/Task.scala b/src/main/scala/nl/gideondk/sentinel/Task.scala index 6744778..603b9f1 100644 --- a/src/main/scala/nl/gideondk/sentinel/Task.scala +++ b/src/main/scala/nl/gideondk/sentinel/Task.scala @@ -2,6 +2,7 @@ package nl.gideondk.sentinel import scala.concurrent.Await import scala.concurrent.Future +import scala.concurrent.ExecutionContext import scala.concurrent.ExecutionContext.Implicits.global import scala.concurrent.duration.Duration import scala.util.Try @@ -17,6 +18,10 @@ final case class Task[A](get: IO[Future[A]]) { def run(implicit atMost: Duration): Try[A] = Await.result((start.map(Try(_)) recover { case x ⇒ Try(throw x) }), atMost) + + def recover[U >: A](pf: PartialFunction[Throwable, U])(implicit executor: ExecutionContext) = get.map(_.recover(pf)) + + def recoverWith[U >: T](pf: PartialFunction[Throwable, Future[U]])(implicit executor: ExecutionContext) = get.map(_.recoverWith(pf)) } trait TaskMonad extends Monad[Task] { From afd87bd48de4b391d1ff02105ade040a73100a45 Mon Sep 17 00:00:00 2001 From: Gideon de Kok Date: Tue, 8 Apr 2014 11:47:52 +0100 Subject: [PATCH 04/54] Remove Scalaz as a dependency, just use Futures for now... --- project/Build.scala | 9 +- .../scala/nl/gideondk/sentinel/Client.scala | 18 ++-- .../scala/nl/gideondk/sentinel/Server.scala | 44 +++++----- .../scala/nl/gideondk/sentinel/Task.scala | 82 ------------------- .../sentinel/processors/Producer.scala | 5 +- .../nl/gideondk/sentinel/FullDuplexSpec.scala | 23 +++--- .../gideondk/sentinel/RequestResponse.scala | 21 +++-- .../gideondk/sentinel/ServerRequestSpec.scala | 18 ++-- .../nl/gideondk/sentinel/StreamingSpec.scala | 20 ++--- .../scala/nl/gideondk/sentinel/TaskSpec.scala | 51 ------------ .../nl/gideondk/sentinel/TestHelpers.scala | 7 +- 11 files changed, 77 insertions(+), 221 deletions(-) delete mode 100644 src/main/scala/nl/gideondk/sentinel/Task.scala delete mode 100644 src/test/scala/nl/gideondk/sentinel/TaskSpec.scala diff --git a/project/Build.scala b/project/Build.scala index 53d15ae..78347f3 100755 --- a/project/Build.scala +++ b/project/Build.scala @@ -14,20 +14,18 @@ object ApplicationBuild extends Build { "Sonatype OSS Releases" at "http://oss.sonatype.org/content/repositories/releases/", "Sonatype OSS Snapshots" at "http://oss.sonatype.org/content/repositories/snapshots/", - + "Typesafe Snapshots" at "http://repo.typesafe.com/typesafe/snapshots/", "Typesafe Repository" at "http://repo.typesafe.com/typesafe/releases/"), - + publishTo := Some(Resolver.file("file", new File("/Users/gideondk/Development/gideondk-mvn-repo"))) ) val appDependencies = Seq( - "org.scalaz" %% "scalaz-core" % "7.0.3", - "org.scalaz" %% "scalaz-effect" % "7.0.3", - "org.scalatest" % "scalatest_2.10" % "1.9.1" % "test", "com.typesafe.play" %% "play-iteratees" % "2.2.0", + "com.typesafe.akka" % "akka-actor_2.10" % "2.3.0", "com.typesafe.akka" %% "akka-testkit" % "2.3.0" ) @@ -66,4 +64,3 @@ object Format { setPreference(SpacesWithinPatternBinders, true) } } - diff --git a/src/main/scala/nl/gideondk/sentinel/Client.scala b/src/main/scala/nl/gideondk/sentinel/Client.scala index e9d2cb9..a2453ad 100644 --- a/src/main/scala/nl/gideondk/sentinel/Client.scala +++ b/src/main/scala/nl/gideondk/sentinel/Client.scala @@ -19,30 +19,30 @@ trait Client[Cmd, Evt] { def actor: ActorRef - def 
?(command: Cmd)(implicit context: ExecutionContext): Task[Evt] = ask(command) + def ?(command: Cmd)(implicit context: ExecutionContext): Future[Evt] = ask(command) - def ?->>(command: Cmd)(implicit context: ExecutionContext): Task[Enumerator[Evt]] = askStream(command) + def ?->>(command: Cmd)(implicit context: ExecutionContext): Future[Enumerator[Evt]] = askStream(command) - def ?<<-(command: Cmd, source: Enumerator[Cmd])(implicit context: ExecutionContext): Task[Evt] = sendStream(command, source) + def ?<<-(command: Cmd, source: Enumerator[Cmd])(implicit context: ExecutionContext): Future[Evt] = sendStream(command, source) - def ?<<-(source: Enumerator[Cmd])(implicit context: ExecutionContext): Task[Evt] = sendStream(source) + def ?<<-(source: Enumerator[Cmd])(implicit context: ExecutionContext): Future[Evt] = sendStream(source) - def ask(command: Cmd)(implicit context: ExecutionContext): Task[Evt] = Task { + def ask(command: Cmd)(implicit context: ExecutionContext): Future[Evt] = { val promise = Promise[Evt]() actor ! Command.Ask(command, ReplyRegistration(promise)) promise.future } - def askStream(command: Cmd)(implicit context: ExecutionContext): Task[Enumerator[Evt]] = Task { + def askStream(command: Cmd)(implicit context: ExecutionContext): Future[Enumerator[Evt]] = { val promise = Promise[Enumerator[Evt]]() actor ! Command.AskStream(command, StreamReplyRegistration(promise)) promise.future } - def sendStream(command: Cmd, source: Enumerator[Cmd]): Task[Evt] = + def sendStream(command: Cmd, source: Enumerator[Cmd]): Future[Evt] = sendStream(Enumerator(command) >>> source) - def sendStream(source: Enumerator[Cmd]): Task[Evt] = Task { + def sendStream(source: Enumerator[Cmd]): Future[Evt] = { val promise = Promise[Evt]() actor ! Command.SendStream(source, ReplyRegistration(promise)) promise.future @@ -174,4 +174,4 @@ class ClientCore[Cmd, Evt](routerConfig: RouterConfig, description: String, reco case _ ⇒ } -} \ No newline at end of file +} diff --git a/src/main/scala/nl/gideondk/sentinel/Server.scala b/src/main/scala/nl/gideondk/sentinel/Server.scala index 4c90652..90e9f75 100644 --- a/src/main/scala/nl/gideondk/sentinel/Server.scala +++ b/src/main/scala/nl/gideondk/sentinel/Server.scala @@ -15,35 +15,35 @@ import akka.pattern.ask trait Server[Cmd, Evt] { def actor: ActorRef - def ?**(command: Cmd)(implicit context: ExecutionContext): Task[List[Evt]] = askAll(command) + def ?**(command: Cmd)(implicit context: ExecutionContext): Future[List[Evt]] = askAll(command) - def ?*(command: Cmd)(implicit context: ExecutionContext): Task[List[Evt]] = askAllHosts(command) + def ?*(command: Cmd)(implicit context: ExecutionContext): Future[List[Evt]] = askAllHosts(command) - def ?(command: Cmd)(implicit context: ExecutionContext): Task[Evt] = askAny(command) + def ?(command: Cmd)(implicit context: ExecutionContext): Future[Evt] = askAny(command) - def askAll(command: Cmd)(implicit context: ExecutionContext): Task[List[Evt]] = Task { + def askAll(command: Cmd)(implicit context: ExecutionContext): Future[List[Evt]] = { val promise = Promise[List[Evt]]() actor ! ServerCommand.AskAll(command, promise) promise.future } - def askAllHosts(command: Cmd)(implicit context: ExecutionContext): Task[List[Evt]] = Task { + def askAllHosts(command: Cmd)(implicit context: ExecutionContext): Future[List[Evt]] = { val promise = Promise[List[Evt]]() actor ! 
ServerCommand.AskAllHosts(command, promise) promise.future } - def askAny(command: Cmd)(implicit context: ExecutionContext): Task[Evt] = Task { + def askAny(command: Cmd)(implicit context: ExecutionContext): Future[Evt] = { val promise = Promise[Evt]() actor ! ServerCommand.AskAny(command, promise) promise.future } - def connectedSockets(implicit timeout: Timeout): Task[Int] = Task { + def connectedSockets(implicit timeout: Timeout): Future[Int] = { (actor ? ServerMetric.ConnectedSockets).mapTo[Int] } - def connectedHosts(implicit timeout: Timeout): Task[Int] = Task { + def connectedHosts(implicit timeout: Timeout): Future[Int] = { (actor ? ServerMetric.ConnectedHosts).mapTo[Int] } } @@ -67,17 +67,23 @@ class ServerCore[Cmd, Evt](port: Int, description: String, stages: ⇒ PipelineS } def receiveCommands: Receive = { - case x: ServerCommand.AskAll[Cmd, Evt] if connections.values.toList.length > 0 ⇒ - val futures = Task.sequence(connections.values.toList.flatten.map(wrapAtenna).map(_ ? x.payload)).start - x.promise.completeWith(futures) - - case x: ServerCommand.AskAllHosts[Cmd, Evt] if connections.values.toList.length > 0 ⇒ - val futures = Task.sequence(connections.values.toList.map(x ⇒ Random.shuffle(x.toList).head).map(wrapAtenna).map(_ ? x.payload)).start - x.promise.completeWith(futures) - - case x: ServerCommand.AskAny[Cmd, Evt] if connections.values.toList.length > 0 ⇒ - val future = (wrapAtenna(Random.shuffle(connections.values.toList.flatten).head) ? x.payload).start - x.promise.completeWith(future) + case x: ServerCommand.AskAll[Cmd, Evt] ⇒ + if (connections.values.toList.length > 0) { + val futures = Future.sequence(connections.values.toList.flatten.map(wrapAtenna).map(_ ? x.payload)) + x.promise.completeWith(futures) + } else x.promise.failure(new Exception("No clients connected")) + + case x: ServerCommand.AskAllHosts[Cmd, Evt] ⇒ + if (connections.values.toList.length > 0) { + val futures = Future.sequence(connections.values.toList.map(x ⇒ Random.shuffle(x.toList).head).map(wrapAtenna).map(_ ? x.payload)) + x.promise.completeWith(futures) + } else x.promise.failure(new Exception("No clients connected")) + + case x: ServerCommand.AskAny[Cmd, Evt] ⇒ + if (connections.values.toList.length > 0) { + val future = (wrapAtenna(Random.shuffle(connections.values.toList.flatten).head) ? x.payload) + x.promise.completeWith(future) + } else x.promise.failure(new Exception("No clients connected")) case ServerMetric.ConnectedSockets ⇒ sender ! 
connections.values.flatten.toList.length diff --git a/src/main/scala/nl/gideondk/sentinel/Task.scala b/src/main/scala/nl/gideondk/sentinel/Task.scala deleted file mode 100644 index 603b9f1..0000000 --- a/src/main/scala/nl/gideondk/sentinel/Task.scala +++ /dev/null @@ -1,82 +0,0 @@ -package nl.gideondk.sentinel - -import scala.concurrent.Await -import scala.concurrent.Future -import scala.concurrent.ExecutionContext -import scala.concurrent.ExecutionContext.Implicits.global -import scala.concurrent.duration.Duration -import scala.util.Try - -import scalaz._ -import scalaz.Scalaz._ -import scalaz.effect.IO - -final case class Task[A](get: IO[Future[A]]) { - self ⇒ - def start: Future[A] = get.unsafePerformIO - - def run(implicit atMost: Duration): Try[A] = Await.result((start.map(Try(_)) recover { - case x ⇒ Try(throw x) - }), atMost) - - def recover[U >: A](pf: PartialFunction[Throwable, U])(implicit executor: ExecutionContext) = get.map(_.recover(pf)) - - def recoverWith[U >: T](pf: PartialFunction[Throwable, Future[U]])(implicit executor: ExecutionContext) = get.map(_.recoverWith(pf)) -} - -trait TaskMonad extends Monad[Task] { - def point[A](a: ⇒ A): Task[A] = Task((Future(a)).point[IO]) - - def bind[A, B](fa: Task[A])(f: A ⇒ Task[B]) = - Task(Monad[IO].point(fa.get.unsafePerformIO.flatMap { - x ⇒ - f(x).get.unsafePerformIO - })) -} - -trait TaskCatchable extends Catchable[Task] with TaskMonad { - def fail[A](e: Throwable): Task[A] = Task(Future.failed(e)) - - def attempt[A](t: Task[A]): Task[Throwable \/ A] = map(t)(x ⇒ \/-(x)) -} - -trait TaskComonad extends Comonad[Task] with TaskMonad { - implicit protected def atMost: Duration - - def cobind[A, B](fa: Task[A])(f: Task[A] ⇒ B): Task[B] = point(f(fa)) - - def cojoin[A](a: Task[A]): Task[Task[A]] = point(a) - - def copoint[A](fa: Task[A]): A = fa.run.get -} - -trait TaskFunctions { - - import scalaz._ - import Scalaz._ - - def apply[A](a: ⇒ Future[A]): Task[A] = Task(Monad[IO].point(a)) - - def sequence[A](z: List[Task[A]]): Task[List[A]] = - Task(z.map(_.get).sequence[IO, Future[A]].map(x ⇒ Future.sequence(x))) - - def sequenceSuccesses[A](z: List[Task[A]]): Task[List[A]] = - Task(z.map(_.get).sequence[IO, Future[A]].map { - x ⇒ - Future.sequence(x.map(f ⇒ f.map(Some(_)) recover { - case x ⇒ None - })).map(_.filter(_.isDefined).map(_.get)) - }) - -} - -trait TaskImplementation extends TaskFunctions { - implicit def taskMonadInstance = new TaskMonad {} - - implicit def taskComonadInstance(implicit d: Duration) = new TaskComonad { - override protected val atMost = d - } -} - -object Task extends TaskImplementation { -} diff --git a/src/main/scala/nl/gideondk/sentinel/processors/Producer.scala b/src/main/scala/nl/gideondk/sentinel/processors/Producer.scala index 51a8ad9..bac5d39 100644 --- a/src/main/scala/nl/gideondk/sentinel/processors/Producer.scala +++ b/src/main/scala/nl/gideondk/sentinel/processors/Producer.scala @@ -10,9 +10,6 @@ import akka.io.TcpPipelineHandler.{ Init, WithinActorContext } import akka.pattern.ask import akka.util.Timeout -import scalaz._ -import Scalaz._ - import play.api.libs.iteratee._ import nl.gideondk.sentinel._ @@ -144,4 +141,4 @@ class Producer[Cmd, Evt](init: Init[WithinActorContext, Cmd, Evt], streamChunkTi } def receive = handleRequestAndResponse -} \ No newline at end of file +} diff --git a/src/test/scala/nl/gideondk/sentinel/FullDuplexSpec.scala b/src/test/scala/nl/gideondk/sentinel/FullDuplexSpec.scala index ae2b72c..9be71b6 100644 --- a/src/test/scala/nl/gideondk/sentinel/FullDuplexSpec.scala +++ 
b/src/test/scala/nl/gideondk/sentinel/FullDuplexSpec.scala @@ -5,11 +5,10 @@ import scala.concurrent.ExecutionContext.Implicits.global import org.scalatest.WordSpec import org.scalatest.matchers.ShouldMatchers -import scalaz._ -import Scalaz._ - import akka.actor._ import akka.routing._ + +import scala.concurrent._ import scala.concurrent.duration._ import protocols._ @@ -24,7 +23,6 @@ class FullDuplexSpec extends WordSpec with ShouldMatchers { def server(portNumber: Int)(implicit system: ActorSystem) = { val s = Server(portNumber, SimpleServerHandler, stages = SimpleMessage.stages)(system) - Thread.sleep(100) s } @@ -34,12 +32,13 @@ class FullDuplexSpec extends WordSpec with ShouldMatchers { val s = server(portNumber) val c = client(portNumber) + Thread.sleep(500) val action = c ? SimpleCommand(PING_PONG, "") val serverAction = (s ?* SimpleCommand(PING_PONG, "")).map(_.head) - val responses = Task.sequence(List(action, serverAction)) + val responses = Future.sequence(List(action, serverAction)) - val results = responses.copoint + val results = Await.result(responses, 5 seconds) results.length should equal(2) results.distinct.length should equal(1) @@ -53,17 +52,17 @@ class FullDuplexSpec extends WordSpec with ShouldMatchers { val numberOfRequests = 1000 - val actions = Task.sequenceSuccesses(List.fill(numberOfRequests)(c ? SimpleCommand(PING_PONG, ""))) - val secActions = Task.sequenceSuccesses(List.fill(numberOfRequests)(secC ? SimpleCommand(PING_PONG, ""))) - val serverActions = Task.sequenceSuccesses(List.fill(numberOfRequests)((s ?** SimpleCommand(PING_PONG, "")))) + val actions = Future.sequence(List.fill(numberOfRequests)(c ? SimpleCommand(PING_PONG, ""))) + val secActions = Future.sequence(List.fill(numberOfRequests)(secC ? SimpleCommand(PING_PONG, ""))) + val serverActions = Future.sequence(List.fill(numberOfRequests)((s ?** SimpleCommand(PING_PONG, "")))) - val combined = Task.sequence(List(actions, serverActions.map(_.flatten), secActions)) + val combined = Future.sequence(List(actions, serverActions.map(_.flatten), secActions)) - val results = combined.copoint + val results = Await.result(combined, 5 seconds) results(0).length should equal(numberOfRequests) results(2).length should equal(numberOfRequests) results(1).length should equal(numberOfRequests * 2) } } -} \ No newline at end of file +} diff --git a/src/test/scala/nl/gideondk/sentinel/RequestResponse.scala b/src/test/scala/nl/gideondk/sentinel/RequestResponse.scala index fbc7edf..08b6f27 100644 --- a/src/test/scala/nl/gideondk/sentinel/RequestResponse.scala +++ b/src/test/scala/nl/gideondk/sentinel/RequestResponse.scala @@ -5,14 +5,13 @@ import scala.concurrent.ExecutionContext.Implicits.global import org.scalatest.WordSpec import org.scalatest.matchers.{ Matchers, ShouldMatchers } -import scalaz._ -import Scalaz._ - import akka.actor._ import akka.routing._ import scala.concurrent.duration._ import scala.concurrent._ +import scala.util.Try + import play.api.libs.iteratee._ import protocols._ @@ -38,7 +37,7 @@ class RequestResponseSpec extends WordSpec with Matchers { val c = client(portNumber) val action = c ? SimpleCommand(PING_PONG, "") - val result = action.run + val result = Try(Await.result(action, 5 seconds)) result.isSuccess should equal(true) } @@ -50,8 +49,8 @@ class RequestResponseSpec extends WordSpec with Matchers { val numberOfRequests = 20 * 1000 - val action = Task.sequenceSuccesses(List.fill(numberOfRequests)(c ? 
SimpleCommand(PING_PONG, ""))) - val result = action.run + val action = Future.sequence(List.fill(numberOfRequests)(c ? SimpleCommand(PING_PONG, ""))) + val result = Try(Await.result(action, 5 seconds)) result.get.length should equal(numberOfRequests) result.isSuccess should equal(true) @@ -65,8 +64,8 @@ class RequestResponseSpec extends WordSpec with Matchers { val numberOfRequests = 90 * 1000 val items = List.range(0, numberOfRequests).map(_.toString) - val action = Task.sequenceSuccesses(items.map(x ⇒ (c ? SimpleCommand(ECHO, x)))) - val result = action.run.get + val action = Future.sequence(items.map(x ⇒ (c ? SimpleCommand(ECHO, x)))) + val result = Await.result(action, 5 seconds) result.map(_.payload) should equal(items) } @@ -78,7 +77,7 @@ class RequestResponseSpec extends WordSpec with Matchers { Thread.sleep(500) val action = c ? SimpleCommand(PING_PONG, "") - val result = action.run + val result = Try(Await.result(action, 5 seconds)) result.isSuccess should equal(true) @@ -87,9 +86,9 @@ class RequestResponseSpec extends WordSpec with Matchers { val ss = server(portNumber) val secAction = c ? SimpleCommand(PING_PONG, "") - val endResult = secAction.run + val endResult = Try(Await.result(secAction, 5 seconds)) endResult.isSuccess should equal(true) } } -} \ No newline at end of file +} diff --git a/src/test/scala/nl/gideondk/sentinel/ServerRequestSpec.scala b/src/test/scala/nl/gideondk/sentinel/ServerRequestSpec.scala index 44c4df1..128ce10 100644 --- a/src/test/scala/nl/gideondk/sentinel/ServerRequestSpec.scala +++ b/src/test/scala/nl/gideondk/sentinel/ServerRequestSpec.scala @@ -5,11 +5,9 @@ import scala.concurrent.ExecutionContext.Implicits.global import org.scalatest.WordSpec import org.scalatest.matchers.ShouldMatchers -import scalaz._ -import Scalaz._ - import akka.actor._ import akka.routing._ +import scala.concurrent._ import scala.concurrent.duration._ import protocols._ @@ -40,7 +38,7 @@ class ServerRequestSpec extends WordSpec with ShouldMatchers { Thread.sleep(500) val action = (s ? SimpleCommand(PING_PONG, "")) - val result = action.copoint + val result = Await.result(action, 5 seconds) result should equal(SimpleReply("PONG")) } @@ -55,7 +53,7 @@ class ServerRequestSpec extends WordSpec with ShouldMatchers { Thread.sleep(500) val action = (s ?* SimpleCommand(PING_PONG, "")) - val result = action.copoint + val result = Await.result(action, 5 seconds) result.length should equal(1) } @@ -70,7 +68,7 @@ class ServerRequestSpec extends WordSpec with ShouldMatchers { Thread.sleep(500) val action = (s ?** SimpleCommand(PING_PONG, "")) - val result = action.copoint + val result = Await.result(action, 5 seconds) result.length should equal(numberOfClients * numberOfConnections) } @@ -84,18 +82,18 @@ class ServerRequestSpec extends WordSpec with ShouldMatchers { Thread.sleep(500) - val connectedSockets = (s connectedSockets).copoint + val connectedSockets = Await.result((s connectedSockets), 5 seconds) connectedSockets should equal(numberOfClients * numberOfConnections) - val connectedHosts = (s connectedHosts).copoint + val connectedHosts = Await.result((s connectedHosts), 5 seconds) connectedHosts should equal(1) val toBeKilledActors = clients.splitAt(3)._1.map(_.actor) toBeKilledActors.foreach(x ⇒ x ! 
PoisonPill) Thread.sleep(500) - val stillConnectedSockets = (s connectedSockets).copoint + val stillConnectedSockets = Await.result((s connectedSockets), 5 seconds) stillConnectedSockets should equal(2 * numberOfConnections) } } -} \ No newline at end of file +} diff --git a/src/test/scala/nl/gideondk/sentinel/StreamingSpec.scala b/src/test/scala/nl/gideondk/sentinel/StreamingSpec.scala index 3e282b9..3ba2fd7 100644 --- a/src/test/scala/nl/gideondk/sentinel/StreamingSpec.scala +++ b/src/test/scala/nl/gideondk/sentinel/StreamingSpec.scala @@ -5,14 +5,12 @@ import scala.concurrent.ExecutionContext.Implicits.global import org.scalatest.WordSpec import org.scalatest.matchers.ShouldMatchers -import scalaz._ -import Scalaz._ - import akka.actor._ import akka.routing._ import scala.concurrent.duration._ import scala.concurrent._ +import scala.util.Try import play.api.libs.iteratee._ import protocols._ @@ -43,7 +41,7 @@ class StreamingSpec extends WordSpec with ShouldMatchers { val localLength = chunks.foldLeft(0)((b, a) ⇒ b + a.payload.length) - val result = action.run + val result = Try(Await.result(action, 5 seconds)) result.isSuccess should equal(true) result.get.payload.toInt should equal(localLength) @@ -57,8 +55,8 @@ class StreamingSpec extends WordSpec with ShouldMatchers { val count = 500 val action = c ?->> SimpleCommand(GENERATE_NUMBERS, count.toString) - val stream = action.copoint - val result = Await.result(stream |>>> Iteratee.getChunks, 5 seconds) + val f = action.flatMap(_ |>>> Iteratee.getChunks) + val result = Await.result(f, 5 seconds) result.length should equal(count) } @@ -70,9 +68,9 @@ class StreamingSpec extends WordSpec with ShouldMatchers { val count = 500 val numberOfActions = 8 - val actions = Task.sequenceSuccesses(List.fill(numberOfActions)((c ?->> SimpleCommand(GENERATE_NUMBERS, count.toString)).flatMap(x ⇒ Task(x |>>> Iteratee.getChunks)))) + val actions = Future.sequence(List.fill(numberOfActions)((c ?->> SimpleCommand(GENERATE_NUMBERS, count.toString)).flatMap(x ⇒ x |>>> Iteratee.getChunks))) - val result = actions.map(_.flatten).copoint + val result = Await.result(actions.map(_.flatten), 5 seconds) result.length should equal(count * numberOfActions) } @@ -87,12 +85,12 @@ class StreamingSpec extends WordSpec with ShouldMatchers { val action = c ?<<- (SimpleCommand(TOTAL_CHUNK_SIZE, ""), Enumerator(chunks: _*)) val numberOfActions = 8 - val actions = Task.sequenceSuccesses(List.fill(numberOfActions)(c ?<<- (SimpleCommand(TOTAL_CHUNK_SIZE, ""), Enumerator(chunks: _*)))) + val actions = Future.sequence(List.fill(numberOfActions)(c ?<<- (SimpleCommand(TOTAL_CHUNK_SIZE, ""), Enumerator(chunks: _*)))) val localLength = chunks.foldLeft(0)((b, a) ⇒ b + a.payload.length) - val result = actions.copoint + val result = Await.result(actions, 5 seconds) result.map(_.payload.toInt).sum should equal(localLength * numberOfActions) } } -} \ No newline at end of file +} diff --git a/src/test/scala/nl/gideondk/sentinel/TaskSpec.scala b/src/test/scala/nl/gideondk/sentinel/TaskSpec.scala deleted file mode 100644 index 199854b..0000000 --- a/src/test/scala/nl/gideondk/sentinel/TaskSpec.scala +++ /dev/null @@ -1,51 +0,0 @@ -package nl.gideondk.sentinel - -import scala.concurrent.ExecutionContext.Implicits.global -import scala.concurrent.Future -import scala.concurrent.duration.{ Duration, SECONDS } - -import org.scalatest._ - -import org.scalatest.BeforeAndAfterAll -import org.scalatest.WordSpec -import org.scalatest.matchers.ShouldMatchers - -import Task.taskComonadInstance -import 
scalaz.Scalaz._ - -class TaskSpec extends WordSpec with ShouldMatchers { - implicit val timeout = Duration(10, SECONDS) - - "A Task" should { - "be able to be run correctly" in { - val task = Task(Future(1)) - task.copoint should equal(1) - } - - "be able to be sequenced correctly" in { - val tasks = Task.sequence((for (i ← 0 to 9) yield i.point[Task]).toList) - tasks.copoint.length should equal(10) - } - - "should short circuit in case of a sequenced failure" in { - val s1 = 1.point[Task] - val s2 = 2.point[Task] - val f1: Task[Int] = Task(Future.failed(new Exception(""))) - - val tasks = Task.sequence(List(s1, f1, s2)) - tasks.run.isFailure - } - - "should only return successes when sequenced for successes" in { - val s1 = 1.point[Task] - val s2 = 2.point[Task] - val f1: Task[Int] = Task(Future.failed(new Exception(""))) - - val f: Task[Int] ⇒ String = ((t: Task[Int]) ⇒ t.copoint + "123") - s1.cobind(f) - - val tasks = Task.sequenceSuccesses(List(s1, f1, s2)) - tasks.run.get.length == 2 - } - } -} diff --git a/src/test/scala/nl/gideondk/sentinel/TestHelpers.scala b/src/test/scala/nl/gideondk/sentinel/TestHelpers.scala index a38b037..940c682 100644 --- a/src/test/scala/nl/gideondk/sentinel/TestHelpers.scala +++ b/src/test/scala/nl/gideondk/sentinel/TestHelpers.scala @@ -13,11 +13,6 @@ import akka.io.{ LengthFieldFrame, PipelineContext, SymmetricPipePair, Symmetric import akka.routing.RoundRobinRouter import akka.util.ByteString -import Task._ - -import scalaz._ -import Scalaz._ - import akka.actor._ import akka.testkit._ import scala.concurrent.duration._ @@ -74,4 +69,4 @@ object LargerPayloadTestHelper { ByteString(stringB.toString().getBytes()) } -} \ No newline at end of file +} From 985204fc751f01fe484ea9e5b8d819c5aa2e42a2 Mon Sep 17 00:00:00 2001 From: Gideon de Kok Date: Tue, 8 Apr 2014 15:27:50 +0100 Subject: [PATCH 05/54] Update version number / README --- README.md | 68 +++++++++---------- project/Build.scala | 2 +- .../gideondk/sentinel/RequestResponse.scala | 2 +- 3 files changed, 33 insertions(+), 39 deletions(-) diff --git a/README.md b/README.md index 71060c8..ea8d563 100644 --- a/README.md +++ b/README.md @@ -8,7 +8,7 @@ The implementation focusses on raw performance, using pipelines through multiple sockets represented by multiple workers (both client / server side). Sentinel is designed for usage in persistent connection environments, making it (currently) less suited for things like HTTP and best suited for DB clients / RPC stacks. -Sentinel brings a unique symmetrical design through *Antennas*, resulting in the same request and response handling on both clients and servers. This not only makes it simple to share code on both sides, but also opens the possibility to inverse request & response flow from server to client. +Sentinel brings a unique symmetrical design through *Antennas*, resulting in the same request and response handling on both clients and servers. This not only makes it simple to share code on both sides, but also opens the possibility to inverse request & response flow from server to client. In its current state, it's being used internally as a platform to test performance strategies for CPU and IO bound services. In the nearby future, Sentinel will fuel both [Raiku](http://github.com/gideondk/raiku) as other soon-to-be-released Akka based libraries. @@ -22,7 +22,6 @@ In overall, treat Sentinel as pre-release alpha software. 
* Easy initialization of TCP servers and clients for default or custom router worker strategies; * Supervision (and restart / reconnection functionality) on clients for a defined number of workers; -* Sequencing and continuing multiple client operations using `Tasks`; * Streaming requests and responses (currently) based on Play Iteratees; * Direct server to client communication through symmetrical signal handling design. @@ -47,22 +46,22 @@ Or by adding the repo: to your SBT configuration and adding the `SNAPSHOT` to your library dependencies:
libraryDependencies ++= Seq(
-  "nl.gideondk" %% "sentinel" % "0.6.1"
+  "nl.gideondk" %% "sentinel" % "0.7.0"
 )
 
## Architecture -The internal structure of Sentinel relies on a *Antenna* actor. The Antenna represents the connection between a client and a server and handles both the outgoing commands as incoming replies and handles the events received from the underlying *TCP* actors. +The internal structure of Sentinel relies on a *Antenna* actor. The Antenna represents the connection between a client and a server and handles both the outgoing commands as incoming replies and handles the events received from the underlying *TCP* actors. -Within the antenna structure, two child actors are defined. One used for consuming replies from the connected host and one for the production of values for the connected host. +Within the antenna structure, two child actors are defined. One used for consuming replies from the connected host and one for the production of values for the connected host. Both clients as servers share the same antenna construction, which results in a symmetrical design for sending and receiving commands. When a message is received from the opposing host, a *resolver* is used to determine the action or reaction on the received event. Based on the used protocol (as defined in the underlying protocol pipeline), a host can process the event and decide whether the consume the received event or to respond with new values (as in a normal request -> response way). Once, for instance, a command is sent to a client (for a response from the connected server), the payload is sent to the opposing host and a reply-registration is set within the consumer part of the antenna. This registration and accompanying promise is completed with the consequential response from the server. ## Actions -The handle incoming events, multiple actions are defined which can be used to implement logic on top of the used protocol. Actions are split into consumer actions and producers actions, which make a antenna able to: +The handle incoming events, multiple actions are defined which can be used to implement logic on top of the used protocol. Actions are split into consumer actions and producers actions, which make a antenna able to: ### Consumer Actions `AcceptSignal`: Accept and consume a incoming signal and apply it on a pending registration @@ -85,16 +84,16 @@ The handle incoming events, multiple actions are defined which can be used to im `ProduceStream`: Produces a stream (Enumerator) for the requesting hosts ## Synchronicity -Normally, Sentinel clients connect to servers through multiple sockets to increase parallel performance on top of the synchronous nature of *TCP* sockets. Producers and consumers implement a state machine to correctly respond to running incoming and outgoing streams, handling messages which don't impose treats to the message flow and stashing messages which could leak into the running streams. +Normally, Sentinel clients connect to servers through multiple sockets to increase parallel performance on top of the synchronous nature of *TCP* sockets. Producers and consumers implement a state machine to correctly respond to running incoming and outgoing streams, handling messages which don't impose treats to the message flow and stashing messages which could leak into the running streams. Because of the synchronous nature of the underlying semantics, you have to handle each receiving signal in a appropriate way. Not handling all signals correctly could result in values ending up in incorrect registrations etc. 
-## Initialization +## Initialization ### Pipelines The Pipeline implementation available in Akka 2.2 is becoming obsolete in Akka 2.3 to be replaced with a (better) alternative later on in Akka 2.4. As it seemed that pipelines aren't the best solution for Akka, this currently leaves Akka 2.3 without a reactive *protocol layer*. To bridge the period until a definite solution is available, the "older" pipeline implementation is packaged along with Sentinel. - -The pipeline implementation focusses on the definition of pipes for both incoming as outgoing messages. In these pipelines, a definition is made how incoming or outgoing messages are parsed and formatted. + +The pipeline implementation focusses on the definition of pipes for both incoming as outgoing messages. In these pipelines, a definition is made how incoming or outgoing messages are parsed and formatted. Each of these *stages* can easily be composed into a bigger stage (`A => B >> B => C`) taking a the input of the first stage and outputting the format of the last stage. Within Sentinel, the eventual output send to the IO workers is in the standard `ByteString` format, making it necessary that the end stage of the pipeline always outputs content of the `ByteString` type: @@ -103,14 +102,14 @@ case class PingPongMessageFormat(s: String) class PingPongMessageStage extends SymmetricPipelineStage[PipelineContext, PingPongMessageFormat, ByteString] { - + override def apply(ctx: PipelineContext) = new SymmetricPipePair[PingPongMessageFormat, ByteString] { implicit val byteOrder = ctx.byteOrder - + override val commandPipeline = { msg: PingPongMessageFormat ⇒ Seq(Right(ByteString(msg.s))) } - + override val eventPipeline = { bs: ByteString ⇒ Seq(Left(PingPongMessageFormat(new String(bs.toArray)))) } @@ -128,7 +127,7 @@ import SimpleMessage._ trait DefaultSimpleMessageHandler extends Resolver[SimpleMessageFormat, SimpleMessageFormat] { def process = { case SimpleStreamChunk(x) ⇒ if (x.length > 0) ConsumerAction.ConsumeStreamChunk else ConsumerAction.EndStream - + case x: SimpleError ⇒ ConsumerAction.AcceptError case x: SimpleReply ⇒ ConsumerAction.AcceptSignal } @@ -144,23 +143,23 @@ object SimpleServerHandler extends DefaultSimpleMessageHandler { override def process = super.process orElse { case SimpleCommand(PING_PONG, payload) ⇒ ProducerAction.Signal { x: SimpleCommand ⇒ Future(SimpleReply("PONG")) } - + case SimpleCommand(TOTAL_CHUNK_SIZE, payload) ⇒ ProducerAction.ConsumeStream { x: SimpleCommand ⇒ s: Enumerator[SimpleStreamChunk] ⇒ s |>>> Iteratee.fold(0) { (b, a) ⇒ b + a.payload.length } map (x ⇒ SimpleReply(x.toString)) } - + case SimpleCommand(GENERATE_NUMBERS, payload) ⇒ ProducerAction.ProduceStream { x: SimpleCommand ⇒ val count = payload.toInt Future((Enumerator(List.range(0, count): _*) &> Enumeratee.map(x ⇒ SimpleStreamChunk(x.toString))) >>> Enumerator(SimpleStreamChunk(""))) } - + case SimpleCommand(ECHO, payload) ⇒ ProducerAction.Signal { x: SimpleCommand ⇒ Future(SimpleReply(x.payload)) } } } ``` -Like illustrated, the `ProducerAction.Signal` producer action makes it able to respond with a Async response. Taking a function which handles the incoming event and producing a new value, wrapped in a `Future`. +Like illustrated, the `ProducerAction.Signal` producer action makes it able to respond with a Async response. Taking a function which handles the incoming event and producing a new value, wrapped in a `Future`. 
`ProducerAction.ConsumeStream` takes a function handling the incoming event and the Enumerator with the consequential chunks, resulting in a new value wrapped in a `Future` @@ -173,12 +172,12 @@ After the definition of the pipeline, a client is easily created: Client.randomRouting("localhost", 9999, 4, "Ping Client", stages = stages, resolver = resolver) ``` -Defining the host and port where the client should connect to, the amount of workers used to handle commands / events, description of the client and the earlier defined context, stages and resolver (for the complete list of parameters, check the code for the moment). - -You can use the `randomRouting` / `roundRobinRouting` methods depending on the routing strategy you want to use to communicate to the workers. For a more custom approach the `apply` method is available, which lets you define a router strategy yourself. +Defining the host and port where the client should connect to, the amount of workers used to handle commands / events, description of the client and the earlier defined context, stages and resolver (for the complete list of parameters, check the code for the moment). + +You can use the `randomRouting` / `roundRobinRouting` methods depending on the routing strategy you want to use to communicate to the workers. For a more custom approach the `apply` method is available, which lets you define a router strategy yourself. ### Server -When the stages and resolver are defined, creation of a server is very straight forward: +When the stages and resolver are defined, creation of a server is very straight forward: ```scala Server(portNumber, SimpleServerHandler, "Server", SimpleMessage.stages) @@ -188,31 +187,27 @@ This will automatically start the server with the corresponding stages and handl ## Client usage -Once a client and / or server has been set up, the `?` method can be used on the client to send a command to the connected server. Results are wrapped into a `Task` containing the type `Evt` defined in the incoming stage of the client. +Once a client and / or server has been set up, the `?` method can be used on the client to send a command to the connected server. Results are wrapped into a `Future` containing the type `Evt` defined in the incoming stage of the client. ```scala PingPongTestHelper.pingClient ? PingPongMessageFormat("PING") -res0: Task[PingPongMessageFormat] +res0: Future[PingPongMessageFormat] ``` -`Task` combines a `Try`, `Future` and `IO` Monad into one type: exceptions will be caught in the Try, all async actions are abstracted into a future monad and all IO actions are as pure as possible by using the Scalaz IO monad. - -Use `run` to expose the Future, or use `start(d: Duration)` to perform IO and wait (blocking) on the future. - -This bare bone approach to sending / receiving messages is focussed on the idea that a higher-level API on top of Sentinel is responsible to make client usage more comfortable. +The bare bone approach to sending / receiving messages is focussed on the idea that a higher-level API on top of Sentinel is responsible to make client usage more comfortable. ### Streamed requests / responses Sentinels structure for streaming requests and responses works best with protocols which somehow *pad* chunks and terminators. As the resolver has to be sure whether to consume a stream chunk and when to end the incoming stream, length based header structures are difficult to implement. 
Unstructured binary stream chunks can however be matched by protocol implementations if they are fundamentally different then other chunks, simply ignoring initial length headers and for instance breaking on *zero terminators* could be a way to implement *non-padded* stream chunks. -#### Sending +#### Sending It's possible to stream content towards Sentinel clients by using the the `?<<-` command, expecting the command to be send to the server, accompanied by the actual stream: ```scala c ?<<- (SimpleCommand(TOTAL_CHUNK_SIZE, ""), Enumerator(chunks: _*)) -res0: Task[SimpleCommand] +res0: Future[SimpleCommand] c ?<<- Enumerator((SimpleCommand(TOTAL_CHUNK_SIZE, "") ++ chunks): _*) -res1: Task[SimpleCommand] +res1: Future[SimpleCommand] ``` @@ -223,13 +218,13 @@ In the same manner, a stream can be requested from the server: ```scala c ?->> SimpleCommand(GENERATE_NUMBERS, count.toString) -res0: Task[Enumerator[SimpleCommand]] +res0: Future[Enumerator[SimpleCommand]] ``` ## Server usage -Although functionality will be expanded in the future, it's currently also possible to send requests from the server to the connected clients. This can be used for retrieval of client information on servers request, but could also be used as a retrieval pattern where clients are dormant after request, but respond to requests when necessary (retrieving sensor info per example). +Although functionality will be expanded in the future, it's currently also possible to send requests from the server to the connected clients. This can be used for retrieval of client information on servers request, but could also be used as a retrieval pattern where clients are dormant after request, but respond to requests when necessary (retrieving sensor info per example). -The following commands can be used to retrieve information: +The following commands can be used to retrieve information: `?`: Sends command to *one* (randomly chosen) connected socket for a answer, resulting in one event. @@ -237,7 +232,7 @@ The following commands can be used to retrieve information: `?**`: Sends a command to all connected sockets, resulting in a list of events from all connected sockets. -Simple server metrics are available through the `connectedSockets` and `connectedHosts` commands, returning a `Task[Int]` containing the corresponding count. +Simple server metrics are available through the `connectedSockets` and `connectedHosts` commands, returning a `Future[Int]` containing the corresponding count. 
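A brief, non-normative sketch of the server-side query API described above, pieced together from the accompanying test specs (`ServerRequestSpec`): it assumes a running `Server` instance `s` built with `SimpleServerHandler`, the `SimpleMessage` test protocol, and an implicit `ExecutionContext` plus an implicit `akka.util.Timeout` in scope.

```scala
// Sketch only: SimpleCommand / PING_PONG and the value names come from the test helpers,
// not from a stable public example.
val one       = s ?   SimpleCommand(PING_PONG, "")  // Future[SimpleMessageFormat], one random socket answers
val perHost   = s ?*  SimpleCommand(PING_PONG, "")  // Future[List[...]], one socket per connected host
val perSocket = s ?** SimpleCommand(PING_PONG, "")  // Future[List[...]], every connected socket answers

val sockets = s.connectedSockets                    // Future[Int], requires the implicit Timeout
val hosts   = s.connectedHosts                      // Future[Int]

val pong = Await.result(one, 5 seconds)             // SimpleReply("PONG") in the test protocol
```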
# License Copyright © 2014 Gideon de Kok @@ -250,4 +245,3 @@ Unless required by applicable law or agreed to in writing, software distributed [![Bitdeli Badge](https://d2weczhvl823v0.cloudfront.net/gideondk/sentinel/trend.png)](https://bitdeli.com/free "Bitdeli Badge") - diff --git a/project/Build.scala b/project/Build.scala index 78347f3..a8b7c86 100755 --- a/project/Build.scala +++ b/project/Build.scala @@ -5,7 +5,7 @@ object ApplicationBuild extends Build { override lazy val settings = super.settings ++ Seq( name := "sentinel", - version := "0.6.1", + version := "0.7.0", organization := "nl.gideondk", scalaVersion := "2.10.2", parallelExecution in Test := false, diff --git a/src/test/scala/nl/gideondk/sentinel/RequestResponse.scala b/src/test/scala/nl/gideondk/sentinel/RequestResponse.scala index 08b6f27..69b19bd 100644 --- a/src/test/scala/nl/gideondk/sentinel/RequestResponse.scala +++ b/src/test/scala/nl/gideondk/sentinel/RequestResponse.scala @@ -22,7 +22,7 @@ class RequestResponseSpec extends WordSpec with Matchers { implicit val duration = Duration(5, SECONDS) - def client(portNumber: Int)(implicit system: ActorSystem) = Client.randomRouting("localhost", portNumber, 16, "Worker", SimpleMessage.stages, 0.5 seconds, SimpleServerHandler)(system) + def client(portNumber: Int)(implicit system: ActorSystem) = Client.randomRouting("localhost", portNumber, 320, "Worker", SimpleMessage.stages, 0.5 seconds, SimpleServerHandler)(system) def server(portNumber: Int)(implicit system: ActorSystem) = { val s = Server(portNumber, SimpleServerHandler, stages = SimpleMessage.stages)(system) From 687e46fab8a5bedd07242451a7d128e65e41fdfd Mon Sep 17 00:00:00 2001 From: Gideon de Kok Date: Tue, 29 Apr 2014 08:40:10 +0200 Subject: [PATCH 06/54] Fix a problem which could result in idling consumers when both streaming requests as normal requests were active. --- README.md | 2 +- project/Build.scala | 2 +- .../sentinel/processors/Consumer.scala | 52 ++++++++++--------- .../nl/gideondk/sentinel/FullDuplexSpec.scala | 7 ++- .../gideondk/sentinel/RequestResponse.scala | 2 +- .../nl/gideondk/sentinel/StreamingSpec.scala | 18 +++++++ 6 files changed, 54 insertions(+), 29 deletions(-) diff --git a/README.md b/README.md index ea8d563..996eb54 100644 --- a/README.md +++ b/README.md @@ -46,7 +46,7 @@ Or by adding the repo: to your SBT configuration and adding the `SNAPSHOT` to your library dependencies:
libraryDependencies ++= Seq(
-  "nl.gideondk" %% "sentinel" % "0.7.0"
+  "nl.gideondk" %% "sentinel" % "0.7.1"
 )
 
diff --git a/project/Build.scala b/project/Build.scala index a8b7c86..9f5ef40 100755 --- a/project/Build.scala +++ b/project/Build.scala @@ -5,7 +5,7 @@ object ApplicationBuild extends Build { override lazy val settings = super.settings ++ Seq( name := "sentinel", - version := "0.7.0", + version := "0.7.1", organization := "nl.gideondk", scalaVersion := "2.10.2", parallelExecution in Test := false, diff --git a/src/main/scala/nl/gideondk/sentinel/processors/Consumer.scala b/src/main/scala/nl/gideondk/sentinel/processors/Consumer.scala index fe4fbdc..141f60f 100644 --- a/src/main/scala/nl/gideondk/sentinel/processors/Consumer.scala +++ b/src/main/scala/nl/gideondk/sentinel/processors/Consumer.scala @@ -10,7 +10,6 @@ import akka.pattern.ask import akka.util.Timeout import play.api.libs.iteratee._ - import nl.gideondk.sentinel._ object Consumer { @@ -53,7 +52,6 @@ class Consumer[Cmd, Evt](init: Init[WithinActorContext, Cmd, Evt], streamChunkTi var buffer = Queue[Promise[ConsumerData[Evt]]]() var registrations = Queue[Registration[Evt, _]]() - var currentPromise: Option[Promise[Evt]] = None var runningSource: Option[Enumerator[Evt]] = None @@ -86,7 +84,7 @@ class Consumer[Cmd, Evt](init: Init[WithinActorContext, Cmd, Evt], streamChunkTi } } - def popAndSetHook = { + def popAndSetHook: Unit = { val worker = self val registration = registrations.head registrations = registrations.tail @@ -94,16 +92,20 @@ class Consumer[Cmd, Evt](init: Init[WithinActorContext, Cmd, Evt], streamChunkTi implicit val timeout = streamChunkTimeout registration match { - case x: ReplyRegistration[Evt] ⇒ x.promise.completeWith((self ? AskNextChunk).mapTo[Promise[ConsumerData[Evt]]].flatMap(_.future.flatMap { - _ match { - case x: DataChunk[Evt] ⇒ - Future.successful(x.c) - case x: ErrorChunk[Evt] ⇒ - Future.failed(ConsumerException(x.c)) - } - })) - case x: StreamReplyRegistration[Evt] ⇒ - val resource = Enumerator.generateM { + case x: ReplyRegistration[Evt] ⇒ { + x.promise.completeWith(nextChunk.future.flatMap { + _ match { + case x: DataChunk[Evt] ⇒ + Future.successful(x.c) + case x: ErrorChunk[Evt] ⇒ + Future.failed(ConsumerException(x.c)) + } + }) + if (registrations.headOption.isDefined) popAndSetHook + } + + case x: StreamReplyRegistration[Evt] ⇒ { + val resource = Enumerator.generateM[Evt] { (worker ? AskNextChunk).mapTo[Promise[ConsumerData[Evt]]].flatMap(_.future).flatMap { _ match { case x: EndOfStream[Evt] ⇒ (worker ? ReleaseStreamConsumer) flatMap (u ⇒ Future(None)) @@ -115,13 +117,24 @@ class Consumer[Cmd, Evt](init: Init[WithinActorContext, Cmd, Evt], streamChunkTi runningSource = Some(resource) x.promise success resource + } } } + def nextChunk() = buffer.headOption match { + case Some(p) ⇒ + buffer = buffer.tail + p + case None ⇒ + val p = Promise[ConsumerData[Evt]]() + hooks :+= p + p + } + def handleRegistrations: Receive = { case rc: Registration[Evt, _] ⇒ registrations :+= rc - if (runningSource.isEmpty && currentPromise.isEmpty) popAndSetHook + if (runningSource.isEmpty) popAndSetHook } var behavior: Receive = handleRegistrations orElse { @@ -131,16 +144,7 @@ class Consumer[Cmd, Evt](init: Init[WithinActorContext, Cmd, Evt], streamChunkTi sender ! () case AskNextChunk ⇒ - val promise = buffer.headOption match { - case Some(p) ⇒ - buffer = buffer.tail - p - case None ⇒ - val p = Promise[ConsumerData[Evt]]() - hooks :+= p - p - } - sender ! promise + sender ! 
nextChunk() case x: ConsumerActionAndData[Evt] ⇒ processAction(x.data, x.action) diff --git a/src/test/scala/nl/gideondk/sentinel/FullDuplexSpec.scala b/src/test/scala/nl/gideondk/sentinel/FullDuplexSpec.scala index 9be71b6..dc0cf7c 100644 --- a/src/test/scala/nl/gideondk/sentinel/FullDuplexSpec.scala +++ b/src/test/scala/nl/gideondk/sentinel/FullDuplexSpec.scala @@ -19,7 +19,7 @@ class FullDuplexSpec extends WordSpec with ShouldMatchers { implicit val duration = Duration(25, SECONDS) - def client(portNumber: Int)(implicit system: ActorSystem) = Client.randomRouting("localhost", portNumber, 1, "Worker", SimpleMessage.stages, 5 seconds, SimpleServerHandler)(system) + def client(portNumber: Int)(implicit system: ActorSystem) = Client.randomRouting("localhost", portNumber, 1, "Worker", SimpleMessage.stages, 0.5 seconds, SimpleServerHandler)(system) def server(portNumber: Int)(implicit system: ActorSystem) = { val s = Server(portNumber, SimpleServerHandler, stages = SimpleMessage.stages)(system) @@ -47,10 +47,13 @@ class FullDuplexSpec extends WordSpec with ShouldMatchers { "be able to exchange multiple requests simultaneously" in new TestKitSpec { val portNumber = TestHelpers.portNumber.getAndIncrement() val s = server(portNumber) + Thread.sleep(500) + val c = client(portNumber) val secC = client(portNumber) + Thread.sleep(500) - val numberOfRequests = 1000 + val numberOfRequests = 100 val actions = Future.sequence(List.fill(numberOfRequests)(c ? SimpleCommand(PING_PONG, ""))) val secActions = Future.sequence(List.fill(numberOfRequests)(secC ? SimpleCommand(PING_PONG, ""))) diff --git a/src/test/scala/nl/gideondk/sentinel/RequestResponse.scala b/src/test/scala/nl/gideondk/sentinel/RequestResponse.scala index 69b19bd..caf3db4 100644 --- a/src/test/scala/nl/gideondk/sentinel/RequestResponse.scala +++ b/src/test/scala/nl/gideondk/sentinel/RequestResponse.scala @@ -22,7 +22,7 @@ class RequestResponseSpec extends WordSpec with Matchers { implicit val duration = Duration(5, SECONDS) - def client(portNumber: Int)(implicit system: ActorSystem) = Client.randomRouting("localhost", portNumber, 320, "Worker", SimpleMessage.stages, 0.5 seconds, SimpleServerHandler)(system) + def client(portNumber: Int)(implicit system: ActorSystem) = Client.roundRobinRouting("localhost", portNumber, 64, "Worker", SimpleMessage.stages, 0.1 seconds, SimpleServerHandler, lowBytes = 1024L, highBytes = 1024 * 1024, maxBufferSize = 1024 * 1024 * 50)(system) def server(portNumber: Int)(implicit system: ActorSystem) = { val s = Server(portNumber, SimpleServerHandler, stages = SimpleMessage.stages)(system) diff --git a/src/test/scala/nl/gideondk/sentinel/StreamingSpec.scala b/src/test/scala/nl/gideondk/sentinel/StreamingSpec.scala index 3ba2fd7..c919370 100644 --- a/src/test/scala/nl/gideondk/sentinel/StreamingSpec.scala +++ b/src/test/scala/nl/gideondk/sentinel/StreamingSpec.scala @@ -75,6 +75,24 @@ class StreamingSpec extends WordSpec with ShouldMatchers { result.length should equal(count * numberOfActions) } + "be able to receive multiple streams and normal commands simultaneously from a server" in new TestKitSpec { + val portNumber = TestHelpers.portNumber.getAndIncrement() + val s = server(portNumber) + val c = client(portNumber) + + val count = 500 + val numberOfActions = 8 + + val streamAction = Future.sequence(List.fill(numberOfActions)((c ?->> SimpleCommand(GENERATE_NUMBERS, count.toString)).flatMap(x ⇒ x |>>> Iteratee.getChunks))) + val action = Future.sequence(List.fill(count)(c ? 
SimpleCommand(PING_PONG, ""))) + + val actions = Future.sequence(List(streamAction, action)) + + val result = Try(Await.result(actions.map(_.flatten), 5 seconds)) + + result.isSuccess should equal(true) + } + "be able to receive send streams simultaneously to a server" in new TestKitSpec { val portNumber = TestHelpers.portNumber.getAndIncrement() val s = server(portNumber) From 76b42061b063a2cd91c0081145cd727e7ef3f0a1 Mon Sep 17 00:00:00 2001 From: Gideon de Kok Date: Tue, 29 Apr 2014 12:13:48 +0200 Subject: [PATCH 07/54] Update Scala version to 2.11 --- project/.Build.scala.swp | Bin 0 -> 12288 bytes project/Build.scala | 10 +++++----- .../nl/gideondk/sentinel/RequestResponse.scala | 6 +----- .../gideondk/sentinel/ServerRequestSpec.scala | 2 +- .../nl/gideondk/sentinel/StreamingSpec.scala | 2 +- .../nl/gideondk/sentinel/TestHelpers.scala | 17 +++-------------- 6 files changed, 11 insertions(+), 26 deletions(-) create mode 100644 project/.Build.scala.swp diff --git a/project/.Build.scala.swp b/project/.Build.scala.swp new file mode 100644 index 0000000000000000000000000000000000000000..1ae8d44f909388666b6ebbe5e2be696b877367ec GIT binary patch literal 12288 zcmeI2&u<(x6vy52qZBA0I3t;%NhXC>Z&BMO3;k4fVuMg(|5%|V5C{bBfXPn0)apv5C{YUfj}S-2m}K6Issc8V((!PhpT}sRNsg8d{?*OD-Z|-0)apv z5C{YUfj}S-2m}IwKp+qZ1nxlsqQlr%4>0!QgGe6#|F8f4|NU{set^D*Zb8?fYtRPt zD%60ELf<{c*cZ^7&>PUd#~8Z_eF7~*|2)dr-_Q-{I`lr2K`%hhLw`NO*q_kX&_~dR z&<9WpIuAV!{qitl-#}kNpF^KPpF%ew18L|b=m>NlbQt>fC~Tk`5J8jBZx5j#=wqmW zo`rrrg8ZRt&>D0c`WY;Kf*cdiYbjZ0Gd?=TV@Pk&lE`~9&3lVd8Cnq-q1_;p-X^!&+oLFR zoY%d+JDqnM&*i?fDLSO4JFVAI?M0P&x26@^UdYd;+Sv1GuUL-H{v2@Uww#D5KDQ6S z(rN{;MUNJ6)Nu1)CZ+v(cO-F%x(C}XseVgR>9Htn>77+NLn$KcPNABKic6XmN?Y#FN9q&>mBsGEmu_4P z3){r&C`2i)4EUCiyfWDxlAq!4Fj$o}?&q+K_zDfoXe@sE%=ieSU0{W{lntVeB-gB; zPEPVBy*=UjtA@~4S!@Rd8PTQKv~1jL*^wkL0M&9QLV3x<#JuR>}jaafiAB zh5x^iIrA1O0H)nkqSrWTxFEwx&`Q~zI1_0HH&K25HJaL)if9)avAHQCe&Pg=+^=a% z#%`t-Pk1yJpN!`s^uhUoKB8@v9`W#f?e!_77*hot80LPkxM3)d&V&}{PYL6tGF zsmx-`P107m#lGsZ_k%*6C%fX8+3pR$S`BGe%6ArhhelOkKKUVjkA4W&II?0p?&00r zZ^aI4Z8`igoUtSm1_cQyWzu-z`En>%t}CEnfn;Kfa=-7_#s#)nDM`M3nc97~r}H%D zYp7bkuerB&ZRrd65(>!~VfSz~JnL(|>=s-_o?h`U2H&1bALfQC9s1A2r{dXR zOOD;%yNxV*7uYWV&J{d0qV4P$BfF337pVT>gUQKycbqQ@nYMjO{^&fX%b53$;pH86 R&1%2(yZU;4=QiAR_8*%=MZf?6 literal 0 HcmV?d00001 diff --git a/project/Build.scala b/project/Build.scala index 9f5ef40..9a89db2 100755 --- a/project/Build.scala +++ b/project/Build.scala @@ -7,7 +7,7 @@ object ApplicationBuild extends Build { name := "sentinel", version := "0.7.1", organization := "nl.gideondk", - scalaVersion := "2.10.2", + scalaVersion := "2.11.0", parallelExecution in Test := false, resolvers ++= Seq(Resolver.mavenLocal, "gideondk-repo" at "https://raw.github.com/gideondk/gideondk-mvn-repo/master", @@ -22,12 +22,12 @@ object ApplicationBuild extends Build { ) val appDependencies = Seq( - "org.scalatest" % "scalatest_2.10" % "1.9.1" % "test", + "org.scalatest" %% "scalatest" % "2.1.4" % "test", - "com.typesafe.play" %% "play-iteratees" % "2.2.0", + "com.typesafe.play" % "play-iteratees_2.10" % "2.2.0", - "com.typesafe.akka" % "akka-actor_2.10" % "2.3.0", - "com.typesafe.akka" %% "akka-testkit" % "2.3.0" + "com.typesafe.akka" %% "akka-actor" % "2.3.2", + "com.typesafe.akka" %% "akka-testkit" % "2.3.2" ) lazy val root = Project(id = "sentinel", diff --git a/src/test/scala/nl/gideondk/sentinel/RequestResponse.scala b/src/test/scala/nl/gideondk/sentinel/RequestResponse.scala index caf3db4..9fc453a 100644 --- a/src/test/scala/nl/gideondk/sentinel/RequestResponse.scala +++ 
b/src/test/scala/nl/gideondk/sentinel/RequestResponse.scala @@ -3,20 +3,16 @@ package nl.gideondk.sentinel import scala.concurrent.ExecutionContext.Implicits.global import org.scalatest.WordSpec -import org.scalatest.matchers.{ Matchers, ShouldMatchers } import akka.actor._ -import akka.routing._ import scala.concurrent.duration._ import scala.concurrent._ import scala.util.Try -import play.api.libs.iteratee._ - import protocols._ -class RequestResponseSpec extends WordSpec with Matchers { +class RequestResponseSpec extends WordSpec { import SimpleMessage._ diff --git a/src/test/scala/nl/gideondk/sentinel/ServerRequestSpec.scala b/src/test/scala/nl/gideondk/sentinel/ServerRequestSpec.scala index 128ce10..f8db82c 100644 --- a/src/test/scala/nl/gideondk/sentinel/ServerRequestSpec.scala +++ b/src/test/scala/nl/gideondk/sentinel/ServerRequestSpec.scala @@ -13,7 +13,7 @@ import scala.concurrent.duration._ import protocols._ import akka.util.Timeout -class ServerRequestSpec extends WordSpec with ShouldMatchers { +class ServerRequestSpec extends WordSpec { import SimpleMessage._ diff --git a/src/test/scala/nl/gideondk/sentinel/StreamingSpec.scala b/src/test/scala/nl/gideondk/sentinel/StreamingSpec.scala index c919370..1d49751 100644 --- a/src/test/scala/nl/gideondk/sentinel/StreamingSpec.scala +++ b/src/test/scala/nl/gideondk/sentinel/StreamingSpec.scala @@ -15,7 +15,7 @@ import play.api.libs.iteratee._ import protocols._ -class StreamingSpec extends WordSpec with ShouldMatchers { +class StreamingSpec extends WordSpec { import SimpleMessage._ diff --git a/src/test/scala/nl/gideondk/sentinel/TestHelpers.scala b/src/test/scala/nl/gideondk/sentinel/TestHelpers.scala index 940c682..bef1dd9 100644 --- a/src/test/scala/nl/gideondk/sentinel/TestHelpers.scala +++ b/src/test/scala/nl/gideondk/sentinel/TestHelpers.scala @@ -1,31 +1,20 @@ package nl.gideondk.sentinel -import scala.concurrent._ -import scala.concurrent.ExecutionContext.Implicits.global - -import scala.util.Try - -import org.scalatest.BeforeAndAfterAll -import org.scalatest.WordSpec +import org.scalatest.{ Suite, BeforeAndAfterAll, WordSpec } import org.scalatest.matchers.ShouldMatchers -import akka.io.{ LengthFieldFrame, PipelineContext, SymmetricPipePair, SymmetricPipelineStage } -import akka.routing.RoundRobinRouter +import akka.io.SymmetricPipelineStage import akka.util.ByteString import akka.actor._ import akka.testkit._ -import scala.concurrent.duration._ -import scala.concurrent._ import java.util.concurrent.atomic.AtomicInteger import protocols._ -import java.net.InetSocketAddress - abstract class TestKitSpec extends TestKit(ActorSystem()) - with WordSpec + with Suite with ShouldMatchers with BeforeAndAfterAll with ImplicitSender { From 4bff2040d0d65d79053e97488bcf79e2114cdf0e Mon Sep 17 00:00:00 2001 From: Gideon de Kok Date: Sun, 11 May 2014 15:09:32 +0200 Subject: [PATCH 08/54] Run stream consumers in separate actor --- README.md | 2 +- project/.Build.scala.swp | Bin 12288 -> 0 bytes project/Build.scala | 2 +- src/main/resources/application.conf | 2 +- .../sentinel/processors/Consumer.scala | 222 ++++++++++++------ .../sentinel/processors/Producer.scala | 7 +- .../nl/gideondk/sentinel/FullDuplexSpec.scala | 7 +- .../gideondk/sentinel/RequestResponse.scala | 14 ++ .../nl/gideondk/sentinel/StreamingSpec.scala | 23 +- 9 files changed, 192 insertions(+), 87 deletions(-) delete mode 100644 project/.Build.scala.swp diff --git a/README.md b/README.md index 996eb54..45097de 100644 --- a/README.md +++ b/README.md @@ -46,7 +46,7 @@ Or by 
adding the repo: to your SBT configuration and adding the `SNAPSHOT` to your library dependencies:
libraryDependencies ++= Seq(
-  "nl.gideondk" %% "sentinel" % "0.7.1"
+  "nl.gideondk" %% "sentinel" % "0.7.2"
 )
 
diff --git a/project/.Build.scala.swp b/project/.Build.scala.swp deleted file mode 100644 index 1ae8d44f909388666b6ebbe5e2be696b877367ec..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 12288 zcmeI2&u<(x6vy52qZBA0I3t;%NhXC>Z&BMO3;k4fVuMg(|5%|V5C{bBfXPn0)apv5C{YUfj}S-2m}K6Issc8V((!PhpT}sRNsg8d{?*OD-Z|-0)apv z5C{YUfj}S-2m}IwKp+qZ1nxlsqQlr%4>0!QgGe6#|F8f4|NU{set^D*Zb8?fYtRPt zD%60ELf<{c*cZ^7&>PUd#~8Z_eF7~*|2)dr-_Q-{I`lr2K`%hhLw`NO*q_kX&_~dR z&<9WpIuAV!{qitl-#}kNpF^KPpF%ew18L|b=m>NlbQt>fC~Tk`5J8jBZx5j#=wqmW zo`rrrg8ZRt&>D0c`WY;Kf*cdiYbjZ0Gd?=TV@Pk&lE`~9&3lVd8Cnq-q1_;p-X^!&+oLFR zoY%d+JDqnM&*i?fDLSO4JFVAI?M0P&x26@^UdYd;+Sv1GuUL-H{v2@Uww#D5KDQ6S z(rN{;MUNJ6)Nu1)CZ+v(cO-F%x(C}XseVgR>9Htn>77+NLn$KcPNABKic6XmN?Y#FN9q&>mBsGEmu_4P z3){r&C`2i)4EUCiyfWDxlAq!4Fj$o}?&q+K_zDfoXe@sE%=ieSU0{W{lntVeB-gB; zPEPVBy*=UjtA@~4S!@Rd8PTQKv~1jL*^wkL0M&9QLV3x<#JuR>}jaafiAB zh5x^iIrA1O0H)nkqSrWTxFEwx&`Q~zI1_0HH&K25HJaL)if9)avAHQCe&Pg=+^=a% z#%`t-Pk1yJpN!`s^uhUoKB8@v9`W#f?e!_77*hot80LPkxM3)d&V&}{PYL6tGF zsmx-`P107m#lGsZ_k%*6C%fX8+3pR$S`BGe%6ArhhelOkKKUVjkA4W&II?0p?&00r zZ^aI4Z8`igoUtSm1_cQyWzu-z`En>%t}CEnfn;Kfa=-7_#s#)nDM`M3nc97~r}H%D zYp7bkuerB&ZRrd65(>!~VfSz~JnL(|>=s-_o?h`U2H&1bALfQC9s1A2r{dXR zOOD;%yNxV*7uYWV&J{d0qV4P$BfF337pVT>gUQKycbqQ@nYMjO{^&fX%b53$;pH86 R&1%2(yZU;4=QiAR_8*%=MZf?6 diff --git a/project/Build.scala b/project/Build.scala index 9a89db2..2d42f6e 100755 --- a/project/Build.scala +++ b/project/Build.scala @@ -5,7 +5,7 @@ object ApplicationBuild extends Build { override lazy val settings = super.settings ++ Seq( name := "sentinel", - version := "0.7.1", + version := "0.7.2", organization := "nl.gideondk", scalaVersion := "2.11.0", parallelExecution in Test := false, diff --git a/src/main/resources/application.conf b/src/main/resources/application.conf index 8b0979d..839d2c4 100644 --- a/src/main/resources/application.conf +++ b/src/main/resources/application.conf @@ -2,7 +2,7 @@ akka.log-dead-letters-during-shutdown = off akka.log-dead-letters = off akka { - //loglevel = DEBUG + // loglevel = DEBUG io { tcp { // trace-logging = on diff --git a/src/main/scala/nl/gideondk/sentinel/processors/Consumer.scala b/src/main/scala/nl/gideondk/sentinel/processors/Consumer.scala index 141f60f..6b5d5e6 100644 --- a/src/main/scala/nl/gideondk/sentinel/processors/Consumer.scala +++ b/src/main/scala/nl/gideondk/sentinel/processors/Consumer.scala @@ -26,6 +26,8 @@ object Consumer { case object ReleaseStreamConsumer extends StreamConsumerMessage + case object TimeoutStreamConsumer extends StreamConsumerMessage + trait ConsumerData[Evt] case class ConsumerException[Evt](cause: Evt) extends Exception { @@ -34,35 +36,155 @@ object Consumer { case class DataChunk[Evt](c: Evt) extends ConsumerData[Evt] + case class StreamChunk[Evt](c: Evt) extends ConsumerData[Evt] + case class ErrorChunk[Evt](c: Evt) extends ConsumerData[Evt] case class EndOfStream[Evt]() extends ConsumerData[Evt] } -class Consumer[Cmd, Evt](init: Init[WithinActorContext, Cmd, Evt], streamChunkTimeout: Timeout = Timeout(5 seconds)) extends Actor with ActorLogging { +class StreamHandler[Cmd, Evt](streamConsumerTimeout: Timeout = Timeout(10 seconds)) extends Actor with ActorLogging { + import Registration._ + import Consumer._ + import ConsumerAction._ + import context.dispatcher + + context.setReceiveTimeout(streamConsumerTimeout.duration) + + var hook: Option[Promise[ConsumerData[Evt]]] = None + var buffer = Queue[ConsumerData[Evt]]() + + override def postStop() = { + hook.foreach(_.failure(new Exception("Actor quit 
unexpectedly"))) + } + + def receive: Receive = { + case ReleaseStreamConsumer ⇒ + context.stop(self) + sender ! () + + case AskNextChunk ⇒ + sender ! nextStreamChunk + + case chunk: ConsumerData[Evt] ⇒ + hook match { + case Some(x) ⇒ + x.success(chunk) + hook = None + case None ⇒ + buffer :+= chunk + } + + case ReceiveTimeout ⇒ { + context.stop(self) + } + } + + def nextStreamChunk = { + buffer.headOption match { + case Some(c) ⇒ + buffer = buffer.tail + Promise[ConsumerData[Evt]]().success(c) + case None ⇒ + val p = Promise[ConsumerData[Evt]]() + hook = Some(p) + p + } + } +} + +class Consumer[Cmd, Evt](init: Init[WithinActorContext, Cmd, Evt], + streamChunkTimeout: Timeout = Timeout(120 seconds), + streamConsumerTimeout: Timeout = Timeout(10 seconds)) extends Actor with ActorLogging { import Registration._ import Consumer._ import ConsumerAction._ import context.dispatcher - var hooks = Queue[Promise[ConsumerData[Evt]]]() - var buffer = Queue[Promise[ConsumerData[Evt]]]() + implicit val timeout = streamChunkTimeout - var registrations = Queue[Registration[Evt, _]]() + var replyRegistrations = Queue[ReplyRegistration[Evt]]() + var streamRegistrations = Queue[StreamReplyRegistration[Evt]]() - var runningSource: Option[Enumerator[Evt]] = None + var streamBuffer = Queue[ConsumerData[Evt]]() + + var currentRunningStream: Option[ActorRef] = None + + override def postStop() = { + replyRegistrations.foreach(_.promise.failure(new Exception("Actor quit unexpectedly"))) + streamRegistrations.foreach(_.promise.failure(new Exception("Actor quit unexpectedly"))) + } def processAction(data: Evt, action: ConsumerAction) = { + def handleConsumerData(cd: ConsumerData[Evt]) = { - hooks.headOption match { + val registration = replyRegistrations.head + replyRegistrations = replyRegistrations.tail + + registration.promise.completeWith(cd match { + case x: DataChunk[Evt] ⇒ + Future.successful(x.c) + case x: ErrorChunk[Evt] ⇒ + Future.failed(ConsumerException(x.c)) + }) + } + + def handleStreamData(cd: ConsumerData[Evt]) = { + currentRunningStream match { case Some(x) ⇒ - x.success(cd) - hooks = hooks.tail + cd match { + case x: EndOfStream[Evt] ⇒ currentRunningStream = None + case _ ⇒ () + } + + x ! cd + case None ⇒ - buffer :+= Promise.successful(cd) + streamRegistrations.headOption match { + case Some(registration) ⇒ + val streamHandler = context.actorOf(Props(new StreamHandler(streamConsumerTimeout)), name = "streamHandler-" + java.util.UUID.randomUUID.toString) + currentRunningStream = Some(streamHandler) + + val worker = streamHandler + + // TODO: handle stream chunk timeout better + val resource = Enumerator.generateM[Evt] { + (worker ? AskNextChunk).mapTo[Promise[ConsumerData[Evt]]].flatMap(_.future).flatMap { + _ match { + case x: EndOfStream[Evt] ⇒ (worker ? ReleaseStreamConsumer) flatMap (u ⇒ Future(None)) + case x: StreamChunk[Evt] ⇒ Future(Some(x.c)) + case x: ErrorChunk[Evt] ⇒ (worker ? ReleaseStreamConsumer) flatMap (u ⇒ Future.failed(ConsumerException(x.c))) + } + } + } + + def dequeueStreamBuffer(): Unit = { + streamBuffer.headOption match { + case Some(x) ⇒ + streamBuffer = streamBuffer.tail + x match { + case x: EndOfStream[Evt] ⇒ + worker ! x + case x ⇒ + worker ! x + dequeueStreamBuffer() + } + case None ⇒ () + } + } + + dequeueStreamBuffer() + worker ! 
cd + + streamRegistrations = streamRegistrations.tail + registration.promise success resource + + case None ⇒ + streamBuffer :+= cd + } } } @@ -70,89 +192,37 @@ class Consumer[Cmd, Evt](init: Init[WithinActorContext, Cmd, Evt], streamChunkTi case AcceptSignal ⇒ handleConsumerData(DataChunk(data)) case AcceptError ⇒ - handleConsumerData(ErrorChunk(data)) + currentRunningStream match { + case Some(x) ⇒ handleStreamData(ErrorChunk(data)) + case None ⇒ handleConsumerData(ErrorChunk(data)) + } case ConsumeStreamChunk ⇒ - handleConsumerData(DataChunk(data)) // Should eventually seperate data chunks and stream chunks for better socket consistency handling + handleStreamData(StreamChunk(data)) // Should eventually seperate data chunks and stream chunks for better socket consistency handling case EndStream ⇒ - handleConsumerData(EndOfStream[Evt]()) + handleStreamData(EndOfStream[Evt]()) case ConsumeChunkAndEndStream ⇒ - handleConsumerData(DataChunk(data)) - handleConsumerData(EndOfStream[Evt]()) + handleStreamData(StreamChunk(data)) + handleStreamData(EndOfStream[Evt]()) case Ignore ⇒ () } } - def popAndSetHook: Unit = { - val worker = self - val registration = registrations.head - registrations = registrations.tail - - implicit val timeout = streamChunkTimeout - - registration match { - case x: ReplyRegistration[Evt] ⇒ { - x.promise.completeWith(nextChunk.future.flatMap { - _ match { - case x: DataChunk[Evt] ⇒ - Future.successful(x.c) - case x: ErrorChunk[Evt] ⇒ - Future.failed(ConsumerException(x.c)) - } - }) - if (registrations.headOption.isDefined) popAndSetHook - } - - case x: StreamReplyRegistration[Evt] ⇒ { - val resource = Enumerator.generateM[Evt] { - (worker ? AskNextChunk).mapTo[Promise[ConsumerData[Evt]]].flatMap(_.future).flatMap { - _ match { - case x: EndOfStream[Evt] ⇒ (worker ? ReleaseStreamConsumer) flatMap (u ⇒ Future(None)) - case x: DataChunk[Evt] ⇒ Future(Some(x.c)) - case x: ErrorChunk[Evt] ⇒ (worker ? ReleaseStreamConsumer) flatMap (u ⇒ Future.failed(ConsumerException(x.c))) - } - } - } - - runningSource = Some(resource) - x.promise success resource - } - } - } + def handleRegistrations: Receive = { + case rc: ReplyRegistration[Evt] ⇒ + replyRegistrations :+= rc - def nextChunk() = buffer.headOption match { - case Some(p) ⇒ - buffer = buffer.tail - p - case None ⇒ - val p = Promise[ConsumerData[Evt]]() - hooks :+= p - p - } + case rc: StreamReplyRegistration[Evt] ⇒ + streamRegistrations :+= rc - def handleRegistrations: Receive = { - case rc: Registration[Evt, _] ⇒ - registrations :+= rc - if (runningSource.isEmpty) popAndSetHook } var behavior: Receive = handleRegistrations orElse { - case ReleaseStreamConsumer ⇒ - runningSource = None - if (registrations.headOption.isDefined) popAndSetHook - sender ! () - - case AskNextChunk ⇒ - sender ! 
nextChunk() - - case x: ConsumerActionAndData[Evt] ⇒ processAction(x.data, x.action) + case x: ConsumerActionAndData[Evt] ⇒ + processAction(x.data, x.action) } - override def postStop() = { - hooks.foreach(_.failure(new Exception("Actor quit unexpectedly"))) - } - def receive = behavior } diff --git a/src/main/scala/nl/gideondk/sentinel/processors/Producer.scala b/src/main/scala/nl/gideondk/sentinel/processors/Producer.scala index bac5d39..33a7ce7 100644 --- a/src/main/scala/nl/gideondk/sentinel/processors/Producer.scala +++ b/src/main/scala/nl/gideondk/sentinel/processors/Producer.scala @@ -75,9 +75,9 @@ class Producer[Cmd, Evt](init: Init[WithinActorContext, Cmd, Evt], streamChunkTi case x: ProduceStream[Evt, Cmd] ⇒ initStreamProducer(data, x.f) case x: ConsumeStream[Evt, Cmd] ⇒ - val imcomingStreamPromise = Promise[Enumerator[Evt]]() - context.parent ! Registration.StreamReplyRegistration(imcomingStreamPromise) - imcomingStreamPromise.future flatMap ((s) ⇒ initStreamConsumer(data, x.f(_)(s))) + val incomingStreamPromise = Promise[Enumerator[Evt]]() + context.parent ! Registration.StreamReplyRegistration(incomingStreamPromise) + incomingStreamPromise.future flatMap ((s) ⇒ initStreamConsumer(data, x.f(_)(s))) } future.onFailure { @@ -118,6 +118,7 @@ class Producer[Cmd, Evt](init: Init[WithinActorContext, Cmd, Evt], streamChunkTi case x: HandleAsyncResult[Cmd] ⇒ context.parent ! Reply.Response(x.response) case x: HandleStreamResult[Cmd] ⇒ val worker = self + // TODO: What to do when producing Enumerator times out, send error, close stream and continue producing? implicit val timeout = streamChunkTimeout (x.stream |>>> Iteratee.foldM(())((a, b) ⇒ (worker ? StreamProducerChunk(b)).map(x ⇒ ()))).flatMap(x ⇒ (worker ? StreamProducerEnded)) diff --git a/src/test/scala/nl/gideondk/sentinel/FullDuplexSpec.scala b/src/test/scala/nl/gideondk/sentinel/FullDuplexSpec.scala index dc0cf7c..a6055b0 100644 --- a/src/test/scala/nl/gideondk/sentinel/FullDuplexSpec.scala +++ b/src/test/scala/nl/gideondk/sentinel/FullDuplexSpec.scala @@ -47,13 +47,12 @@ class FullDuplexSpec extends WordSpec with ShouldMatchers { "be able to exchange multiple requests simultaneously" in new TestKitSpec { val portNumber = TestHelpers.portNumber.getAndIncrement() val s = server(portNumber) - Thread.sleep(500) val c = client(portNumber) val secC = client(portNumber) - Thread.sleep(500) + Thread.sleep(1000) - val numberOfRequests = 100 + val numberOfRequests = 10 val actions = Future.sequence(List.fill(numberOfRequests)(c ? SimpleCommand(PING_PONG, ""))) val secActions = Future.sequence(List.fill(numberOfRequests)(secC ? 
SimpleCommand(PING_PONG, ""))) @@ -61,6 +60,8 @@ class FullDuplexSpec extends WordSpec with ShouldMatchers { val combined = Future.sequence(List(actions, serverActions.map(_.flatten), secActions)) + val aa = Await.result(actions, 5 seconds) + val results = Await.result(combined, 5 seconds) results(0).length should equal(numberOfRequests) diff --git a/src/test/scala/nl/gideondk/sentinel/RequestResponse.scala b/src/test/scala/nl/gideondk/sentinel/RequestResponse.scala index 9fc453a..8ec3897 100644 --- a/src/test/scala/nl/gideondk/sentinel/RequestResponse.scala +++ b/src/test/scala/nl/gideondk/sentinel/RequestResponse.scala @@ -66,6 +66,20 @@ class RequestResponseSpec extends WordSpec { result.map(_.payload) should equal(items) } + // "test a" in new TestKitSpec { + // val portNumber = TestHelpers.portNumber.getAndIncrement() + // val s = server(portNumber) + // val c = client(portNumber) + // + // val numberOfRequests = 90 * 1000 + // + // val items = List.range(0, numberOfRequests).map(_.toString) + // val action = Future.sequence(items.map(x ⇒ (c ? SimpleCommand(ECHO, x)))) + // val result = Await.result(action, 5 seconds) + // + // result.map(_.payload) should equal(items) + // } + "should automatically reconnect" in new TestKitSpec { val portNumber = TestHelpers.portNumber.getAndIncrement() val s = server(portNumber) diff --git a/src/test/scala/nl/gideondk/sentinel/StreamingSpec.scala b/src/test/scala/nl/gideondk/sentinel/StreamingSpec.scala index 1d49751..b063e17 100644 --- a/src/test/scala/nl/gideondk/sentinel/StreamingSpec.scala +++ b/src/test/scala/nl/gideondk/sentinel/StreamingSpec.scala @@ -21,7 +21,7 @@ class StreamingSpec extends WordSpec { implicit val duration = Duration(5, SECONDS) - def client(portNumber: Int)(implicit system: ActorSystem) = Client.randomRouting("localhost", portNumber, 2, "Worker", SimpleMessage.stages, 0.5 seconds, SimpleServerHandler)(system) + def client(portNumber: Int)(implicit system: ActorSystem) = Client.randomRouting("localhost", portNumber, 1, "Worker", SimpleMessage.stages, 0.5 seconds, SimpleServerHandler)(system) def server(portNumber: Int)(implicit system: ActorSystem) = { val s = Server(portNumber, SimpleServerHandler, stages = SimpleMessage.stages)(system) @@ -93,6 +93,25 @@ class StreamingSpec extends WordSpec { result.isSuccess should equal(true) } + "be able to handle slow or idle consumers while retrieving streams from a server" in new TestKitSpec { + val portNumber = TestHelpers.portNumber.getAndIncrement() + val s = server(portNumber) + val c = client(portNumber) + + val count = 500 + val numberOfActions = 8 + + val newAct = for { + takSome ← (c ?->> SimpleCommand(GENERATE_NUMBERS, count.toString)).flatMap(x ⇒ x &> Enumeratee.take(1) |>>> Iteratee.getChunks) + takSome ← (c ?->> SimpleCommand(GENERATE_NUMBERS, count.toString)).flatMap(x ⇒ x &> Enumeratee.take(1) &> Enumeratee.map(x ⇒ throw new Exception("")) |>>> Iteratee.getChunks).recover { case e ⇒ () } + act ← c ? 
SimpleCommand(PING_PONG, "") + } yield act + + val result = Try(Await.result(newAct, 5 seconds)) + + result.isSuccess should equal(true) + } + "be able to receive send streams simultaneously to a server" in new TestKitSpec { val portNumber = TestHelpers.portNumber.getAndIncrement() val s = server(portNumber) @@ -102,7 +121,7 @@ class StreamingSpec extends WordSpec { val chunks = List.fill(count)(SimpleStreamChunk("ABCDEF")) ++ List(SimpleStreamChunk("")) val action = c ?<<- (SimpleCommand(TOTAL_CHUNK_SIZE, ""), Enumerator(chunks: _*)) - val numberOfActions = 8 + val numberOfActions = 2 val actions = Future.sequence(List.fill(numberOfActions)(c ?<<- (SimpleCommand(TOTAL_CHUNK_SIZE, ""), Enumerator(chunks: _*)))) val localLength = chunks.foldLeft(0)((b, a) ⇒ b + a.payload.length) From ac349aeabfff7967afad4b5d6d3bd5fea1f89775 Mon Sep 17 00:00:00 2001 From: Gideon de Kok Date: Sun, 11 May 2014 15:31:28 +0200 Subject: [PATCH 09/54] Stop producing actor when enumeration fails --- .../scala/nl/gideondk/sentinel/processors/Consumer.scala | 2 +- .../scala/nl/gideondk/sentinel/processors/Producer.scala | 7 ++++++- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/src/main/scala/nl/gideondk/sentinel/processors/Consumer.scala b/src/main/scala/nl/gideondk/sentinel/processors/Consumer.scala index 6b5d5e6..7cd00d7 100644 --- a/src/main/scala/nl/gideondk/sentinel/processors/Consumer.scala +++ b/src/main/scala/nl/gideondk/sentinel/processors/Consumer.scala @@ -95,7 +95,7 @@ class StreamHandler[Cmd, Evt](streamConsumerTimeout: Timeout = Timeout(10 second } } -class Consumer[Cmd, Evt](init: Init[WithinActorContext, Cmd, Evt], +class Consumer[Cmd, Evt](init: Init[WithinActorContext, Cmd, Evt], streamChunkTimeout: Timeout = Timeout(120 seconds), streamConsumerTimeout: Timeout = Timeout(10 seconds)) extends Actor with ActorLogging { import Registration._ diff --git a/src/main/scala/nl/gideondk/sentinel/processors/Producer.scala b/src/main/scala/nl/gideondk/sentinel/processors/Producer.scala index 33a7ce7..aba73ec 100644 --- a/src/main/scala/nl/gideondk/sentinel/processors/Producer.scala +++ b/src/main/scala/nl/gideondk/sentinel/processors/Producer.scala @@ -121,7 +121,12 @@ class Producer[Cmd, Evt](init: Init[WithinActorContext, Cmd, Evt], streamChunkTi // TODO: What to do when producing Enumerator times out, send error, close stream and continue producing? implicit val timeout = streamChunkTimeout - (x.stream |>>> Iteratee.foldM(())((a, b) ⇒ (worker ? StreamProducerChunk(b)).map(x ⇒ ()))).flatMap(x ⇒ (worker ? StreamProducerEnded)) + val consumer = (x.stream |>>> Iteratee.foldM(())((a, b) ⇒ (worker ? StreamProducerChunk(b)).map(x ⇒ ()))).flatMap(x ⇒ (worker ? 
StreamProducerEnded)) + consumer.onFailure { + case e ⇒ + log.error(e, e.getMessage) + context.stop(self) + } context.become(handleRequestAndStreamResponse) From ff64409857fd902cbba0b5ea49236ed37f18cb39 Mon Sep 17 00:00:00 2001 From: Gideon de Kok Date: Sun, 11 May 2014 21:07:43 +0200 Subject: [PATCH 10/54] Minor bugfixes --- README.md | 2 +- project/Build.scala | 2 +- src/main/resources/application.conf | 4 +-- src/main/scala/akka/io/Pipelines.scala | 9 ++++-- .../scala/nl/gideondk/sentinel/Client.scala | 13 +++++--- .../sentinel/processors/Consumer.scala | 2 +- .../nl/gideondk/sentinel/FullDuplexSpec.scala | 3 +- .../gideondk/sentinel/RequestResponse.scala | 31 ++++++------------- .../nl/gideondk/sentinel/StreamingSpec.scala | 3 ++ .../nl/gideondk/sentinel/TestHelpers.scala | 4 +-- .../sentinel/protocols/SimpleMessage.scala | 2 +- 11 files changed, 37 insertions(+), 38 deletions(-) diff --git a/README.md b/README.md index 45097de..e4bd8bb 100644 --- a/README.md +++ b/README.md @@ -46,7 +46,7 @@ Or by adding the repo: to your SBT configuration and adding the `SNAPSHOT` to your library dependencies:
libraryDependencies ++= Seq(
-  "nl.gideondk" %% "sentinel" % "0.7.2"
+  "nl.gideondk" %% "sentinel" % "0.7.3"
 )
 
diff --git a/project/Build.scala b/project/Build.scala index 2d42f6e..2120a43 100755 --- a/project/Build.scala +++ b/project/Build.scala @@ -5,7 +5,7 @@ object ApplicationBuild extends Build { override lazy val settings = super.settings ++ Seq( name := "sentinel", - version := "0.7.2", + version := "0.7.3", organization := "nl.gideondk", scalaVersion := "2.11.0", parallelExecution in Test := false, diff --git a/src/main/resources/application.conf b/src/main/resources/application.conf index 839d2c4..21480cd 100644 --- a/src/main/resources/application.conf +++ b/src/main/resources/application.conf @@ -2,10 +2,10 @@ akka.log-dead-letters-during-shutdown = off akka.log-dead-letters = off akka { - // loglevel = DEBUG + //loglevel = DEBUG io { tcp { -// trace-logging = on + // trace-logging = on } } } \ No newline at end of file diff --git a/src/main/scala/akka/io/Pipelines.scala b/src/main/scala/akka/io/Pipelines.scala index eace52a..54144aa 100644 --- a/src/main/scala/akka/io/Pipelines.scala +++ b/src/main/scala/akka/io/Pipelines.scala @@ -946,9 +946,12 @@ class LengthFieldFrame(maxSize: Int, */ override def commandPipeline = { bs: ByteString ⇒ - val length = - if (lengthIncludesHeader) bs.length + headerSize else bs.length - if (length > maxSize) Seq() + val length = if (lengthIncludesHeader) bs.length + headerSize else bs.length + + if (length < 0 || length > maxSize) + throw new IllegalArgumentException( + s"received too large frame of size $length (max = $maxSize)") + else { val bb = ByteString.newBuilder bb.putLongPart(length, headerSize) diff --git a/src/main/scala/nl/gideondk/sentinel/Client.scala b/src/main/scala/nl/gideondk/sentinel/Client.scala index a2453ad..326a210 100644 --- a/src/main/scala/nl/gideondk/sentinel/Client.scala +++ b/src/main/scala/nl/gideondk/sentinel/Client.scala @@ -60,7 +60,7 @@ object Client { def apply[Cmd, Evt](serverHost: String, serverPort: Int, routerConfig: RouterConfig, description: String = "Sentinel Client", stages: ⇒ PipelineStage[PipelineContext, Cmd, ByteString, Evt, ByteString], workerReconnectTime: FiniteDuration = 2 seconds, resolver: Resolver[Evt, Cmd] = Client.defaultResolver[Cmd, Evt], lowBytes: Long = 100L, highBytes: Long = 5000L, maxBufferSize: Long = 20000L)(implicit system: ActorSystem) = { - val core = system.actorOf(Props(new ClientCore[Cmd, Evt](routerConfig, description, workerReconnectTime, stages, resolver)(lowBytes, highBytes, maxBufferSize)), name = "sentinel-client-" + java.util.UUID.randomUUID.toString) + val core = system.actorOf(Props(new ClientCore[Cmd, Evt](routerConfig, description, workerReconnectTime, stages, resolver)(lowBytes, highBytes, maxBufferSize)).withDispatcher("nl.gideondk.sentinel.sentinel-dispatcher"), name = "sentinel-client-" + java.util.UUID.randomUUID.toString) core ! 
Client.ConnectToServer(new InetSocketAddress(serverHost, serverPort)) new Client[Cmd, Evt] { val actor = core @@ -120,7 +120,7 @@ class ClientAntennaManager[Cmd, Evt](address: InetSocketAddress, stages: ⇒ Pip } class ClientCore[Cmd, Evt](routerConfig: RouterConfig, description: String, reconnectDuration: FiniteDuration, - stages: ⇒ PipelineStage[PipelineContext, Cmd, ByteString, Evt, ByteString], Resolver: Resolver[Evt, Cmd], workerDescription: String = "Sentinel Client Worker")(lowBytes: Long, highBytes: Long, maxBufferSize: Long) extends Actor with ActorLogging { + stages: ⇒ PipelineStage[PipelineContext, Cmd, ByteString, Evt, ByteString], Resolver: Resolver[Evt, Cmd], workerDescription: String = "Sentinel Client Worker")(lowBytes: Long, highBytes: Long, maxBufferSize: Long) extends Actor with ActorLogging with Stash { import context.dispatcher @@ -130,6 +130,7 @@ class ClientCore[Cmd, Evt](routerConfig: RouterConfig, description: String, reco private case class ReconnectRouter(address: InetSocketAddress) var coreRouter: Option[ActorRef] = None + var reconnecting = false def antennaManagerProto(address: InetSocketAddress) = new ClientAntennaManager(address, stages, Resolver)(lowBytes, highBytes, maxBufferSize) @@ -149,6 +150,8 @@ class ClientCore[Cmd, Evt](routerConfig: RouterConfig, description: String, reco context.watch(router) addresses = addresses ++ List(x.addr -> Some(router)) coreRouter = Some(context.system.actorOf(Props.empty.withRouter(RoundRobinRouter(routees = addresses.map(_._2).flatten)))) + reconnecting = false + unstashAll() } else { log.debug("Client is already connected to: " + x.addr) } @@ -161,15 +164,15 @@ class ClientCore[Cmd, Evt](routerConfig: RouterConfig, description: String, reco addresses = addresses diff addresses.find(_._2 == Some(actor)).toList coreRouter = Some(context.system.actorOf(Props.empty.withRouter(RoundRobinRouter(routees = addresses.map(_._2).flatten)))) log.error("Router for: " + r._1 + " died, restarting in: " + reconnectDuration.toString()) + reconnecting = true context.system.scheduler.scheduleOnce(reconnectDuration, self, Client.ConnectToServer(r._1)) case None ⇒ } case x: Command[Cmd, Evt] ⇒ coreRouter match { - case Some(r) ⇒ - r forward x - case None ⇒ x.registration.promise.failure(new Exception("No connection(s) available")) + case Some(r) ⇒ if (reconnecting) stash() else r forward x + case None ⇒ x.registration.promise.failure(new Exception("No connection(s) available")) } case _ ⇒ diff --git a/src/main/scala/nl/gideondk/sentinel/processors/Consumer.scala b/src/main/scala/nl/gideondk/sentinel/processors/Consumer.scala index 7cd00d7..05439f0 100644 --- a/src/main/scala/nl/gideondk/sentinel/processors/Consumer.scala +++ b/src/main/scala/nl/gideondk/sentinel/processors/Consumer.scala @@ -198,7 +198,7 @@ class Consumer[Cmd, Evt](init: Init[WithinActorContext, Cmd, Evt], } case ConsumeStreamChunk ⇒ - handleStreamData(StreamChunk(data)) // Should eventually seperate data chunks and stream chunks for better socket consistency handling + handleStreamData(StreamChunk(data)) case EndStream ⇒ handleStreamData(EndOfStream[Evt]()) case ConsumeChunkAndEndStream ⇒ diff --git a/src/test/scala/nl/gideondk/sentinel/FullDuplexSpec.scala b/src/test/scala/nl/gideondk/sentinel/FullDuplexSpec.scala index a6055b0..794dd81 100644 --- a/src/test/scala/nl/gideondk/sentinel/FullDuplexSpec.scala +++ b/src/test/scala/nl/gideondk/sentinel/FullDuplexSpec.scala @@ -47,10 +47,11 @@ class FullDuplexSpec extends WordSpec with ShouldMatchers { "be able to exchange 
multiple requests simultaneously" in new TestKitSpec { val portNumber = TestHelpers.portNumber.getAndIncrement() val s = server(portNumber) + Thread.sleep(500) val c = client(portNumber) val secC = client(portNumber) - Thread.sleep(1000) + Thread.sleep(500) val numberOfRequests = 10 diff --git a/src/test/scala/nl/gideondk/sentinel/RequestResponse.scala b/src/test/scala/nl/gideondk/sentinel/RequestResponse.scala index 8ec3897..c807c43 100644 --- a/src/test/scala/nl/gideondk/sentinel/RequestResponse.scala +++ b/src/test/scala/nl/gideondk/sentinel/RequestResponse.scala @@ -18,7 +18,7 @@ class RequestResponseSpec extends WordSpec { implicit val duration = Duration(5, SECONDS) - def client(portNumber: Int)(implicit system: ActorSystem) = Client.roundRobinRouting("localhost", portNumber, 64, "Worker", SimpleMessage.stages, 0.1 seconds, SimpleServerHandler, lowBytes = 1024L, highBytes = 1024 * 1024, maxBufferSize = 1024 * 1024 * 50)(system) + def client(portNumber: Int)(implicit system: ActorSystem) = Client.roundRobinRouting("localhost", portNumber, 16, "Worker", SimpleMessage.stages, 0.1 seconds, SimpleServerHandler, lowBytes = 1024L, highBytes = 1024 * 1024, maxBufferSize = 1024 * 1024 * 50)(system) def server(portNumber: Int)(implicit system: ActorSystem) = { val s = Server(portNumber, SimpleServerHandler, stages = SimpleMessage.stages)(system) @@ -42,10 +42,11 @@ class RequestResponseSpec extends WordSpec { val portNumber = TestHelpers.portNumber.getAndIncrement() val s = server(portNumber) val c = client(portNumber) + Thread.sleep(100) - val numberOfRequests = 20 * 1000 + val numberOfRequests = 1000 - val action = Future.sequence(List.fill(numberOfRequests)(c ? SimpleCommand(PING_PONG, ""))) + val action = Future.sequence(List.fill(numberOfRequests)(c ? SimpleCommand(ECHO, LargerPayloadTestHelper.randomBSForSize(1024 * 10)))) val result = Try(Await.result(action, 5 seconds)) result.get.length should equal(numberOfRequests) @@ -57,7 +58,7 @@ class RequestResponseSpec extends WordSpec { val s = server(portNumber) val c = client(portNumber) - val numberOfRequests = 90 * 1000 + val numberOfRequests = 20 * 1000 val items = List.range(0, numberOfRequests).map(_.toString) val action = Future.sequence(items.map(x ⇒ (c ? SimpleCommand(ECHO, x)))) @@ -66,20 +67,6 @@ class RequestResponseSpec extends WordSpec { result.map(_.payload) should equal(items) } - // "test a" in new TestKitSpec { - // val portNumber = TestHelpers.portNumber.getAndIncrement() - // val s = server(portNumber) - // val c = client(portNumber) - // - // val numberOfRequests = 90 * 1000 - // - // val items = List.range(0, numberOfRequests).map(_.toString) - // val action = Future.sequence(items.map(x ⇒ (c ? SimpleCommand(ECHO, x)))) - // val result = Await.result(action, 5 seconds) - // - // result.map(_.payload) should equal(items) - // } - "should automatically reconnect" in new TestKitSpec { val portNumber = TestHelpers.portNumber.getAndIncrement() val s = server(portNumber) @@ -92,11 +79,13 @@ class RequestResponseSpec extends WordSpec { result.isSuccess should equal(true) system.stop(s.actor) - Thread.sleep(1000) - val ss = server(portNumber) + Thread.sleep(250) val secAction = c ? 
SimpleCommand(PING_PONG, "") - val endResult = Try(Await.result(secAction, 5 seconds)) + val ss = server(portNumber) + + Thread.sleep(250) + val endResult = Try(Await.result(secAction, 10 seconds)) endResult.isSuccess should equal(true) } diff --git a/src/test/scala/nl/gideondk/sentinel/StreamingSpec.scala b/src/test/scala/nl/gideondk/sentinel/StreamingSpec.scala index b063e17..9c49763 100644 --- a/src/test/scala/nl/gideondk/sentinel/StreamingSpec.scala +++ b/src/test/scala/nl/gideondk/sentinel/StreamingSpec.scala @@ -105,6 +105,9 @@ class StreamingSpec extends WordSpec { takSome ← (c ?->> SimpleCommand(GENERATE_NUMBERS, count.toString)).flatMap(x ⇒ x &> Enumeratee.take(1) |>>> Iteratee.getChunks) takSome ← (c ?->> SimpleCommand(GENERATE_NUMBERS, count.toString)).flatMap(x ⇒ x &> Enumeratee.take(1) &> Enumeratee.map(x ⇒ throw new Exception("")) |>>> Iteratee.getChunks).recover { case e ⇒ () } act ← c ? SimpleCommand(PING_PONG, "") + act ← c ? SimpleCommand(PING_PONG, "") + takSome ← (c ?->> SimpleCommand(GENERATE_NUMBERS, count.toString)).flatMap(x ⇒ x |>>> Iteratee.getChunks) + act ← c ? SimpleCommand(PING_PONG, "") } yield act val result = Try(Await.result(newAct, 5 seconds)) diff --git a/src/test/scala/nl/gideondk/sentinel/TestHelpers.scala b/src/test/scala/nl/gideondk/sentinel/TestHelpers.scala index bef1dd9..caf2598 100644 --- a/src/test/scala/nl/gideondk/sentinel/TestHelpers.scala +++ b/src/test/scala/nl/gideondk/sentinel/TestHelpers.scala @@ -13,7 +13,7 @@ import java.util.concurrent.atomic.AtomicInteger import protocols._ -abstract class TestKitSpec extends TestKit(ActorSystem()) +abstract class TestKitSpec extends TestKit(ActorSystem(java.util.UUID.randomUUID.toString)) with Suite with ShouldMatchers with BeforeAndAfterAll @@ -56,6 +56,6 @@ object LargerPayloadTestHelper { while (stringB.length() + paddingString.length() < size) stringB.append(paddingString) - ByteString(stringB.toString().getBytes()) + stringB.toString() } } diff --git a/src/test/scala/nl/gideondk/sentinel/protocols/SimpleMessage.scala b/src/test/scala/nl/gideondk/sentinel/protocols/SimpleMessage.scala index aa62590..f041d8c 100644 --- a/src/test/scala/nl/gideondk/sentinel/protocols/SimpleMessage.scala +++ b/src/test/scala/nl/gideondk/sentinel/protocols/SimpleMessage.scala @@ -66,7 +66,7 @@ class PingPongMessageStage extends SymmetricPipelineStage[PipelineContext, Simpl } object SimpleMessage { - val stages = new PingPongMessageStage >> new LengthFieldFrame(1000) + val stages = new PingPongMessageStage >> new LengthFieldFrame(1024 * 1024) val PING_PONG = 1 val TOTAL_CHUNK_SIZE = 2 From 35e88908d6c1709cffedf1f89fd064ae3891e9ab Mon Sep 17 00:00:00 2001 From: Gideon de Kok Date: Fri, 16 May 2014 16:01:41 +0300 Subject: [PATCH 11/54] Fix incorrect error handling during stream processing --- project/Build.scala | 2 +- .../sentinel/processors/Consumer.scala | 114 ++++++++++-------- .../nl/gideondk/sentinel/FullDuplexSpec.scala | 4 +- 3 files changed, 68 insertions(+), 52 deletions(-) diff --git a/project/Build.scala b/project/Build.scala index 2120a43..ae094a1 100755 --- a/project/Build.scala +++ b/project/Build.scala @@ -5,7 +5,7 @@ object ApplicationBuild extends Build { override lazy val settings = super.settings ++ Seq( name := "sentinel", - version := "0.7.3", + version := "0.7.4", organization := "nl.gideondk", scalaVersion := "2.11.0", parallelExecution in Test := false, diff --git a/src/main/scala/nl/gideondk/sentinel/processors/Consumer.scala b/src/main/scala/nl/gideondk/sentinel/processors/Consumer.scala 
index 05439f0..c336996 100644 --- a/src/main/scala/nl/gideondk/sentinel/processors/Consumer.scala +++ b/src/main/scala/nl/gideondk/sentinel/processors/Consumer.scala @@ -10,6 +10,7 @@ import akka.pattern.ask import akka.util.Timeout import play.api.libs.iteratee._ + import nl.gideondk.sentinel._ object Consumer { @@ -106,30 +107,38 @@ class Consumer[Cmd, Evt](init: Init[WithinActorContext, Cmd, Evt], implicit val timeout = streamChunkTimeout - var replyRegistrations = Queue[ReplyRegistration[Evt]]() - var streamRegistrations = Queue[StreamReplyRegistration[Evt]]() + var registrations = Queue[Registration[Evt, _]]() var streamBuffer = Queue[ConsumerData[Evt]]() var currentRunningStream: Option[ActorRef] = None override def postStop() = { - replyRegistrations.foreach(_.promise.failure(new Exception("Actor quit unexpectedly"))) - streamRegistrations.foreach(_.promise.failure(new Exception("Actor quit unexpectedly"))) + registrations.foreach(_.promise.failure(new Exception("Actor quit unexpectedly"))) } def processAction(data: Evt, action: ConsumerAction) = { - def handleConsumerData(cd: ConsumerData[Evt]) = { - val registration = replyRegistrations.head - replyRegistrations = replyRegistrations.tail - - registration.promise.completeWith(cd match { - case x: DataChunk[Evt] ⇒ - Future.successful(x.c) - case x: ErrorChunk[Evt] ⇒ - Future.failed(ConsumerException(x.c)) - }) + val registration = registrations.head + registrations = registrations.tail + + registration match { + case r: ReplyRegistration[_] ⇒ + r.promise.completeWith(cd match { + case x: DataChunk[Evt] ⇒ + Future.successful(x.c) + case x: ErrorChunk[Evt] ⇒ + Future.failed(ConsumerException(x.c)) + }) + + case r: StreamReplyRegistration[_] ⇒ + r.promise.completeWith(cd match { + case x: DataChunk[Evt] ⇒ + Future.failed(new Exception("Unexpectedly received a normal chunk instead of stream chunk")) + case x: ErrorChunk[Evt] ⇒ + Future.failed(ConsumerException(x.c)) + }) + } } def handleStreamData(cd: ConsumerData[Evt]) = { @@ -143,44 +152,51 @@ class Consumer[Cmd, Evt](init: Init[WithinActorContext, Cmd, Evt], x ! cd case None ⇒ - streamRegistrations.headOption match { + registrations.headOption match { case Some(registration) ⇒ - val streamHandler = context.actorOf(Props(new StreamHandler(streamConsumerTimeout)), name = "streamHandler-" + java.util.UUID.randomUUID.toString) - currentRunningStream = Some(streamHandler) - - val worker = streamHandler - - // TODO: handle stream chunk timeout better - val resource = Enumerator.generateM[Evt] { - (worker ? AskNextChunk).mapTo[Promise[ConsumerData[Evt]]].flatMap(_.future).flatMap { - _ match { - case x: EndOfStream[Evt] ⇒ (worker ? ReleaseStreamConsumer) flatMap (u ⇒ Future(None)) - case x: StreamChunk[Evt] ⇒ Future(Some(x.c)) - case x: ErrorChunk[Evt] ⇒ (worker ? ReleaseStreamConsumer) flatMap (u ⇒ Future.failed(ConsumerException(x.c))) + registration match { + case r: ReplyRegistration[_] ⇒ + throw new Exception("Unexpectedly received a stream chunk instead of normal reply") // TODO: use specific exception classes + case r: StreamReplyRegistration[_] ⇒ { + val streamHandler = context.actorOf(Props(new StreamHandler(streamConsumerTimeout)), name = "streamHandler-" + java.util.UUID.randomUUID.toString) + currentRunningStream = Some(streamHandler) + + val worker = streamHandler + + // TODO: handle stream chunk timeout better + val resource = Enumerator.generateM[Evt] { + (worker ? 
AskNextChunk).mapTo[Promise[ConsumerData[Evt]]].flatMap(_.future).flatMap { + _ match { + case x: EndOfStream[Evt] ⇒ (worker ? ReleaseStreamConsumer) flatMap (u ⇒ Future(None)) + case x: StreamChunk[Evt] ⇒ Future(Some(x.c)) + case x: ErrorChunk[Evt] ⇒ (worker ? ReleaseStreamConsumer) flatMap (u ⇒ Future.failed(ConsumerException(x.c))) + } + } } - } - } - def dequeueStreamBuffer(): Unit = { - streamBuffer.headOption match { - case Some(x) ⇒ - streamBuffer = streamBuffer.tail - x match { - case x: EndOfStream[Evt] ⇒ - worker ! x - case x ⇒ - worker ! x - dequeueStreamBuffer() + def dequeueStreamBuffer(): Unit = { + streamBuffer.headOption match { + case Some(x) ⇒ + streamBuffer = streamBuffer.tail + x match { + case x: EndOfStream[Evt] ⇒ + worker ! x + case x ⇒ + worker ! x + dequeueStreamBuffer() + } + case None ⇒ () } - case None ⇒ () - } - } + } + + dequeueStreamBuffer() + worker ! cd - dequeueStreamBuffer() - worker ! cd + registrations = registrations.tail + r.promise success resource + } - streamRegistrations = streamRegistrations.tail - registration.promise success resource + } case None ⇒ streamBuffer :+= cd @@ -211,10 +227,10 @@ class Consumer[Cmd, Evt](init: Init[WithinActorContext, Cmd, Evt], def handleRegistrations: Receive = { case rc: ReplyRegistration[Evt] ⇒ - replyRegistrations :+= rc + registrations :+= rc case rc: StreamReplyRegistration[Evt] ⇒ - streamRegistrations :+= rc + registrations :+= rc } @@ -225,4 +241,4 @@ class Consumer[Cmd, Evt](init: Init[WithinActorContext, Cmd, Evt], } def receive = behavior -} +} \ No newline at end of file diff --git a/src/test/scala/nl/gideondk/sentinel/FullDuplexSpec.scala b/src/test/scala/nl/gideondk/sentinel/FullDuplexSpec.scala index 794dd81..e044d20 100644 --- a/src/test/scala/nl/gideondk/sentinel/FullDuplexSpec.scala +++ b/src/test/scala/nl/gideondk/sentinel/FullDuplexSpec.scala @@ -47,11 +47,11 @@ class FullDuplexSpec extends WordSpec with ShouldMatchers { "be able to exchange multiple requests simultaneously" in new TestKitSpec { val portNumber = TestHelpers.portNumber.getAndIncrement() val s = server(portNumber) - Thread.sleep(500) + Thread.sleep(1000) val c = client(portNumber) val secC = client(portNumber) - Thread.sleep(500) + Thread.sleep(1000) val numberOfRequests = 10 From c88f3874811ecbbf7465da5ccd31d8c3a82626ed Mon Sep 17 00:00:00 2001 From: Gideon de Kok Date: Mon, 26 May 2014 13:04:00 +0200 Subject: [PATCH 12/54] Update README --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index e4bd8bb..dd86e97 100644 --- a/README.md +++ b/README.md @@ -46,7 +46,7 @@ Or by adding the repo: to your SBT configuration and adding the `SNAPSHOT` to your library dependencies:
libraryDependencies ++= Seq(
-  "nl.gideondk" %% "sentinel" % "0.7.3"
+  "nl.gideondk" %% "sentinel" % "0.7.4"
 )
 
From 1832fc6af181714a629a5c1e862712274ea57bf2 Mon Sep 17 00:00:00 2001 From: Gideon de Kok Date: Mon, 26 May 2014 13:06:54 +0200 Subject: [PATCH 13/54] Update README --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index dd86e97..472f8e0 100644 --- a/README.md +++ b/README.md @@ -43,7 +43,7 @@ You can install Sentinel through source (by publishing it into your local Ivy re Or by adding the repo:
"gideondk-repo" at "https://raw.github.com/gideondk/gideondk-mvn-repo/master"
-to your SBT configuration and adding the `SNAPSHOT` to your library dependencies:
+to your SBT configuration and adding Sentinel to your library dependencies (currently only built against Scala 2.11):
libraryDependencies ++= Seq(
   "nl.gideondk" %% "sentinel" % "0.7.4"

From 4d7bd64503311db57e99b8a526830a42e97d1677 Mon Sep 17 00:00:00 2001
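For readers following the README changes above, here is a minimal round trip against the API these patches exercise. It is only a sketch: SimpleMessage, SimpleServerHandler, SimpleCommand and PING_PONG come from this repository's test sources (src/test/scala/nl/gideondk/sentinel/protocols), the port number is arbitrary, and the boilerplate mirrors what the specs in this series do.

<pre>
import scala.concurrent.Await
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._

import akka.actor.ActorSystem

import nl.gideondk.sentinel._
import nl.gideondk.sentinel.protocols._

object PingPongExample extends App {
  implicit val system = ActorSystem("ping-pong-example")
  implicit val duration = Duration(5, SECONDS)

  // Server and client are wired the same way the specs in this patch series wire them.
  val server = Server(9999, SimpleServerHandler, stages = SimpleMessage.stages)(system)
  val client = Client.randomRouting("localhost", 9999, 1, "Worker",
    SimpleMessage.stages, 0.5.seconds, SimpleServerHandler)(system)

  // `?` sends a command and yields a Future with the decoded response.
  val pong = client ? SimpleCommand(SimpleMessage.PING_PONG, "")
  println(Await.result(pong, duration))

  system.shutdown()
}
</pre>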
From: Gideon de Kok 
Date: Mon, 7 Jul 2014 12:13:15 +0200
Subject: [PATCH 14/54] Add non-pipelined functionality

---
 project/Build.scala                           | 10 ++--
 .../scala/nl/gideondk/sentinel/Antenna.scala  | 53 +++++++++++++++----
 .../scala/nl/gideondk/sentinel/Client.scala   | 20 +++----
 .../nl/gideondk/sentinel/StreamingSpec.scala  | 19 +++++++
 4 files changed, 77 insertions(+), 25 deletions(-)

diff --git a/project/Build.scala b/project/Build.scala
index ae094a1..0f650d9 100755
--- a/project/Build.scala
+++ b/project/Build.scala
@@ -5,9 +5,9 @@ object ApplicationBuild extends Build {
   override lazy val settings = super.settings ++
     Seq(
       name := "sentinel",
-      version := "0.7.4",
+      version := "0.7.5",
       organization := "nl.gideondk",
-      scalaVersion := "2.11.0",
+      scalaVersion := "2.11.1",
       parallelExecution in Test := false,
       resolvers ++= Seq(Resolver.mavenLocal,
         "gideondk-repo" at "https://raw.github.com/gideondk/gideondk-mvn-repo/master",
@@ -24,10 +24,10 @@ object ApplicationBuild extends Build {
   val appDependencies = Seq(
     "org.scalatest" %% "scalatest" % "2.1.4" % "test",
 
-    "com.typesafe.play" % "play-iteratees_2.10" % "2.2.0",
+    "com.typesafe.play" % "play-iteratees_2.10" % "2.3.0",
 
-    "com.typesafe.akka" %% "akka-actor" % "2.3.2",
-    "com.typesafe.akka" %% "akka-testkit" % "2.3.2"
+    "com.typesafe.akka" %% "akka-actor" % "2.3.4",
+    "com.typesafe.akka" %% "akka-testkit" % "2.3.4"
   )
 
   lazy val root = Project(id = "sentinel",
diff --git a/src/main/scala/nl/gideondk/sentinel/Antenna.scala b/src/main/scala/nl/gideondk/sentinel/Antenna.scala
index 772f40b..cc6bec6 100644
--- a/src/main/scala/nl/gideondk/sentinel/Antenna.scala
+++ b/src/main/scala/nl/gideondk/sentinel/Antenna.scala
@@ -1,15 +1,14 @@
 package nl.gideondk.sentinel
 
-import scala.concurrent.Future
-
 import akka.actor._
-
-import akka.io._
 import akka.io.TcpPipelineHandler.{ Init, WithinActorContext }
+import akka.io._
+import nl.gideondk.sentinel.processors._
+import scala.collection.immutable.Queue
 
-import processors._
+import scala.concurrent.Future
 
-class Antenna[Cmd, Evt](init: Init[WithinActorContext, Cmd, Evt], Resolver: Resolver[Evt, Cmd]) extends Actor with ActorLogging with Stash {
+class Antenna[Cmd, Evt](init: Init[WithinActorContext, Cmd, Evt], resolver: Resolver[Evt, Cmd], allowPipelining: Boolean = true) extends Actor with ActorLogging with Stash {
 
   import context.dispatcher
 
@@ -17,6 +16,9 @@ class Antenna[Cmd, Evt](init: Init[WithinActorContext, Cmd, Evt], Resolver: Reso
     val consumer = context.actorOf(Props(new Consumer(init)), name = "resolver")
     val producer = context.actorOf(Props(new Producer(init)).withDispatcher("nl.gideondk.sentinel.sentinel-dispatcher"), name = "producer")
 
+    var commandQueue = Queue.empty[init.Command]
+    var commandInProcess = false
+
     context watch tcpHandler
     context watch producer
     context watch consumer
@@ -33,14 +35,38 @@ class Antenna[Cmd, Evt](init: Init[WithinActorContext, Cmd, Evt], Resolver: Reso
           stash()
       }
 
+      def popCommand() = if (!commandQueue.isEmpty) {
+        val cmd = commandQueue.head
+        commandQueue = commandQueue.tail
+        tcpHandler ! cmd
+      } else {
+        commandInProcess = false
+      }
+
       def handleCommands: Receive = {
         case x: Command.Ask[Cmd, Evt] ⇒
           consumer ! x.registration
-          tcpHandler ! init.Command(x.payload)
+
+          val cmd = init.Command(x.payload)
+          if (allowPipelining) tcpHandler ! cmd
+          else if (commandInProcess) {
+            commandQueue :+= cmd
+          } else {
+            commandInProcess = true
+            tcpHandler ! cmd
+          }
 
         case x: Command.AskStream[Cmd, Evt] ⇒
           consumer ! x.registration
-          tcpHandler ! init.Command(x.payload)
+
+          val cmd = init.Command(x.payload)
+          if (allowPipelining) tcpHandler ! cmd
+          else if (commandInProcess) {
+            commandQueue :+= cmd
+          } else {
+            commandInProcess = true
+            tcpHandler ! cmd
+          }
 
         case x: Command.SendStream[Cmd, Evt] ⇒
           consumer ! x.registration
@@ -60,10 +86,17 @@ class Antenna[Cmd, Evt](init: Init[WithinActorContext, Cmd, Evt], Resolver: Reso
         consumer ! x
 
       case init.Event(data) ⇒ {
-        Resolver.process(data) match {
+        resolver.process(data) match {
           case x: ProducerAction[Evt, Cmd] ⇒ producer ! ProducerActionAndData[Evt, Cmd](x, data)
-          case x: ConsumerAction           ⇒ consumer ! ConsumerActionAndData[Evt](x, data)
+
+          case ConsumerAction.ConsumeStreamChunk ⇒
+            consumer ! ConsumerActionAndData[Evt](ConsumerAction.ConsumeStreamChunk, data)
+
+          case x: ConsumerAction ⇒
+            consumer ! ConsumerActionAndData[Evt](x, data)
+            if (!allowPipelining) popCommand()
         }
+
       }
 
       case BackpressureBuffer.HighWatermarkReached ⇒ {
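The command handling above boils down to a small "one in flight" pattern: when pipelining is disallowed, outgoing commands are parked in a queue and the next one is only written once the previous response has been consumed. A stripped-down, illustrative sketch of that pattern (not the actual Antenna code; the names are made up):

<pre>
import scala.collection.immutable.Queue
import akka.actor.{ Actor, ActorRef }

case class Send(cmd: Any)
case object ReplyConsumed

// Illustrative only: writes one command at a time to `transport`,
// releasing the next queued command once a reply has been consumed.
class OneInFlightSender(transport: ActorRef) extends Actor {
  var pending = Queue.empty[Any]
  var inFlight = false

  def receive = {
    case Send(cmd) if inFlight ⇒ pending :+= cmd

    case Send(cmd) ⇒
      inFlight = true
      transport ! cmd

    case ReplyConsumed ⇒
      pending.dequeueOption match {
        case Some((next, rest)) ⇒ pending = rest; transport ! next
        case None               ⇒ inFlight = false
      }
  }
}
</pre>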
diff --git a/src/main/scala/nl/gideondk/sentinel/Client.scala b/src/main/scala/nl/gideondk/sentinel/Client.scala
index 326a210..7b6f5b2 100644
--- a/src/main/scala/nl/gideondk/sentinel/Client.scala
+++ b/src/main/scala/nl/gideondk/sentinel/Client.scala
@@ -59,24 +59,24 @@ object Client {
   }
 
   def apply[Cmd, Evt](serverHost: String, serverPort: Int, routerConfig: RouterConfig,
-                      description: String = "Sentinel Client", stages: ⇒ PipelineStage[PipelineContext, Cmd, ByteString, Evt, ByteString], workerReconnectTime: FiniteDuration = 2 seconds, resolver: Resolver[Evt, Cmd] = Client.defaultResolver[Cmd, Evt], lowBytes: Long = 100L, highBytes: Long = 5000L, maxBufferSize: Long = 20000L)(implicit system: ActorSystem) = {
-    val core = system.actorOf(Props(new ClientCore[Cmd, Evt](routerConfig, description, workerReconnectTime, stages, resolver)(lowBytes, highBytes, maxBufferSize)).withDispatcher("nl.gideondk.sentinel.sentinel-dispatcher"), name = "sentinel-client-" + java.util.UUID.randomUUID.toString)
+                      description: String = "Sentinel Client", stages: ⇒ PipelineStage[PipelineContext, Cmd, ByteString, Evt, ByteString], workerReconnectTime: FiniteDuration = 2 seconds, resolver: Resolver[Evt, Cmd] = Client.defaultResolver[Cmd, Evt], allowPipelining: Boolean = true, lowBytes: Long = 100L, highBytes: Long = 5000L, maxBufferSize: Long = 20000L)(implicit system: ActorSystem) = {
+    val core = system.actorOf(Props(new ClientCore[Cmd, Evt](routerConfig, description, workerReconnectTime, stages, resolver, allowPipelining)(lowBytes, highBytes, maxBufferSize)).withDispatcher("nl.gideondk.sentinel.sentinel-dispatcher"), name = "sentinel-client-" + java.util.UUID.randomUUID.toString)
     core ! Client.ConnectToServer(new InetSocketAddress(serverHost, serverPort))
     new Client[Cmd, Evt] {
       val actor = core
     }
   }
 
-  def randomRouting[Cmd, Evt](serverHost: String, serverPort: Int, numberOfConnections: Int, description: String = "Sentinel Client", stages: ⇒ PipelineStage[PipelineContext, Cmd, ByteString, Evt, ByteString], workerReconnectTime: FiniteDuration = 2 seconds, resolver: Resolver[Evt, Cmd] = Client.defaultResolver[Cmd, Evt], lowBytes: Long = 100L, highBytes: Long = 5000L, maxBufferSize: Long = 20000L)(implicit system: ActorSystem) = {
-    apply(serverHost, serverPort, RandomRouter(numberOfConnections), description, stages, workerReconnectTime, resolver, lowBytes, highBytes, maxBufferSize)
+  def randomRouting[Cmd, Evt](serverHost: String, serverPort: Int, numberOfConnections: Int, description: String = "Sentinel Client", stages: ⇒ PipelineStage[PipelineContext, Cmd, ByteString, Evt, ByteString], workerReconnectTime: FiniteDuration = 2 seconds, resolver: Resolver[Evt, Cmd] = Client.defaultResolver[Cmd, Evt], allowPipelining: Boolean = true, lowBytes: Long = 100L, highBytes: Long = 5000L, maxBufferSize: Long = 20000L)(implicit system: ActorSystem) = {
+    apply(serverHost, serverPort, RandomRouter(numberOfConnections), description, stages, workerReconnectTime, resolver, allowPipelining, lowBytes, highBytes, maxBufferSize)
   }
 
-  def roundRobinRouting[Cmd, Evt](serverHost: String, serverPort: Int, numberOfConnections: Int, description: String = "Sentinel Client", stages: ⇒ PipelineStage[PipelineContext, Cmd, ByteString, Evt, ByteString], workerReconnectTime: FiniteDuration = 2 seconds, resolver: Resolver[Evt, Cmd] = Client.defaultResolver[Cmd, Evt], lowBytes: Long = 100L, highBytes: Long = 5000L, maxBufferSize: Long = 20000L)(implicit system: ActorSystem) = {
-    apply(serverHost, serverPort, RoundRobinRouter(numberOfConnections), description, stages, workerReconnectTime, resolver, lowBytes, highBytes, maxBufferSize)
+  def roundRobinRouting[Cmd, Evt](serverHost: String, serverPort: Int, numberOfConnections: Int, description: String = "Sentinel Client", stages: ⇒ PipelineStage[PipelineContext, Cmd, ByteString, Evt, ByteString], workerReconnectTime: FiniteDuration = 2 seconds, resolver: Resolver[Evt, Cmd] = Client.defaultResolver[Cmd, Evt], allowPipelining: Boolean = true, lowBytes: Long = 100L, highBytes: Long = 5000L, maxBufferSize: Long = 20000L)(implicit system: ActorSystem) = {
+    apply(serverHost, serverPort, RoundRobinRouter(numberOfConnections), description, stages, workerReconnectTime, resolver, allowPipelining, lowBytes, highBytes, maxBufferSize)
   }
 }
 
-class ClientAntennaManager[Cmd, Evt](address: InetSocketAddress, stages: ⇒ PipelineStage[PipelineContext, Cmd, ByteString, Evt, ByteString], Resolver: Resolver[Evt, Cmd])(lowBytes: Long, highBytes: Long, maxBufferSize: Long) extends Actor with ActorLogging with Stash {
+class ClientAntennaManager[Cmd, Evt](address: InetSocketAddress, stages: ⇒ PipelineStage[PipelineContext, Cmd, ByteString, Evt, ByteString], resolver: Resolver[Evt, Cmd], allowPipelining: Boolean = true)(lowBytes: Long, highBytes: Long, maxBufferSize: Long) extends Actor with ActorLogging with Stash {
   val tcp = akka.io.IO(Tcp)(context.system)
 
   override def preStart = tcp ! Tcp.Connect(address)
@@ -97,7 +97,7 @@ class ClientAntennaManager[Cmd, Evt](address: InetSocketAddress, stages: ⇒ Pip
           new TcpReadWriteAdapter >>
           new BackpressureBuffer(lowBytes, highBytes, maxBufferSize))
 
-      val antenna = context.actorOf(Props(new Antenna(init, Resolver)).withDispatcher("nl.gideondk.sentinel.sentinel-dispatcher"))
+      val antenna = context.actorOf(Props(new Antenna(init, resolver, allowPipelining)).withDispatcher("nl.gideondk.sentinel.sentinel-dispatcher"))
       val handler = context.actorOf(TcpPipelineHandler.props(init, sender, antenna).withDeploy(Deploy.local))
       context watch handler
 
@@ -120,7 +120,7 @@ class ClientAntennaManager[Cmd, Evt](address: InetSocketAddress, stages: ⇒ Pip
 }
 
 class ClientCore[Cmd, Evt](routerConfig: RouterConfig, description: String, reconnectDuration: FiniteDuration,
-                           stages: ⇒ PipelineStage[PipelineContext, Cmd, ByteString, Evt, ByteString], Resolver: Resolver[Evt, Cmd], workerDescription: String = "Sentinel Client Worker")(lowBytes: Long, highBytes: Long, maxBufferSize: Long) extends Actor with ActorLogging with Stash {
+                           stages: ⇒ PipelineStage[PipelineContext, Cmd, ByteString, Evt, ByteString], resolver: Resolver[Evt, Cmd], allowPipelining: Boolean = true, workerDescription: String = "Sentinel Client Worker")(lowBytes: Long, highBytes: Long, maxBufferSize: Long) extends Actor with ActorLogging with Stash {
 
   import context.dispatcher
 
@@ -133,7 +133,7 @@ class ClientCore[Cmd, Evt](routerConfig: RouterConfig, description: String, reco
   var reconnecting = false
 
   def antennaManagerProto(address: InetSocketAddress) =
-    new ClientAntennaManager(address, stages, Resolver)(lowBytes, highBytes, maxBufferSize)
+    new ClientAntennaManager(address, stages, resolver, allowPipelining)(lowBytes, highBytes, maxBufferSize)
 
   def routerProto(address: InetSocketAddress) =
     context.actorOf(Props(antennaManagerProto(address)).withRouter(routerConfig).withDispatcher("nl.gideondk.sentinel.sentinel-dispatcher"))
diff --git a/src/test/scala/nl/gideondk/sentinel/StreamingSpec.scala b/src/test/scala/nl/gideondk/sentinel/StreamingSpec.scala
index 9c49763..0f08092 100644
--- a/src/test/scala/nl/gideondk/sentinel/StreamingSpec.scala
+++ b/src/test/scala/nl/gideondk/sentinel/StreamingSpec.scala
@@ -22,6 +22,7 @@ class StreamingSpec extends WordSpec {
   implicit val duration = Duration(5, SECONDS)
 
   def client(portNumber: Int)(implicit system: ActorSystem) = Client.randomRouting("localhost", portNumber, 1, "Worker", SimpleMessage.stages, 0.5 seconds, SimpleServerHandler)(system)
+  def nonPipelinedClient(portNumber: Int)(implicit system: ActorSystem) = Client.randomRouting("localhost", portNumber, 1, "Worker", SimpleMessage.stages, 0.5 seconds, SimpleServerHandler, false)(system)
 
   def server(portNumber: Int)(implicit system: ActorSystem) = {
     val s = Server(portNumber, SimpleServerHandler, stages = SimpleMessage.stages)(system)
@@ -93,6 +94,24 @@ class StreamingSpec extends WordSpec {
       result.isSuccess should equal(true)
     }
 
+    "be able to receive multiple streams and normal commands simultaneously from a server in a non-pipelined environment" in new TestKitSpec {
+      val portNumber = TestHelpers.portNumber.getAndIncrement()
+      val s = server(portNumber)
+      val c = nonPipelinedClient(portNumber)
+
+      val count = 500
+      val numberOfActions = 8
+
+      val streamAction = Future.sequence(List.fill(numberOfActions)((c ?->> SimpleCommand(GENERATE_NUMBERS, count.toString)).flatMap(x ⇒ x |>>> Iteratee.getChunks)))
+      val action = Future.sequence(List.fill(count)(c ? SimpleCommand(PING_PONG, "")))
+
+      val actions = Future.sequence(List(streamAction, action))
+
+      val result = Try(Await.result(actions.map(_.flatten), 5 seconds))
+
+      result.isSuccess should equal(true)
+    }
+
     "be able to handle slow or idle consumers while retrieving streams from a server" in new TestKitSpec {
       val portNumber = TestHelpers.portNumber.getAndIncrement()
       val s = server(portNumber)

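The changes above thread the new `allowPipelining` flag from `randomRouting`/`roundRobinRouting` down to the `Antenna`, and the extra StreamingSpec case drives a client with pipelining switched off. As a rough illustration of the resulting call sites (a sketch only: `SimpleMessage.stages` and `SimpleServerHandler` are the test fixtures from `src/test`, and the host and port are made up), a pipelined and a non-pipelined client could be built like this:

<pre><code>// Sketch, not part of the patch: constructing clients with and without request pipelining.
// SimpleMessage.stages / SimpleServerHandler come from the test protocol; host/port are illustrative.
import scala.concurrent.duration._
import akka.actor.ActorSystem
import nl.gideondk.sentinel.Client
import nl.gideondk.sentinel.protocols._

implicit val system = ActorSystem("pipelining-sketch")

// allowPipelining defaults to true in the new signatures above.
val pipelined = Client.randomRouting("localhost", 9999, 16, "Worker",
  SimpleMessage.stages, 0.5 seconds, SimpleServerHandler)

// The extra Boolean argument disables pipelining, mirroring nonPipelinedClient in StreamingSpec.
val nonPipelined = Client.randomRouting("localhost", 9999, 1, "Worker",
  SimpleMessage.stages, 0.5 seconds, SimpleServerHandler, allowPipelining = false)
</code></pre>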
From ee098a6c5ad4fccbd7c74762ac27209321308edb Mon Sep 17 00:00:00 2001
From: Gideon de Kok 
Date: Mon, 7 Jul 2014 12:14:29 +0200
Subject: [PATCH 15/54] Update README

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 472f8e0..429ed77 100644
--- a/README.md
+++ b/README.md
@@ -46,7 +46,7 @@ Or by adding the repo:
 to your SBT configuration and adding Sentinel to your library dependencies (currently only build against Scala 2.11):
 
 
<pre><code>libraryDependencies ++= Seq(
-  "nl.gideondk" %% "sentinel" % "0.7.4"
+  "nl.gideondk" %% "sentinel" % "0.7.5"
 )
</code></pre>

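The README hunk above only changes the version constant; a downstream project also needs the repository. A minimal sketch, assuming an sbt build and the `gideondk-repo` raw-GitHub resolver that the project's own Build.scala registers:

<pre><code>// build.sbt sketch for a consumer of this release (resolver taken from Build.scala; adjust as needed)
resolvers += "gideondk-repo" at "https://raw.github.com/gideondk/gideondk-mvn-repo/master"

libraryDependencies ++= Seq(
  "nl.gideondk" %% "sentinel" % "0.7.5"
)
</code></pre>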
From 3da3c680a24dc5276ac0977c0c8ccb0c2f175510 Mon Sep 17 00:00:00 2001
From: crispy
Date: Sun, 13 Jul 2014 21:53:41 -0700
Subject: [PATCH 16/54] Use play-iteratees for 2.11 and update to current release. move akka-testkit to test config. update scalatest to current. finally bump version ever so slightly.

---
 project/Build.scala | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/project/Build.scala b/project/Build.scala
index 0f650d9..3afd4c0 100755
--- a/project/Build.scala
+++ b/project/Build.scala
@@ -5,7 +5,7 @@ object ApplicationBuild extends Build {
 
   override lazy val settings = super.settings ++ Seq(
     name := "sentinel",
-    version := "0.7.5",
+    version := "0.7.5.1",
     organization := "nl.gideondk",
     scalaVersion := "2.11.1",
     parallelExecution in Test := false,
@@ -22,12 +22,12 @@ object ApplicationBuild extends Build {
   )
 
   val appDependencies = Seq(
-    "org.scalatest" %% "scalatest" % "2.1.4" % "test",
+    "org.scalatest" %% "scalatest" % "2.2.0" % "test",
 
-    "com.typesafe.play" % "play-iteratees_2.10" % "2.3.0",
+    "com.typesafe.play" %% "play-iteratees" % "2.3.1",
 
     "com.typesafe.akka" %% "akka-actor" % "2.3.4",
-    "com.typesafe.akka" %% "akka-testkit" % "2.3.4"
+    "com.typesafe.akka" %% "akka-testkit" % "2.3.4" % "test"
   )
 
   lazy val root = Project(id = "sentinel",

From dcb8f36a31307730919bec45b1d343513a2aea6e Mon Sep 17 00:00:00 2001
From: Gideon de Kok
Date: Mon, 14 Jul 2014 08:08:16 +0200
Subject: [PATCH 17/54] Update README

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 429ed77..26a63d6 100644
--- a/README.md
+++ b/README.md
@@ -46,7 +46,7 @@ Or by adding the repo:
 to your SBT configuration and adding Sentinel to your library dependencies (currently only build against Scala 2.11):
 
<pre><code>libraryDependencies ++= Seq(
-  "nl.gideondk" %% "sentinel" % "0.7.5"
+  "nl.gideondk" %% "sentinel" % "0.7.5.1"
 )
</code></pre>

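The patch that follows moves Sentinel to Akka 2.4, where the Akka 2.3 style `RandomRouter`/`RoundRobinRouter` constructors it relied on no longer exist: a Pool creates and supervises its own routees, while a Group routes to already running actors addressed by path, which is what `ClientCore` needs when it rebuilds its router around live antenna managers. A minimal sketch of that API difference (the `Worker` actor and the routee paths are illustrative, not part of Sentinel):

<pre><code>// Akka 2.4 router sketch (assumes akka-actor 2.4.x); Worker and the paths below are placeholders.
import akka.actor.{ Actor, ActorSystem, Props }
import akka.routing.{ RandomPool, RoundRobinGroup }

class Worker extends Actor { def receive = { case msg ⇒ sender() ! msg } }

object RouterSketch extends App {
  val system = ActorSystem("router-sketch")

  // A Pool spawns its own routees (replacement for RandomRouter/RoundRobinRouter with nrOfInstances).
  val pooled = system.actorOf(RandomPool(16).props(Props[Worker]), "pooled")

  // A Group routes to existing actors by path, the shape used when rebuilding a router
  // from actors that are already connected.
  val grouped = system.actorOf(RoundRobinGroup(List("/user/antenna-1", "/user/antenna-2")).props(), "grouped")
}
</code></pre>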
From ecea9d8745f53c7780e86e6364be100fa1a18d39 Mon Sep 17 00:00:00 2001
From: Gideon de Kok
Date: Thu, 30 Jun 2016 14:27:15 +0200
Subject: [PATCH 18/54] Update to newest Scala, Akka versions

---
 project/Build.scala                              | 8 ++++----
 project/build.properties                         | 2 +-
 src/main/scala/nl/gideondk/sentinel/Client.scala | 8 ++++----
 3 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/project/Build.scala b/project/Build.scala
index 3afd4c0..8ea4602 100755
--- a/project/Build.scala
+++ b/project/Build.scala
@@ -5,9 +5,9 @@ object ApplicationBuild extends Build {
 
   override lazy val settings = super.settings ++ Seq(
     name := "sentinel",
-    version := "0.7.5.1",
+    version := "0.8-SNAPSHOT",
     organization := "nl.gideondk",
-    scalaVersion := "2.11.1",
+    scalaVersion := "2.11.8",
     parallelExecution in Test := false,
     resolvers ++= Seq(Resolver.mavenLocal,
       "gideondk-repo" at "https://raw.github.com/gideondk/gideondk-mvn-repo/master",
@@ -26,8 +26,8 @@ object ApplicationBuild extends Build {
 
     "com.typesafe.play" %% "play-iteratees" % "2.3.1",
 
-    "com.typesafe.akka" %% "akka-actor" % "2.3.4",
-    "com.typesafe.akka" %% "akka-testkit" % "2.3.4" % "test"
+    "com.typesafe.akka" %% "akka-actor" % "2.4.6",
+    "com.typesafe.akka" %% "akka-testkit" % "2.4.6" % "test"
   )
 
   lazy val root = Project(id = "sentinel",

diff --git a/project/build.properties b/project/build.properties
index 37b489c..43b8278 100644
--- a/project/build.properties
+++ b/project/build.properties
@@ -1 +1 @@
-sbt.version=0.13.1
+sbt.version=0.13.11

diff --git a/src/main/scala/nl/gideondk/sentinel/Client.scala b/src/main/scala/nl/gideondk/sentinel/Client.scala
index 7b6f5b2..5328ee3 100644
--- a/src/main/scala/nl/gideondk/sentinel/Client.scala
+++ b/src/main/scala/nl/gideondk/sentinel/Client.scala
@@ -68,11 +68,11 @@ object Client {
   }
 
   def randomRouting[Cmd, Evt](serverHost: String, serverPort: Int, numberOfConnections: Int, description: String = "Sentinel Client", stages: ⇒ PipelineStage[PipelineContext, Cmd, ByteString, Evt, ByteString], workerReconnectTime: FiniteDuration = 2 seconds, resolver: Resolver[Evt, Cmd] = Client.defaultResolver[Cmd, Evt], allowPipelining: Boolean = true, lowBytes: Long = 100L, highBytes: Long = 5000L, maxBufferSize: Long = 20000L)(implicit system: ActorSystem) = {
-    apply(serverHost, serverPort, RandomRouter(numberOfConnections), description, stages, workerReconnectTime, resolver, allowPipelining, lowBytes, highBytes, maxBufferSize)
+    apply(serverHost, serverPort, RandomPool(numberOfConnections), description, stages, workerReconnectTime, resolver, allowPipelining, lowBytes, highBytes, maxBufferSize)
   }
 
   def roundRobinRouting[Cmd, Evt](serverHost: String, serverPort: Int, numberOfConnections: Int, description: String = "Sentinel Client", stages: ⇒ PipelineStage[PipelineContext, Cmd, ByteString, Evt, ByteString], workerReconnectTime: FiniteDuration = 2 seconds, resolver: Resolver[Evt, Cmd] = Client.defaultResolver[Cmd, Evt], allowPipelining: Boolean = true, lowBytes: Long = 100L, highBytes: Long = 5000L, maxBufferSize: Long = 20000L)(implicit system: ActorSystem) = {
-    apply(serverHost, serverPort, RoundRobinRouter(numberOfConnections), description, stages, workerReconnectTime, resolver, allowPipelining, lowBytes, highBytes, maxBufferSize)
+    apply(serverHost, serverPort, RoundRobinPool(numberOfConnections), description, stages, workerReconnectTime, resolver, allowPipelining, lowBytes, highBytes, maxBufferSize)
   }
 }
 
@@ -149,7 +149,7 @@ class ClientCore[Cmd, Evt](routerConfig: RouterConfig, description: String, reco
         val router = routerProto(x.addr)
         context.watch(router)
         addresses = addresses ++ List(x.addr -> Some(router))
-        coreRouter = Some(context.system.actorOf(Props.empty.withRouter(RoundRobinRouter(routees = addresses.map(_._2).flatten))))
+        coreRouter = Some(context.system.actorOf(Props.empty.withRouter(RoundRobinGroup(addresses.map(_._2).flatten.map(_.path.toString)))))
         reconnecting = false
         unstashAll()
       } else {
@@ -162,7 +162,7 @@ class ClientCore[Cmd, Evt](routerConfig: RouterConfig, description: String, reco
       terminatedRouter match {
         case Some(r) ⇒
           addresses = addresses diff addresses.find(_._2 == Some(actor)).toList
-          coreRouter = Some(context.system.actorOf(Props.empty.withRouter(RoundRobinRouter(routees = addresses.map(_._2).flatten))))
+          coreRouter = Some(context.system.actorOf(Props.empty.withRouter(RoundRobinGroup(addresses.map(_._2).flatten.map(_.path.toString)))))
           log.error("Router for: " + r._1 + " died, restarting in: " + reconnectDuration.toString())
           reconnecting = true
          context.system.scheduler.scheduleOnce(reconnectDuration, self, Client.ConnectToServer(r._1))

From b51f790f2cf72e4ea50ed5de3a483f7cbe9e02e6 Mon Sep 17 00:00:00 2001
From: Gideon de Kok
Date: Tue, 29 Apr 2014 08:40:10 +0200
Subject: [PATCH 19/54] Fix a problem which could result in idling consumers
 when both streaming requests and normal requests were active.

---
 README.md                                       |  2 +-
 project/Build.scala                             |  2 +-
 .../sentinel/processors/Consumer.scala          | 52 ++++++++++---------
 .../nl/gideondk/sentinel/FullDuplexSpec.scala   |  7 ++-
 .../gideondk/sentinel/RequestResponse.scala     |  2 +-
 .../nl/gideondk/sentinel/StreamingSpec.scala    | 18 +++++++
 6 files changed, 54 insertions(+), 29 deletions(-)

diff --git a/README.md b/README.md
index ea8d563..996eb54 100644
--- a/README.md
+++ b/README.md
@@ -46,7 +46,7 @@ Or by adding the repo:
 to your SBT configuration and adding the `SNAPSHOT` to your library dependencies:
 
<pre><code>libraryDependencies ++= Seq(
-  "nl.gideondk" %% "sentinel" % "0.7.0"
+  "nl.gideondk" %% "sentinel" % "0.7.1"
 )
</code></pre>

diff --git a/project/Build.scala b/project/Build.scala index a8b7c86..9f5ef40 100755 --- a/project/Build.scala +++ b/project/Build.scala @@ -5,7 +5,7 @@ object ApplicationBuild extends Build { override lazy val settings = super.settings ++ Seq( name := "sentinel", - version := "0.7.0", + version := "0.7.1", organization := "nl.gideondk", scalaVersion := "2.10.2", parallelExecution in Test := false, diff --git a/src/main/scala/nl/gideondk/sentinel/processors/Consumer.scala b/src/main/scala/nl/gideondk/sentinel/processors/Consumer.scala index fe4fbdc..141f60f 100644 --- a/src/main/scala/nl/gideondk/sentinel/processors/Consumer.scala +++ b/src/main/scala/nl/gideondk/sentinel/processors/Consumer.scala @@ -10,7 +10,6 @@ import akka.pattern.ask import akka.util.Timeout import play.api.libs.iteratee._ - import nl.gideondk.sentinel._ object Consumer { @@ -53,7 +52,6 @@ class Consumer[Cmd, Evt](init: Init[WithinActorContext, Cmd, Evt], streamChunkTi var buffer = Queue[Promise[ConsumerData[Evt]]]() var registrations = Queue[Registration[Evt, _]]() - var currentPromise: Option[Promise[Evt]] = None var runningSource: Option[Enumerator[Evt]] = None @@ -86,7 +84,7 @@ class Consumer[Cmd, Evt](init: Init[WithinActorContext, Cmd, Evt], streamChunkTi } } - def popAndSetHook = { + def popAndSetHook: Unit = { val worker = self val registration = registrations.head registrations = registrations.tail @@ -94,16 +92,20 @@ class Consumer[Cmd, Evt](init: Init[WithinActorContext, Cmd, Evt], streamChunkTi implicit val timeout = streamChunkTimeout registration match { - case x: ReplyRegistration[Evt] ⇒ x.promise.completeWith((self ? AskNextChunk).mapTo[Promise[ConsumerData[Evt]]].flatMap(_.future.flatMap { - _ match { - case x: DataChunk[Evt] ⇒ - Future.successful(x.c) - case x: ErrorChunk[Evt] ⇒ - Future.failed(ConsumerException(x.c)) - } - })) - case x: StreamReplyRegistration[Evt] ⇒ - val resource = Enumerator.generateM { + case x: ReplyRegistration[Evt] ⇒ { + x.promise.completeWith(nextChunk.future.flatMap { + _ match { + case x: DataChunk[Evt] ⇒ + Future.successful(x.c) + case x: ErrorChunk[Evt] ⇒ + Future.failed(ConsumerException(x.c)) + } + }) + if (registrations.headOption.isDefined) popAndSetHook + } + + case x: StreamReplyRegistration[Evt] ⇒ { + val resource = Enumerator.generateM[Evt] { (worker ? AskNextChunk).mapTo[Promise[ConsumerData[Evt]]].flatMap(_.future).flatMap { _ match { case x: EndOfStream[Evt] ⇒ (worker ? ReleaseStreamConsumer) flatMap (u ⇒ Future(None)) @@ -115,13 +117,24 @@ class Consumer[Cmd, Evt](init: Init[WithinActorContext, Cmd, Evt], streamChunkTi runningSource = Some(resource) x.promise success resource + } } } + def nextChunk() = buffer.headOption match { + case Some(p) ⇒ + buffer = buffer.tail + p + case None ⇒ + val p = Promise[ConsumerData[Evt]]() + hooks :+= p + p + } + def handleRegistrations: Receive = { case rc: Registration[Evt, _] ⇒ registrations :+= rc - if (runningSource.isEmpty && currentPromise.isEmpty) popAndSetHook + if (runningSource.isEmpty) popAndSetHook } var behavior: Receive = handleRegistrations orElse { @@ -131,16 +144,7 @@ class Consumer[Cmd, Evt](init: Init[WithinActorContext, Cmd, Evt], streamChunkTi sender ! () case AskNextChunk ⇒ - val promise = buffer.headOption match { - case Some(p) ⇒ - buffer = buffer.tail - p - case None ⇒ - val p = Promise[ConsumerData[Evt]]() - hooks :+= p - p - } - sender ! promise + sender ! 
nextChunk() case x: ConsumerActionAndData[Evt] ⇒ processAction(x.data, x.action) diff --git a/src/test/scala/nl/gideondk/sentinel/FullDuplexSpec.scala b/src/test/scala/nl/gideondk/sentinel/FullDuplexSpec.scala index 9be71b6..dc0cf7c 100644 --- a/src/test/scala/nl/gideondk/sentinel/FullDuplexSpec.scala +++ b/src/test/scala/nl/gideondk/sentinel/FullDuplexSpec.scala @@ -19,7 +19,7 @@ class FullDuplexSpec extends WordSpec with ShouldMatchers { implicit val duration = Duration(25, SECONDS) - def client(portNumber: Int)(implicit system: ActorSystem) = Client.randomRouting("localhost", portNumber, 1, "Worker", SimpleMessage.stages, 5 seconds, SimpleServerHandler)(system) + def client(portNumber: Int)(implicit system: ActorSystem) = Client.randomRouting("localhost", portNumber, 1, "Worker", SimpleMessage.stages, 0.5 seconds, SimpleServerHandler)(system) def server(portNumber: Int)(implicit system: ActorSystem) = { val s = Server(portNumber, SimpleServerHandler, stages = SimpleMessage.stages)(system) @@ -47,10 +47,13 @@ class FullDuplexSpec extends WordSpec with ShouldMatchers { "be able to exchange multiple requests simultaneously" in new TestKitSpec { val portNumber = TestHelpers.portNumber.getAndIncrement() val s = server(portNumber) + Thread.sleep(500) + val c = client(portNumber) val secC = client(portNumber) + Thread.sleep(500) - val numberOfRequests = 1000 + val numberOfRequests = 100 val actions = Future.sequence(List.fill(numberOfRequests)(c ? SimpleCommand(PING_PONG, ""))) val secActions = Future.sequence(List.fill(numberOfRequests)(secC ? SimpleCommand(PING_PONG, ""))) diff --git a/src/test/scala/nl/gideondk/sentinel/RequestResponse.scala b/src/test/scala/nl/gideondk/sentinel/RequestResponse.scala index 69b19bd..caf3db4 100644 --- a/src/test/scala/nl/gideondk/sentinel/RequestResponse.scala +++ b/src/test/scala/nl/gideondk/sentinel/RequestResponse.scala @@ -22,7 +22,7 @@ class RequestResponseSpec extends WordSpec with Matchers { implicit val duration = Duration(5, SECONDS) - def client(portNumber: Int)(implicit system: ActorSystem) = Client.randomRouting("localhost", portNumber, 320, "Worker", SimpleMessage.stages, 0.5 seconds, SimpleServerHandler)(system) + def client(portNumber: Int)(implicit system: ActorSystem) = Client.roundRobinRouting("localhost", portNumber, 64, "Worker", SimpleMessage.stages, 0.1 seconds, SimpleServerHandler, lowBytes = 1024L, highBytes = 1024 * 1024, maxBufferSize = 1024 * 1024 * 50)(system) def server(portNumber: Int)(implicit system: ActorSystem) = { val s = Server(portNumber, SimpleServerHandler, stages = SimpleMessage.stages)(system) diff --git a/src/test/scala/nl/gideondk/sentinel/StreamingSpec.scala b/src/test/scala/nl/gideondk/sentinel/StreamingSpec.scala index 3ba2fd7..c919370 100644 --- a/src/test/scala/nl/gideondk/sentinel/StreamingSpec.scala +++ b/src/test/scala/nl/gideondk/sentinel/StreamingSpec.scala @@ -75,6 +75,24 @@ class StreamingSpec extends WordSpec with ShouldMatchers { result.length should equal(count * numberOfActions) } + "be able to receive multiple streams and normal commands simultaneously from a server" in new TestKitSpec { + val portNumber = TestHelpers.portNumber.getAndIncrement() + val s = server(portNumber) + val c = client(portNumber) + + val count = 500 + val numberOfActions = 8 + + val streamAction = Future.sequence(List.fill(numberOfActions)((c ?->> SimpleCommand(GENERATE_NUMBERS, count.toString)).flatMap(x ⇒ x |>>> Iteratee.getChunks))) + val action = Future.sequence(List.fill(count)(c ? 
SimpleCommand(PING_PONG, ""))) + + val actions = Future.sequence(List(streamAction, action)) + + val result = Try(Await.result(actions.map(_.flatten), 5 seconds)) + + result.isSuccess should equal(true) + } + "be able to receive send streams simultaneously to a server" in new TestKitSpec { val portNumber = TestHelpers.portNumber.getAndIncrement() val s = server(portNumber) From e5963981709b09d1fe041d848fd38c8e00ad8d08 Mon Sep 17 00:00:00 2001 From: Gideon de Kok Date: Tue, 29 Apr 2014 12:13:48 +0200 Subject: [PATCH 20/54] Update Scala version to 2.11 --- project/.Build.scala.swp | Bin 0 -> 12288 bytes project/Build.scala | 10 +++++----- .../nl/gideondk/sentinel/RequestResponse.scala | 6 +----- .../gideondk/sentinel/ServerRequestSpec.scala | 2 +- .../nl/gideondk/sentinel/StreamingSpec.scala | 2 +- .../nl/gideondk/sentinel/TestHelpers.scala | 17 +++-------------- 6 files changed, 11 insertions(+), 26 deletions(-) create mode 100644 project/.Build.scala.swp diff --git a/project/.Build.scala.swp b/project/.Build.scala.swp new file mode 100644 index 0000000000000000000000000000000000000000..1ae8d44f909388666b6ebbe5e2be696b877367ec GIT binary patch literal 12288 zcmeI2&u<(x6vy52qZBA0I3t;%NhXC>Z&BMO3;k4fVuMg(|5%|V5C{bBfXPn0)apv5C{YUfj}S-2m}K6Issc8V((!PhpT}sRNsg8d{?*OD-Z|-0)apv z5C{YUfj}S-2m}IwKp+qZ1nxlsqQlr%4>0!QgGe6#|F8f4|NU{set^D*Zb8?fYtRPt zD%60ELf<{c*cZ^7&>PUd#~8Z_eF7~*|2)dr-_Q-{I`lr2K`%hhLw`NO*q_kX&_~dR z&<9WpIuAV!{qitl-#}kNpF^KPpF%ew18L|b=m>NlbQt>fC~Tk`5J8jBZx5j#=wqmW zo`rrrg8ZRt&>D0c`WY;Kf*cdiYbjZ0Gd?=TV@Pk&lE`~9&3lVd8Cnq-q1_;p-X^!&+oLFR zoY%d+JDqnM&*i?fDLSO4JFVAI?M0P&x26@^UdYd;+Sv1GuUL-H{v2@Uww#D5KDQ6S z(rN{;MUNJ6)Nu1)CZ+v(cO-F%x(C}XseVgR>9Htn>77+NLn$KcPNABKic6XmN?Y#FN9q&>mBsGEmu_4P z3){r&C`2i)4EUCiyfWDxlAq!4Fj$o}?&q+K_zDfoXe@sE%=ieSU0{W{lntVeB-gB; zPEPVBy*=UjtA@~4S!@Rd8PTQKv~1jL*^wkL0M&9QLV3x<#JuR>}jaafiAB zh5x^iIrA1O0H)nkqSrWTxFEwx&`Q~zI1_0HH&K25HJaL)if9)avAHQCe&Pg=+^=a% z#%`t-Pk1yJpN!`s^uhUoKB8@v9`W#f?e!_77*hot80LPkxM3)d&V&}{PYL6tGF zsmx-`P107m#lGsZ_k%*6C%fX8+3pR$S`BGe%6ArhhelOkKKUVjkA4W&II?0p?&00r zZ^aI4Z8`igoUtSm1_cQyWzu-z`En>%t}CEnfn;Kfa=-7_#s#)nDM`M3nc97~r}H%D zYp7bkuerB&ZRrd65(>!~VfSz~JnL(|>=s-_o?h`U2H&1bALfQC9s1A2r{dXR zOOD;%yNxV*7uYWV&J{d0qV4P$BfF337pVT>gUQKycbqQ@nYMjO{^&fX%b53$;pH86 R&1%2(yZU;4=QiAR_8*%=MZf?6 literal 0 HcmV?d00001 diff --git a/project/Build.scala b/project/Build.scala index 9f5ef40..9a89db2 100755 --- a/project/Build.scala +++ b/project/Build.scala @@ -7,7 +7,7 @@ object ApplicationBuild extends Build { name := "sentinel", version := "0.7.1", organization := "nl.gideondk", - scalaVersion := "2.10.2", + scalaVersion := "2.11.0", parallelExecution in Test := false, resolvers ++= Seq(Resolver.mavenLocal, "gideondk-repo" at "https://raw.github.com/gideondk/gideondk-mvn-repo/master", @@ -22,12 +22,12 @@ object ApplicationBuild extends Build { ) val appDependencies = Seq( - "org.scalatest" % "scalatest_2.10" % "1.9.1" % "test", + "org.scalatest" %% "scalatest" % "2.1.4" % "test", - "com.typesafe.play" %% "play-iteratees" % "2.2.0", + "com.typesafe.play" % "play-iteratees_2.10" % "2.2.0", - "com.typesafe.akka" % "akka-actor_2.10" % "2.3.0", - "com.typesafe.akka" %% "akka-testkit" % "2.3.0" + "com.typesafe.akka" %% "akka-actor" % "2.3.2", + "com.typesafe.akka" %% "akka-testkit" % "2.3.2" ) lazy val root = Project(id = "sentinel", diff --git a/src/test/scala/nl/gideondk/sentinel/RequestResponse.scala b/src/test/scala/nl/gideondk/sentinel/RequestResponse.scala index caf3db4..9fc453a 100644 --- a/src/test/scala/nl/gideondk/sentinel/RequestResponse.scala +++ 
b/src/test/scala/nl/gideondk/sentinel/RequestResponse.scala @@ -3,20 +3,16 @@ package nl.gideondk.sentinel import scala.concurrent.ExecutionContext.Implicits.global import org.scalatest.WordSpec -import org.scalatest.matchers.{ Matchers, ShouldMatchers } import akka.actor._ -import akka.routing._ import scala.concurrent.duration._ import scala.concurrent._ import scala.util.Try -import play.api.libs.iteratee._ - import protocols._ -class RequestResponseSpec extends WordSpec with Matchers { +class RequestResponseSpec extends WordSpec { import SimpleMessage._ diff --git a/src/test/scala/nl/gideondk/sentinel/ServerRequestSpec.scala b/src/test/scala/nl/gideondk/sentinel/ServerRequestSpec.scala index 128ce10..f8db82c 100644 --- a/src/test/scala/nl/gideondk/sentinel/ServerRequestSpec.scala +++ b/src/test/scala/nl/gideondk/sentinel/ServerRequestSpec.scala @@ -13,7 +13,7 @@ import scala.concurrent.duration._ import protocols._ import akka.util.Timeout -class ServerRequestSpec extends WordSpec with ShouldMatchers { +class ServerRequestSpec extends WordSpec { import SimpleMessage._ diff --git a/src/test/scala/nl/gideondk/sentinel/StreamingSpec.scala b/src/test/scala/nl/gideondk/sentinel/StreamingSpec.scala index c919370..1d49751 100644 --- a/src/test/scala/nl/gideondk/sentinel/StreamingSpec.scala +++ b/src/test/scala/nl/gideondk/sentinel/StreamingSpec.scala @@ -15,7 +15,7 @@ import play.api.libs.iteratee._ import protocols._ -class StreamingSpec extends WordSpec with ShouldMatchers { +class StreamingSpec extends WordSpec { import SimpleMessage._ diff --git a/src/test/scala/nl/gideondk/sentinel/TestHelpers.scala b/src/test/scala/nl/gideondk/sentinel/TestHelpers.scala index 940c682..bef1dd9 100644 --- a/src/test/scala/nl/gideondk/sentinel/TestHelpers.scala +++ b/src/test/scala/nl/gideondk/sentinel/TestHelpers.scala @@ -1,31 +1,20 @@ package nl.gideondk.sentinel -import scala.concurrent._ -import scala.concurrent.ExecutionContext.Implicits.global - -import scala.util.Try - -import org.scalatest.BeforeAndAfterAll -import org.scalatest.WordSpec +import org.scalatest.{ Suite, BeforeAndAfterAll, WordSpec } import org.scalatest.matchers.ShouldMatchers -import akka.io.{ LengthFieldFrame, PipelineContext, SymmetricPipePair, SymmetricPipelineStage } -import akka.routing.RoundRobinRouter +import akka.io.SymmetricPipelineStage import akka.util.ByteString import akka.actor._ import akka.testkit._ -import scala.concurrent.duration._ -import scala.concurrent._ import java.util.concurrent.atomic.AtomicInteger import protocols._ -import java.net.InetSocketAddress - abstract class TestKitSpec extends TestKit(ActorSystem()) - with WordSpec + with Suite with ShouldMatchers with BeforeAndAfterAll with ImplicitSender { From 7451dfba0083b6004492a917cf57c08e736fcb31 Mon Sep 17 00:00:00 2001 From: Gideon de Kok Date: Sun, 11 May 2014 15:09:32 +0200 Subject: [PATCH 21/54] Run stream consumers in separate actor --- README.md | 2 +- project/.Build.scala.swp | Bin 12288 -> 0 bytes project/Build.scala | 2 +- src/main/resources/application.conf | 2 +- .../sentinel/processors/Consumer.scala | 222 ++++++++++++------ .../sentinel/processors/Producer.scala | 7 +- .../nl/gideondk/sentinel/FullDuplexSpec.scala | 7 +- .../gideondk/sentinel/RequestResponse.scala | 14 ++ .../nl/gideondk/sentinel/StreamingSpec.scala | 23 +- 9 files changed, 192 insertions(+), 87 deletions(-) delete mode 100644 project/.Build.scala.swp diff --git a/README.md b/README.md index 996eb54..45097de 100644 --- a/README.md +++ b/README.md @@ -46,7 +46,7 @@ Or by 
adding the repo: to your SBT configuration and adding the `SNAPSHOT` to your library dependencies:
<pre><code>libraryDependencies ++= Seq(
-  "nl.gideondk" %% "sentinel" % "0.7.1"
+  "nl.gideondk" %% "sentinel" % "0.7.2"
 )
</code></pre>

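The Consumer.scala rewrite in this patch moves stream consumption into a dedicated `StreamHandler` actor, but the client-facing streaming operators stay as the specs use them. A usage sketch (again relying on the test fixtures from `src/test`; host and port are made up, and the snippet is not part of the patch):

<pre><code>// Streaming call-site sketch, mirroring StreamingSpec (test fixtures assumed).
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
import akka.actor.ActorSystem
import play.api.libs.iteratee._
import nl.gideondk.sentinel.Client
import nl.gideondk.sentinel.protocols._
import SimpleMessage._

implicit val system = ActorSystem("streaming-sketch")
val c = Client.randomRouting("localhost", 9999, 1, "Worker", SimpleMessage.stages, 0.5 seconds, SimpleServerHandler)

// ?->> requests a streamed response; the resulting Enumerator is drained with an Iteratee.
val streamed = (c ?->> SimpleCommand(GENERATE_NUMBERS, "500")).flatMap(x ⇒ x |>>> Iteratee.getChunks)

// ?<<- sends a command together with an outgoing stream of chunks (ended by an empty chunk, as in the spec).
val sent = c ?<<- (SimpleCommand(TOTAL_CHUNK_SIZE, ""), Enumerator(SimpleStreamChunk("ABCDEF"), SimpleStreamChunk("")))
</code></pre>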
diff --git a/project/.Build.scala.swp b/project/.Build.scala.swp deleted file mode 100644 index 1ae8d44f909388666b6ebbe5e2be696b877367ec..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 12288 zcmeI2&u<(x6vy52qZBA0I3t;%NhXC>Z&BMO3;k4fVuMg(|5%|V5C{bBfXPn0)apv5C{YUfj}S-2m}K6Issc8V((!PhpT}sRNsg8d{?*OD-Z|-0)apv z5C{YUfj}S-2m}IwKp+qZ1nxlsqQlr%4>0!QgGe6#|F8f4|NU{set^D*Zb8?fYtRPt zD%60ELf<{c*cZ^7&>PUd#~8Z_eF7~*|2)dr-_Q-{I`lr2K`%hhLw`NO*q_kX&_~dR z&<9WpIuAV!{qitl-#}kNpF^KPpF%ew18L|b=m>NlbQt>fC~Tk`5J8jBZx5j#=wqmW zo`rrrg8ZRt&>D0c`WY;Kf*cdiYbjZ0Gd?=TV@Pk&lE`~9&3lVd8Cnq-q1_;p-X^!&+oLFR zoY%d+JDqnM&*i?fDLSO4JFVAI?M0P&x26@^UdYd;+Sv1GuUL-H{v2@Uww#D5KDQ6S z(rN{;MUNJ6)Nu1)CZ+v(cO-F%x(C}XseVgR>9Htn>77+NLn$KcPNABKic6XmN?Y#FN9q&>mBsGEmu_4P z3){r&C`2i)4EUCiyfWDxlAq!4Fj$o}?&q+K_zDfoXe@sE%=ieSU0{W{lntVeB-gB; zPEPVBy*=UjtA@~4S!@Rd8PTQKv~1jL*^wkL0M&9QLV3x<#JuR>}jaafiAB zh5x^iIrA1O0H)nkqSrWTxFEwx&`Q~zI1_0HH&K25HJaL)if9)avAHQCe&Pg=+^=a% z#%`t-Pk1yJpN!`s^uhUoKB8@v9`W#f?e!_77*hot80LPkxM3)d&V&}{PYL6tGF zsmx-`P107m#lGsZ_k%*6C%fX8+3pR$S`BGe%6ArhhelOkKKUVjkA4W&II?0p?&00r zZ^aI4Z8`igoUtSm1_cQyWzu-z`En>%t}CEnfn;Kfa=-7_#s#)nDM`M3nc97~r}H%D zYp7bkuerB&ZRrd65(>!~VfSz~JnL(|>=s-_o?h`U2H&1bALfQC9s1A2r{dXR zOOD;%yNxV*7uYWV&J{d0qV4P$BfF337pVT>gUQKycbqQ@nYMjO{^&fX%b53$;pH86 R&1%2(yZU;4=QiAR_8*%=MZf?6 diff --git a/project/Build.scala b/project/Build.scala index 9a89db2..2d42f6e 100755 --- a/project/Build.scala +++ b/project/Build.scala @@ -5,7 +5,7 @@ object ApplicationBuild extends Build { override lazy val settings = super.settings ++ Seq( name := "sentinel", - version := "0.7.1", + version := "0.7.2", organization := "nl.gideondk", scalaVersion := "2.11.0", parallelExecution in Test := false, diff --git a/src/main/resources/application.conf b/src/main/resources/application.conf index 8b0979d..839d2c4 100644 --- a/src/main/resources/application.conf +++ b/src/main/resources/application.conf @@ -2,7 +2,7 @@ akka.log-dead-letters-during-shutdown = off akka.log-dead-letters = off akka { - //loglevel = DEBUG + // loglevel = DEBUG io { tcp { // trace-logging = on diff --git a/src/main/scala/nl/gideondk/sentinel/processors/Consumer.scala b/src/main/scala/nl/gideondk/sentinel/processors/Consumer.scala index 141f60f..6b5d5e6 100644 --- a/src/main/scala/nl/gideondk/sentinel/processors/Consumer.scala +++ b/src/main/scala/nl/gideondk/sentinel/processors/Consumer.scala @@ -26,6 +26,8 @@ object Consumer { case object ReleaseStreamConsumer extends StreamConsumerMessage + case object TimeoutStreamConsumer extends StreamConsumerMessage + trait ConsumerData[Evt] case class ConsumerException[Evt](cause: Evt) extends Exception { @@ -34,35 +36,155 @@ object Consumer { case class DataChunk[Evt](c: Evt) extends ConsumerData[Evt] + case class StreamChunk[Evt](c: Evt) extends ConsumerData[Evt] + case class ErrorChunk[Evt](c: Evt) extends ConsumerData[Evt] case class EndOfStream[Evt]() extends ConsumerData[Evt] } -class Consumer[Cmd, Evt](init: Init[WithinActorContext, Cmd, Evt], streamChunkTimeout: Timeout = Timeout(5 seconds)) extends Actor with ActorLogging { +class StreamHandler[Cmd, Evt](streamConsumerTimeout: Timeout = Timeout(10 seconds)) extends Actor with ActorLogging { + import Registration._ + import Consumer._ + import ConsumerAction._ + import context.dispatcher + + context.setReceiveTimeout(streamConsumerTimeout.duration) + + var hook: Option[Promise[ConsumerData[Evt]]] = None + var buffer = Queue[ConsumerData[Evt]]() + + override def postStop() = { + hook.foreach(_.failure(new Exception("Actor quit 
unexpectedly"))) + } + + def receive: Receive = { + case ReleaseStreamConsumer ⇒ + context.stop(self) + sender ! () + + case AskNextChunk ⇒ + sender ! nextStreamChunk + + case chunk: ConsumerData[Evt] ⇒ + hook match { + case Some(x) ⇒ + x.success(chunk) + hook = None + case None ⇒ + buffer :+= chunk + } + + case ReceiveTimeout ⇒ { + context.stop(self) + } + } + + def nextStreamChunk = { + buffer.headOption match { + case Some(c) ⇒ + buffer = buffer.tail + Promise[ConsumerData[Evt]]().success(c) + case None ⇒ + val p = Promise[ConsumerData[Evt]]() + hook = Some(p) + p + } + } +} + +class Consumer[Cmd, Evt](init: Init[WithinActorContext, Cmd, Evt], + streamChunkTimeout: Timeout = Timeout(120 seconds), + streamConsumerTimeout: Timeout = Timeout(10 seconds)) extends Actor with ActorLogging { import Registration._ import Consumer._ import ConsumerAction._ import context.dispatcher - var hooks = Queue[Promise[ConsumerData[Evt]]]() - var buffer = Queue[Promise[ConsumerData[Evt]]]() + implicit val timeout = streamChunkTimeout - var registrations = Queue[Registration[Evt, _]]() + var replyRegistrations = Queue[ReplyRegistration[Evt]]() + var streamRegistrations = Queue[StreamReplyRegistration[Evt]]() - var runningSource: Option[Enumerator[Evt]] = None + var streamBuffer = Queue[ConsumerData[Evt]]() + + var currentRunningStream: Option[ActorRef] = None + + override def postStop() = { + replyRegistrations.foreach(_.promise.failure(new Exception("Actor quit unexpectedly"))) + streamRegistrations.foreach(_.promise.failure(new Exception("Actor quit unexpectedly"))) + } def processAction(data: Evt, action: ConsumerAction) = { + def handleConsumerData(cd: ConsumerData[Evt]) = { - hooks.headOption match { + val registration = replyRegistrations.head + replyRegistrations = replyRegistrations.tail + + registration.promise.completeWith(cd match { + case x: DataChunk[Evt] ⇒ + Future.successful(x.c) + case x: ErrorChunk[Evt] ⇒ + Future.failed(ConsumerException(x.c)) + }) + } + + def handleStreamData(cd: ConsumerData[Evt]) = { + currentRunningStream match { case Some(x) ⇒ - x.success(cd) - hooks = hooks.tail + cd match { + case x: EndOfStream[Evt] ⇒ currentRunningStream = None + case _ ⇒ () + } + + x ! cd + case None ⇒ - buffer :+= Promise.successful(cd) + streamRegistrations.headOption match { + case Some(registration) ⇒ + val streamHandler = context.actorOf(Props(new StreamHandler(streamConsumerTimeout)), name = "streamHandler-" + java.util.UUID.randomUUID.toString) + currentRunningStream = Some(streamHandler) + + val worker = streamHandler + + // TODO: handle stream chunk timeout better + val resource = Enumerator.generateM[Evt] { + (worker ? AskNextChunk).mapTo[Promise[ConsumerData[Evt]]].flatMap(_.future).flatMap { + _ match { + case x: EndOfStream[Evt] ⇒ (worker ? ReleaseStreamConsumer) flatMap (u ⇒ Future(None)) + case x: StreamChunk[Evt] ⇒ Future(Some(x.c)) + case x: ErrorChunk[Evt] ⇒ (worker ? ReleaseStreamConsumer) flatMap (u ⇒ Future.failed(ConsumerException(x.c))) + } + } + } + + def dequeueStreamBuffer(): Unit = { + streamBuffer.headOption match { + case Some(x) ⇒ + streamBuffer = streamBuffer.tail + x match { + case x: EndOfStream[Evt] ⇒ + worker ! x + case x ⇒ + worker ! x + dequeueStreamBuffer() + } + case None ⇒ () + } + } + + dequeueStreamBuffer() + worker ! 
cd + + streamRegistrations = streamRegistrations.tail + registration.promise success resource + + case None ⇒ + streamBuffer :+= cd + } } } @@ -70,89 +192,37 @@ class Consumer[Cmd, Evt](init: Init[WithinActorContext, Cmd, Evt], streamChunkTi case AcceptSignal ⇒ handleConsumerData(DataChunk(data)) case AcceptError ⇒ - handleConsumerData(ErrorChunk(data)) + currentRunningStream match { + case Some(x) ⇒ handleStreamData(ErrorChunk(data)) + case None ⇒ handleConsumerData(ErrorChunk(data)) + } case ConsumeStreamChunk ⇒ - handleConsumerData(DataChunk(data)) // Should eventually seperate data chunks and stream chunks for better socket consistency handling + handleStreamData(StreamChunk(data)) // Should eventually seperate data chunks and stream chunks for better socket consistency handling case EndStream ⇒ - handleConsumerData(EndOfStream[Evt]()) + handleStreamData(EndOfStream[Evt]()) case ConsumeChunkAndEndStream ⇒ - handleConsumerData(DataChunk(data)) - handleConsumerData(EndOfStream[Evt]()) + handleStreamData(StreamChunk(data)) + handleStreamData(EndOfStream[Evt]()) case Ignore ⇒ () } } - def popAndSetHook: Unit = { - val worker = self - val registration = registrations.head - registrations = registrations.tail - - implicit val timeout = streamChunkTimeout - - registration match { - case x: ReplyRegistration[Evt] ⇒ { - x.promise.completeWith(nextChunk.future.flatMap { - _ match { - case x: DataChunk[Evt] ⇒ - Future.successful(x.c) - case x: ErrorChunk[Evt] ⇒ - Future.failed(ConsumerException(x.c)) - } - }) - if (registrations.headOption.isDefined) popAndSetHook - } - - case x: StreamReplyRegistration[Evt] ⇒ { - val resource = Enumerator.generateM[Evt] { - (worker ? AskNextChunk).mapTo[Promise[ConsumerData[Evt]]].flatMap(_.future).flatMap { - _ match { - case x: EndOfStream[Evt] ⇒ (worker ? ReleaseStreamConsumer) flatMap (u ⇒ Future(None)) - case x: DataChunk[Evt] ⇒ Future(Some(x.c)) - case x: ErrorChunk[Evt] ⇒ (worker ? ReleaseStreamConsumer) flatMap (u ⇒ Future.failed(ConsumerException(x.c))) - } - } - } - - runningSource = Some(resource) - x.promise success resource - } - } - } + def handleRegistrations: Receive = { + case rc: ReplyRegistration[Evt] ⇒ + replyRegistrations :+= rc - def nextChunk() = buffer.headOption match { - case Some(p) ⇒ - buffer = buffer.tail - p - case None ⇒ - val p = Promise[ConsumerData[Evt]]() - hooks :+= p - p - } + case rc: StreamReplyRegistration[Evt] ⇒ + streamRegistrations :+= rc - def handleRegistrations: Receive = { - case rc: Registration[Evt, _] ⇒ - registrations :+= rc - if (runningSource.isEmpty) popAndSetHook } var behavior: Receive = handleRegistrations orElse { - case ReleaseStreamConsumer ⇒ - runningSource = None - if (registrations.headOption.isDefined) popAndSetHook - sender ! () - - case AskNextChunk ⇒ - sender ! 
nextChunk() - - case x: ConsumerActionAndData[Evt] ⇒ processAction(x.data, x.action) + case x: ConsumerActionAndData[Evt] ⇒ + processAction(x.data, x.action) } - override def postStop() = { - hooks.foreach(_.failure(new Exception("Actor quit unexpectedly"))) - } - def receive = behavior } diff --git a/src/main/scala/nl/gideondk/sentinel/processors/Producer.scala b/src/main/scala/nl/gideondk/sentinel/processors/Producer.scala index bac5d39..33a7ce7 100644 --- a/src/main/scala/nl/gideondk/sentinel/processors/Producer.scala +++ b/src/main/scala/nl/gideondk/sentinel/processors/Producer.scala @@ -75,9 +75,9 @@ class Producer[Cmd, Evt](init: Init[WithinActorContext, Cmd, Evt], streamChunkTi case x: ProduceStream[Evt, Cmd] ⇒ initStreamProducer(data, x.f) case x: ConsumeStream[Evt, Cmd] ⇒ - val imcomingStreamPromise = Promise[Enumerator[Evt]]() - context.parent ! Registration.StreamReplyRegistration(imcomingStreamPromise) - imcomingStreamPromise.future flatMap ((s) ⇒ initStreamConsumer(data, x.f(_)(s))) + val incomingStreamPromise = Promise[Enumerator[Evt]]() + context.parent ! Registration.StreamReplyRegistration(incomingStreamPromise) + incomingStreamPromise.future flatMap ((s) ⇒ initStreamConsumer(data, x.f(_)(s))) } future.onFailure { @@ -118,6 +118,7 @@ class Producer[Cmd, Evt](init: Init[WithinActorContext, Cmd, Evt], streamChunkTi case x: HandleAsyncResult[Cmd] ⇒ context.parent ! Reply.Response(x.response) case x: HandleStreamResult[Cmd] ⇒ val worker = self + // TODO: What to do when producing Enumerator times out, send error, close stream and continue producing? implicit val timeout = streamChunkTimeout (x.stream |>>> Iteratee.foldM(())((a, b) ⇒ (worker ? StreamProducerChunk(b)).map(x ⇒ ()))).flatMap(x ⇒ (worker ? StreamProducerEnded)) diff --git a/src/test/scala/nl/gideondk/sentinel/FullDuplexSpec.scala b/src/test/scala/nl/gideondk/sentinel/FullDuplexSpec.scala index dc0cf7c..a6055b0 100644 --- a/src/test/scala/nl/gideondk/sentinel/FullDuplexSpec.scala +++ b/src/test/scala/nl/gideondk/sentinel/FullDuplexSpec.scala @@ -47,13 +47,12 @@ class FullDuplexSpec extends WordSpec with ShouldMatchers { "be able to exchange multiple requests simultaneously" in new TestKitSpec { val portNumber = TestHelpers.portNumber.getAndIncrement() val s = server(portNumber) - Thread.sleep(500) val c = client(portNumber) val secC = client(portNumber) - Thread.sleep(500) + Thread.sleep(1000) - val numberOfRequests = 100 + val numberOfRequests = 10 val actions = Future.sequence(List.fill(numberOfRequests)(c ? SimpleCommand(PING_PONG, ""))) val secActions = Future.sequence(List.fill(numberOfRequests)(secC ? 
SimpleCommand(PING_PONG, ""))) @@ -61,6 +60,8 @@ class FullDuplexSpec extends WordSpec with ShouldMatchers { val combined = Future.sequence(List(actions, serverActions.map(_.flatten), secActions)) + val aa = Await.result(actions, 5 seconds) + val results = Await.result(combined, 5 seconds) results(0).length should equal(numberOfRequests) diff --git a/src/test/scala/nl/gideondk/sentinel/RequestResponse.scala b/src/test/scala/nl/gideondk/sentinel/RequestResponse.scala index 9fc453a..8ec3897 100644 --- a/src/test/scala/nl/gideondk/sentinel/RequestResponse.scala +++ b/src/test/scala/nl/gideondk/sentinel/RequestResponse.scala @@ -66,6 +66,20 @@ class RequestResponseSpec extends WordSpec { result.map(_.payload) should equal(items) } + // "test a" in new TestKitSpec { + // val portNumber = TestHelpers.portNumber.getAndIncrement() + // val s = server(portNumber) + // val c = client(portNumber) + // + // val numberOfRequests = 90 * 1000 + // + // val items = List.range(0, numberOfRequests).map(_.toString) + // val action = Future.sequence(items.map(x ⇒ (c ? SimpleCommand(ECHO, x)))) + // val result = Await.result(action, 5 seconds) + // + // result.map(_.payload) should equal(items) + // } + "should automatically reconnect" in new TestKitSpec { val portNumber = TestHelpers.portNumber.getAndIncrement() val s = server(portNumber) diff --git a/src/test/scala/nl/gideondk/sentinel/StreamingSpec.scala b/src/test/scala/nl/gideondk/sentinel/StreamingSpec.scala index 1d49751..b063e17 100644 --- a/src/test/scala/nl/gideondk/sentinel/StreamingSpec.scala +++ b/src/test/scala/nl/gideondk/sentinel/StreamingSpec.scala @@ -21,7 +21,7 @@ class StreamingSpec extends WordSpec { implicit val duration = Duration(5, SECONDS) - def client(portNumber: Int)(implicit system: ActorSystem) = Client.randomRouting("localhost", portNumber, 2, "Worker", SimpleMessage.stages, 0.5 seconds, SimpleServerHandler)(system) + def client(portNumber: Int)(implicit system: ActorSystem) = Client.randomRouting("localhost", portNumber, 1, "Worker", SimpleMessage.stages, 0.5 seconds, SimpleServerHandler)(system) def server(portNumber: Int)(implicit system: ActorSystem) = { val s = Server(portNumber, SimpleServerHandler, stages = SimpleMessage.stages)(system) @@ -93,6 +93,25 @@ class StreamingSpec extends WordSpec { result.isSuccess should equal(true) } + "be able to handle slow or idle consumers while retrieving streams from a server" in new TestKitSpec { + val portNumber = TestHelpers.portNumber.getAndIncrement() + val s = server(portNumber) + val c = client(portNumber) + + val count = 500 + val numberOfActions = 8 + + val newAct = for { + takSome ← (c ?->> SimpleCommand(GENERATE_NUMBERS, count.toString)).flatMap(x ⇒ x &> Enumeratee.take(1) |>>> Iteratee.getChunks) + takSome ← (c ?->> SimpleCommand(GENERATE_NUMBERS, count.toString)).flatMap(x ⇒ x &> Enumeratee.take(1) &> Enumeratee.map(x ⇒ throw new Exception("")) |>>> Iteratee.getChunks).recover { case e ⇒ () } + act ← c ? 
SimpleCommand(PING_PONG, "") + } yield act + + val result = Try(Await.result(newAct, 5 seconds)) + + result.isSuccess should equal(true) + } + "be able to receive send streams simultaneously to a server" in new TestKitSpec { val portNumber = TestHelpers.portNumber.getAndIncrement() val s = server(portNumber) @@ -102,7 +121,7 @@ class StreamingSpec extends WordSpec { val chunks = List.fill(count)(SimpleStreamChunk("ABCDEF")) ++ List(SimpleStreamChunk("")) val action = c ?<<- (SimpleCommand(TOTAL_CHUNK_SIZE, ""), Enumerator(chunks: _*)) - val numberOfActions = 8 + val numberOfActions = 2 val actions = Future.sequence(List.fill(numberOfActions)(c ?<<- (SimpleCommand(TOTAL_CHUNK_SIZE, ""), Enumerator(chunks: _*)))) val localLength = chunks.foldLeft(0)((b, a) ⇒ b + a.payload.length) From cec5f83d6d92386dd15ca2a9ba678799767131ed Mon Sep 17 00:00:00 2001 From: Gideon de Kok Date: Sun, 11 May 2014 15:31:28 +0200 Subject: [PATCH 22/54] Stop producing actor when enumeration fails --- .../scala/nl/gideondk/sentinel/processors/Consumer.scala | 2 +- .../scala/nl/gideondk/sentinel/processors/Producer.scala | 7 ++++++- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/src/main/scala/nl/gideondk/sentinel/processors/Consumer.scala b/src/main/scala/nl/gideondk/sentinel/processors/Consumer.scala index 6b5d5e6..7cd00d7 100644 --- a/src/main/scala/nl/gideondk/sentinel/processors/Consumer.scala +++ b/src/main/scala/nl/gideondk/sentinel/processors/Consumer.scala @@ -95,7 +95,7 @@ class StreamHandler[Cmd, Evt](streamConsumerTimeout: Timeout = Timeout(10 second } } -class Consumer[Cmd, Evt](init: Init[WithinActorContext, Cmd, Evt], +class Consumer[Cmd, Evt](init: Init[WithinActorContext, Cmd, Evt], streamChunkTimeout: Timeout = Timeout(120 seconds), streamConsumerTimeout: Timeout = Timeout(10 seconds)) extends Actor with ActorLogging { import Registration._ diff --git a/src/main/scala/nl/gideondk/sentinel/processors/Producer.scala b/src/main/scala/nl/gideondk/sentinel/processors/Producer.scala index 33a7ce7..aba73ec 100644 --- a/src/main/scala/nl/gideondk/sentinel/processors/Producer.scala +++ b/src/main/scala/nl/gideondk/sentinel/processors/Producer.scala @@ -121,7 +121,12 @@ class Producer[Cmd, Evt](init: Init[WithinActorContext, Cmd, Evt], streamChunkTi // TODO: What to do when producing Enumerator times out, send error, close stream and continue producing? implicit val timeout = streamChunkTimeout - (x.stream |>>> Iteratee.foldM(())((a, b) ⇒ (worker ? StreamProducerChunk(b)).map(x ⇒ ()))).flatMap(x ⇒ (worker ? StreamProducerEnded)) + val consumer = (x.stream |>>> Iteratee.foldM(())((a, b) ⇒ (worker ? StreamProducerChunk(b)).map(x ⇒ ()))).flatMap(x ⇒ (worker ? 
StreamProducerEnded)) + consumer.onFailure { + case e ⇒ + log.error(e, e.getMessage) + context.stop(self) + } context.become(handleRequestAndStreamResponse) From 0857ee981d871121c0cda0c9ae92c032221e191f Mon Sep 17 00:00:00 2001 From: Gideon de Kok Date: Sun, 11 May 2014 21:07:43 +0200 Subject: [PATCH 23/54] Minor bugfixes --- README.md | 2 +- project/Build.scala | 2 +- src/main/resources/application.conf | 4 +-- src/main/scala/akka/io/Pipelines.scala | 9 ++++-- .../scala/nl/gideondk/sentinel/Client.scala | 13 +++++--- .../sentinel/processors/Consumer.scala | 2 +- .../nl/gideondk/sentinel/FullDuplexSpec.scala | 3 +- .../gideondk/sentinel/RequestResponse.scala | 31 ++++++------------- .../nl/gideondk/sentinel/StreamingSpec.scala | 3 ++ .../nl/gideondk/sentinel/TestHelpers.scala | 4 +-- .../sentinel/protocols/SimpleMessage.scala | 2 +- 11 files changed, 37 insertions(+), 38 deletions(-) diff --git a/README.md b/README.md index 45097de..e4bd8bb 100644 --- a/README.md +++ b/README.md @@ -46,7 +46,7 @@ Or by adding the repo: to your SBT configuration and adding the `SNAPSHOT` to your library dependencies:
<pre><code>libraryDependencies ++= Seq(
-  "nl.gideondk" %% "sentinel" % "0.7.2"
+  "nl.gideondk" %% "sentinel" % "0.7.3"
 )
</code></pre>

diff --git a/project/Build.scala b/project/Build.scala index 2d42f6e..2120a43 100755 --- a/project/Build.scala +++ b/project/Build.scala @@ -5,7 +5,7 @@ object ApplicationBuild extends Build { override lazy val settings = super.settings ++ Seq( name := "sentinel", - version := "0.7.2", + version := "0.7.3", organization := "nl.gideondk", scalaVersion := "2.11.0", parallelExecution in Test := false, diff --git a/src/main/resources/application.conf b/src/main/resources/application.conf index 839d2c4..21480cd 100644 --- a/src/main/resources/application.conf +++ b/src/main/resources/application.conf @@ -2,10 +2,10 @@ akka.log-dead-letters-during-shutdown = off akka.log-dead-letters = off akka { - // loglevel = DEBUG + //loglevel = DEBUG io { tcp { -// trace-logging = on + // trace-logging = on } } } \ No newline at end of file diff --git a/src/main/scala/akka/io/Pipelines.scala b/src/main/scala/akka/io/Pipelines.scala index eace52a..54144aa 100644 --- a/src/main/scala/akka/io/Pipelines.scala +++ b/src/main/scala/akka/io/Pipelines.scala @@ -946,9 +946,12 @@ class LengthFieldFrame(maxSize: Int, */ override def commandPipeline = { bs: ByteString ⇒ - val length = - if (lengthIncludesHeader) bs.length + headerSize else bs.length - if (length > maxSize) Seq() + val length = if (lengthIncludesHeader) bs.length + headerSize else bs.length + + if (length < 0 || length > maxSize) + throw new IllegalArgumentException( + s"received too large frame of size $length (max = $maxSize)") + else { val bb = ByteString.newBuilder bb.putLongPart(length, headerSize) diff --git a/src/main/scala/nl/gideondk/sentinel/Client.scala b/src/main/scala/nl/gideondk/sentinel/Client.scala index a2453ad..326a210 100644 --- a/src/main/scala/nl/gideondk/sentinel/Client.scala +++ b/src/main/scala/nl/gideondk/sentinel/Client.scala @@ -60,7 +60,7 @@ object Client { def apply[Cmd, Evt](serverHost: String, serverPort: Int, routerConfig: RouterConfig, description: String = "Sentinel Client", stages: ⇒ PipelineStage[PipelineContext, Cmd, ByteString, Evt, ByteString], workerReconnectTime: FiniteDuration = 2 seconds, resolver: Resolver[Evt, Cmd] = Client.defaultResolver[Cmd, Evt], lowBytes: Long = 100L, highBytes: Long = 5000L, maxBufferSize: Long = 20000L)(implicit system: ActorSystem) = { - val core = system.actorOf(Props(new ClientCore[Cmd, Evt](routerConfig, description, workerReconnectTime, stages, resolver)(lowBytes, highBytes, maxBufferSize)), name = "sentinel-client-" + java.util.UUID.randomUUID.toString) + val core = system.actorOf(Props(new ClientCore[Cmd, Evt](routerConfig, description, workerReconnectTime, stages, resolver)(lowBytes, highBytes, maxBufferSize)).withDispatcher("nl.gideondk.sentinel.sentinel-dispatcher"), name = "sentinel-client-" + java.util.UUID.randomUUID.toString) core ! 
Client.ConnectToServer(new InetSocketAddress(serverHost, serverPort)) new Client[Cmd, Evt] { val actor = core @@ -120,7 +120,7 @@ class ClientAntennaManager[Cmd, Evt](address: InetSocketAddress, stages: ⇒ Pip } class ClientCore[Cmd, Evt](routerConfig: RouterConfig, description: String, reconnectDuration: FiniteDuration, - stages: ⇒ PipelineStage[PipelineContext, Cmd, ByteString, Evt, ByteString], Resolver: Resolver[Evt, Cmd], workerDescription: String = "Sentinel Client Worker")(lowBytes: Long, highBytes: Long, maxBufferSize: Long) extends Actor with ActorLogging { + stages: ⇒ PipelineStage[PipelineContext, Cmd, ByteString, Evt, ByteString], Resolver: Resolver[Evt, Cmd], workerDescription: String = "Sentinel Client Worker")(lowBytes: Long, highBytes: Long, maxBufferSize: Long) extends Actor with ActorLogging with Stash { import context.dispatcher @@ -130,6 +130,7 @@ class ClientCore[Cmd, Evt](routerConfig: RouterConfig, description: String, reco private case class ReconnectRouter(address: InetSocketAddress) var coreRouter: Option[ActorRef] = None + var reconnecting = false def antennaManagerProto(address: InetSocketAddress) = new ClientAntennaManager(address, stages, Resolver)(lowBytes, highBytes, maxBufferSize) @@ -149,6 +150,8 @@ class ClientCore[Cmd, Evt](routerConfig: RouterConfig, description: String, reco context.watch(router) addresses = addresses ++ List(x.addr -> Some(router)) coreRouter = Some(context.system.actorOf(Props.empty.withRouter(RoundRobinRouter(routees = addresses.map(_._2).flatten)))) + reconnecting = false + unstashAll() } else { log.debug("Client is already connected to: " + x.addr) } @@ -161,15 +164,15 @@ class ClientCore[Cmd, Evt](routerConfig: RouterConfig, description: String, reco addresses = addresses diff addresses.find(_._2 == Some(actor)).toList coreRouter = Some(context.system.actorOf(Props.empty.withRouter(RoundRobinRouter(routees = addresses.map(_._2).flatten)))) log.error("Router for: " + r._1 + " died, restarting in: " + reconnectDuration.toString()) + reconnecting = true context.system.scheduler.scheduleOnce(reconnectDuration, self, Client.ConnectToServer(r._1)) case None ⇒ } case x: Command[Cmd, Evt] ⇒ coreRouter match { - case Some(r) ⇒ - r forward x - case None ⇒ x.registration.promise.failure(new Exception("No connection(s) available")) + case Some(r) ⇒ if (reconnecting) stash() else r forward x + case None ⇒ x.registration.promise.failure(new Exception("No connection(s) available")) } case _ ⇒ diff --git a/src/main/scala/nl/gideondk/sentinel/processors/Consumer.scala b/src/main/scala/nl/gideondk/sentinel/processors/Consumer.scala index 7cd00d7..05439f0 100644 --- a/src/main/scala/nl/gideondk/sentinel/processors/Consumer.scala +++ b/src/main/scala/nl/gideondk/sentinel/processors/Consumer.scala @@ -198,7 +198,7 @@ class Consumer[Cmd, Evt](init: Init[WithinActorContext, Cmd, Evt], } case ConsumeStreamChunk ⇒ - handleStreamData(StreamChunk(data)) // Should eventually seperate data chunks and stream chunks for better socket consistency handling + handleStreamData(StreamChunk(data)) case EndStream ⇒ handleStreamData(EndOfStream[Evt]()) case ConsumeChunkAndEndStream ⇒ diff --git a/src/test/scala/nl/gideondk/sentinel/FullDuplexSpec.scala b/src/test/scala/nl/gideondk/sentinel/FullDuplexSpec.scala index a6055b0..794dd81 100644 --- a/src/test/scala/nl/gideondk/sentinel/FullDuplexSpec.scala +++ b/src/test/scala/nl/gideondk/sentinel/FullDuplexSpec.scala @@ -47,10 +47,11 @@ class FullDuplexSpec extends WordSpec with ShouldMatchers { "be able to exchange 
multiple requests simultaneously" in new TestKitSpec { val portNumber = TestHelpers.portNumber.getAndIncrement() val s = server(portNumber) + Thread.sleep(500) val c = client(portNumber) val secC = client(portNumber) - Thread.sleep(1000) + Thread.sleep(500) val numberOfRequests = 10 diff --git a/src/test/scala/nl/gideondk/sentinel/RequestResponse.scala b/src/test/scala/nl/gideondk/sentinel/RequestResponse.scala index 8ec3897..c807c43 100644 --- a/src/test/scala/nl/gideondk/sentinel/RequestResponse.scala +++ b/src/test/scala/nl/gideondk/sentinel/RequestResponse.scala @@ -18,7 +18,7 @@ class RequestResponseSpec extends WordSpec { implicit val duration = Duration(5, SECONDS) - def client(portNumber: Int)(implicit system: ActorSystem) = Client.roundRobinRouting("localhost", portNumber, 64, "Worker", SimpleMessage.stages, 0.1 seconds, SimpleServerHandler, lowBytes = 1024L, highBytes = 1024 * 1024, maxBufferSize = 1024 * 1024 * 50)(system) + def client(portNumber: Int)(implicit system: ActorSystem) = Client.roundRobinRouting("localhost", portNumber, 16, "Worker", SimpleMessage.stages, 0.1 seconds, SimpleServerHandler, lowBytes = 1024L, highBytes = 1024 * 1024, maxBufferSize = 1024 * 1024 * 50)(system) def server(portNumber: Int)(implicit system: ActorSystem) = { val s = Server(portNumber, SimpleServerHandler, stages = SimpleMessage.stages)(system) @@ -42,10 +42,11 @@ class RequestResponseSpec extends WordSpec { val portNumber = TestHelpers.portNumber.getAndIncrement() val s = server(portNumber) val c = client(portNumber) + Thread.sleep(100) - val numberOfRequests = 20 * 1000 + val numberOfRequests = 1000 - val action = Future.sequence(List.fill(numberOfRequests)(c ? SimpleCommand(PING_PONG, ""))) + val action = Future.sequence(List.fill(numberOfRequests)(c ? SimpleCommand(ECHO, LargerPayloadTestHelper.randomBSForSize(1024 * 10)))) val result = Try(Await.result(action, 5 seconds)) result.get.length should equal(numberOfRequests) @@ -57,7 +58,7 @@ class RequestResponseSpec extends WordSpec { val s = server(portNumber) val c = client(portNumber) - val numberOfRequests = 90 * 1000 + val numberOfRequests = 20 * 1000 val items = List.range(0, numberOfRequests).map(_.toString) val action = Future.sequence(items.map(x ⇒ (c ? SimpleCommand(ECHO, x)))) @@ -66,20 +67,6 @@ class RequestResponseSpec extends WordSpec { result.map(_.payload) should equal(items) } - // "test a" in new TestKitSpec { - // val portNumber = TestHelpers.portNumber.getAndIncrement() - // val s = server(portNumber) - // val c = client(portNumber) - // - // val numberOfRequests = 90 * 1000 - // - // val items = List.range(0, numberOfRequests).map(_.toString) - // val action = Future.sequence(items.map(x ⇒ (c ? SimpleCommand(ECHO, x)))) - // val result = Await.result(action, 5 seconds) - // - // result.map(_.payload) should equal(items) - // } - "should automatically reconnect" in new TestKitSpec { val portNumber = TestHelpers.portNumber.getAndIncrement() val s = server(portNumber) @@ -92,11 +79,13 @@ class RequestResponseSpec extends WordSpec { result.isSuccess should equal(true) system.stop(s.actor) - Thread.sleep(1000) - val ss = server(portNumber) + Thread.sleep(250) val secAction = c ? 
SimpleCommand(PING_PONG, "") - val endResult = Try(Await.result(secAction, 5 seconds)) + val ss = server(portNumber) + + Thread.sleep(250) + val endResult = Try(Await.result(secAction, 10 seconds)) endResult.isSuccess should equal(true) } diff --git a/src/test/scala/nl/gideondk/sentinel/StreamingSpec.scala b/src/test/scala/nl/gideondk/sentinel/StreamingSpec.scala index b063e17..9c49763 100644 --- a/src/test/scala/nl/gideondk/sentinel/StreamingSpec.scala +++ b/src/test/scala/nl/gideondk/sentinel/StreamingSpec.scala @@ -105,6 +105,9 @@ class StreamingSpec extends WordSpec { takSome ← (c ?->> SimpleCommand(GENERATE_NUMBERS, count.toString)).flatMap(x ⇒ x &> Enumeratee.take(1) |>>> Iteratee.getChunks) takSome ← (c ?->> SimpleCommand(GENERATE_NUMBERS, count.toString)).flatMap(x ⇒ x &> Enumeratee.take(1) &> Enumeratee.map(x ⇒ throw new Exception("")) |>>> Iteratee.getChunks).recover { case e ⇒ () } act ← c ? SimpleCommand(PING_PONG, "") + act ← c ? SimpleCommand(PING_PONG, "") + takSome ← (c ?->> SimpleCommand(GENERATE_NUMBERS, count.toString)).flatMap(x ⇒ x |>>> Iteratee.getChunks) + act ← c ? SimpleCommand(PING_PONG, "") } yield act val result = Try(Await.result(newAct, 5 seconds)) diff --git a/src/test/scala/nl/gideondk/sentinel/TestHelpers.scala b/src/test/scala/nl/gideondk/sentinel/TestHelpers.scala index bef1dd9..caf2598 100644 --- a/src/test/scala/nl/gideondk/sentinel/TestHelpers.scala +++ b/src/test/scala/nl/gideondk/sentinel/TestHelpers.scala @@ -13,7 +13,7 @@ import java.util.concurrent.atomic.AtomicInteger import protocols._ -abstract class TestKitSpec extends TestKit(ActorSystem()) +abstract class TestKitSpec extends TestKit(ActorSystem(java.util.UUID.randomUUID.toString)) with Suite with ShouldMatchers with BeforeAndAfterAll @@ -56,6 +56,6 @@ object LargerPayloadTestHelper { while (stringB.length() + paddingString.length() < size) stringB.append(paddingString) - ByteString(stringB.toString().getBytes()) + stringB.toString() } } diff --git a/src/test/scala/nl/gideondk/sentinel/protocols/SimpleMessage.scala b/src/test/scala/nl/gideondk/sentinel/protocols/SimpleMessage.scala index aa62590..f041d8c 100644 --- a/src/test/scala/nl/gideondk/sentinel/protocols/SimpleMessage.scala +++ b/src/test/scala/nl/gideondk/sentinel/protocols/SimpleMessage.scala @@ -66,7 +66,7 @@ class PingPongMessageStage extends SymmetricPipelineStage[PipelineContext, Simpl } object SimpleMessage { - val stages = new PingPongMessageStage >> new LengthFieldFrame(1000) + val stages = new PingPongMessageStage >> new LengthFieldFrame(1024 * 1024) val PING_PONG = 1 val TOTAL_CHUNK_SIZE = 2 From 8dc9ff227ab5932985e0cd74669238a045993f08 Mon Sep 17 00:00:00 2001 From: Gideon de Kok Date: Fri, 16 May 2014 16:01:41 +0300 Subject: [PATCH 24/54] Fix incorrect error handling during stream processing --- project/Build.scala | 2 +- .../sentinel/processors/Consumer.scala | 114 ++++++++++-------- .../nl/gideondk/sentinel/FullDuplexSpec.scala | 4 +- 3 files changed, 68 insertions(+), 52 deletions(-) diff --git a/project/Build.scala b/project/Build.scala index 2120a43..ae094a1 100755 --- a/project/Build.scala +++ b/project/Build.scala @@ -5,7 +5,7 @@ object ApplicationBuild extends Build { override lazy val settings = super.settings ++ Seq( name := "sentinel", - version := "0.7.3", + version := "0.7.4", organization := "nl.gideondk", scalaVersion := "2.11.0", parallelExecution in Test := false, diff --git a/src/main/scala/nl/gideondk/sentinel/processors/Consumer.scala b/src/main/scala/nl/gideondk/sentinel/processors/Consumer.scala 
index 05439f0..c336996 100644 --- a/src/main/scala/nl/gideondk/sentinel/processors/Consumer.scala +++ b/src/main/scala/nl/gideondk/sentinel/processors/Consumer.scala @@ -10,6 +10,7 @@ import akka.pattern.ask import akka.util.Timeout import play.api.libs.iteratee._ + import nl.gideondk.sentinel._ object Consumer { @@ -106,30 +107,38 @@ class Consumer[Cmd, Evt](init: Init[WithinActorContext, Cmd, Evt], implicit val timeout = streamChunkTimeout - var replyRegistrations = Queue[ReplyRegistration[Evt]]() - var streamRegistrations = Queue[StreamReplyRegistration[Evt]]() + var registrations = Queue[Registration[Evt, _]]() var streamBuffer = Queue[ConsumerData[Evt]]() var currentRunningStream: Option[ActorRef] = None override def postStop() = { - replyRegistrations.foreach(_.promise.failure(new Exception("Actor quit unexpectedly"))) - streamRegistrations.foreach(_.promise.failure(new Exception("Actor quit unexpectedly"))) + registrations.foreach(_.promise.failure(new Exception("Actor quit unexpectedly"))) } def processAction(data: Evt, action: ConsumerAction) = { - def handleConsumerData(cd: ConsumerData[Evt]) = { - val registration = replyRegistrations.head - replyRegistrations = replyRegistrations.tail - - registration.promise.completeWith(cd match { - case x: DataChunk[Evt] ⇒ - Future.successful(x.c) - case x: ErrorChunk[Evt] ⇒ - Future.failed(ConsumerException(x.c)) - }) + val registration = registrations.head + registrations = registrations.tail + + registration match { + case r: ReplyRegistration[_] ⇒ + r.promise.completeWith(cd match { + case x: DataChunk[Evt] ⇒ + Future.successful(x.c) + case x: ErrorChunk[Evt] ⇒ + Future.failed(ConsumerException(x.c)) + }) + + case r: StreamReplyRegistration[_] ⇒ + r.promise.completeWith(cd match { + case x: DataChunk[Evt] ⇒ + Future.failed(new Exception("Unexpectedly received a normal chunk instead of stream chunk")) + case x: ErrorChunk[Evt] ⇒ + Future.failed(ConsumerException(x.c)) + }) + } } def handleStreamData(cd: ConsumerData[Evt]) = { @@ -143,44 +152,51 @@ class Consumer[Cmd, Evt](init: Init[WithinActorContext, Cmd, Evt], x ! cd case None ⇒ - streamRegistrations.headOption match { + registrations.headOption match { case Some(registration) ⇒ - val streamHandler = context.actorOf(Props(new StreamHandler(streamConsumerTimeout)), name = "streamHandler-" + java.util.UUID.randomUUID.toString) - currentRunningStream = Some(streamHandler) - - val worker = streamHandler - - // TODO: handle stream chunk timeout better - val resource = Enumerator.generateM[Evt] { - (worker ? AskNextChunk).mapTo[Promise[ConsumerData[Evt]]].flatMap(_.future).flatMap { - _ match { - case x: EndOfStream[Evt] ⇒ (worker ? ReleaseStreamConsumer) flatMap (u ⇒ Future(None)) - case x: StreamChunk[Evt] ⇒ Future(Some(x.c)) - case x: ErrorChunk[Evt] ⇒ (worker ? ReleaseStreamConsumer) flatMap (u ⇒ Future.failed(ConsumerException(x.c))) + registration match { + case r: ReplyRegistration[_] ⇒ + throw new Exception("Unexpectedly received a stream chunk instead of normal reply") // TODO: use specific exception classes + case r: StreamReplyRegistration[_] ⇒ { + val streamHandler = context.actorOf(Props(new StreamHandler(streamConsumerTimeout)), name = "streamHandler-" + java.util.UUID.randomUUID.toString) + currentRunningStream = Some(streamHandler) + + val worker = streamHandler + + // TODO: handle stream chunk timeout better + val resource = Enumerator.generateM[Evt] { + (worker ? 
AskNextChunk).mapTo[Promise[ConsumerData[Evt]]].flatMap(_.future).flatMap { + _ match { + case x: EndOfStream[Evt] ⇒ (worker ? ReleaseStreamConsumer) flatMap (u ⇒ Future(None)) + case x: StreamChunk[Evt] ⇒ Future(Some(x.c)) + case x: ErrorChunk[Evt] ⇒ (worker ? ReleaseStreamConsumer) flatMap (u ⇒ Future.failed(ConsumerException(x.c))) + } + } } - } - } - def dequeueStreamBuffer(): Unit = { - streamBuffer.headOption match { - case Some(x) ⇒ - streamBuffer = streamBuffer.tail - x match { - case x: EndOfStream[Evt] ⇒ - worker ! x - case x ⇒ - worker ! x - dequeueStreamBuffer() + def dequeueStreamBuffer(): Unit = { + streamBuffer.headOption match { + case Some(x) ⇒ + streamBuffer = streamBuffer.tail + x match { + case x: EndOfStream[Evt] ⇒ + worker ! x + case x ⇒ + worker ! x + dequeueStreamBuffer() + } + case None ⇒ () } - case None ⇒ () - } - } + } + + dequeueStreamBuffer() + worker ! cd - dequeueStreamBuffer() - worker ! cd + registrations = registrations.tail + r.promise success resource + } - streamRegistrations = streamRegistrations.tail - registration.promise success resource + } case None ⇒ streamBuffer :+= cd @@ -211,10 +227,10 @@ class Consumer[Cmd, Evt](init: Init[WithinActorContext, Cmd, Evt], def handleRegistrations: Receive = { case rc: ReplyRegistration[Evt] ⇒ - replyRegistrations :+= rc + registrations :+= rc case rc: StreamReplyRegistration[Evt] ⇒ - streamRegistrations :+= rc + registrations :+= rc } @@ -225,4 +241,4 @@ class Consumer[Cmd, Evt](init: Init[WithinActorContext, Cmd, Evt], } def receive = behavior -} +} \ No newline at end of file diff --git a/src/test/scala/nl/gideondk/sentinel/FullDuplexSpec.scala b/src/test/scala/nl/gideondk/sentinel/FullDuplexSpec.scala index 794dd81..e044d20 100644 --- a/src/test/scala/nl/gideondk/sentinel/FullDuplexSpec.scala +++ b/src/test/scala/nl/gideondk/sentinel/FullDuplexSpec.scala @@ -47,11 +47,11 @@ class FullDuplexSpec extends WordSpec with ShouldMatchers { "be able to exchange multiple requests simultaneously" in new TestKitSpec { val portNumber = TestHelpers.portNumber.getAndIncrement() val s = server(portNumber) - Thread.sleep(500) + Thread.sleep(1000) val c = client(portNumber) val secC = client(portNumber) - Thread.sleep(500) + Thread.sleep(1000) val numberOfRequests = 10 From cafc4efbcb8ce596abffdbf0015a78d63c156731 Mon Sep 17 00:00:00 2001 From: Gideon de Kok Date: Mon, 26 May 2014 13:04:00 +0200 Subject: [PATCH 25/54] Update README --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index e4bd8bb..dd86e97 100644 --- a/README.md +++ b/README.md @@ -46,7 +46,7 @@ Or by adding the repo: to your SBT configuration and adding the `SNAPSHOT` to your library dependencies:
libraryDependencies ++= Seq(
-  "nl.gideondk" %% "sentinel" % "0.7.3"
+  "nl.gideondk" %% "sentinel" % "0.7.4"
 )
 
From 48f156f310960ab08b2c2edeaec6db147d01beb0 Mon Sep 17 00:00:00 2001 From: Gideon de Kok Date: Mon, 26 May 2014 13:06:54 +0200 Subject: [PATCH 26/54] Update README --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index dd86e97..472f8e0 100644 --- a/README.md +++ b/README.md @@ -43,7 +43,7 @@ You can install Sentinel through source (by publishing it into your local Ivy re Or by adding the repo:
"gideondk-repo" at "https://raw.github.com/gideondk/gideondk-mvn-repo/master"
-to your SBT configuration and adding the `SNAPSHOT` to your library dependencies:
+to your SBT configuration and adding Sentinel to your library dependencies (currently only built against Scala 2.11):
libraryDependencies ++= Seq(
   "nl.gideondk" %% "sentinel" % "0.7.4"

From 0b405599192e0d5b8a962b861b62c952e5d5555c Mon Sep 17 00:00:00 2001
From: Gideon de Kok 
Date: Mon, 7 Jul 2014 12:13:15 +0200
Subject: [PATCH 27/54] Add non-pipelined functionality

---
 project/Build.scala                           | 10 ++--
 .../scala/nl/gideondk/sentinel/Antenna.scala  | 53 +++++++++++++++----
 .../scala/nl/gideondk/sentinel/Client.scala   | 20 +++----
 .../nl/gideondk/sentinel/StreamingSpec.scala  | 19 +++++++
 4 files changed, 77 insertions(+), 25 deletions(-)

diff --git a/project/Build.scala b/project/Build.scala
index ae094a1..0f650d9 100755
--- a/project/Build.scala
+++ b/project/Build.scala
@@ -5,9 +5,9 @@ object ApplicationBuild extends Build {
   override lazy val settings = super.settings ++
     Seq(
       name := "sentinel",
-      version := "0.7.4",
+      version := "0.7.5",
       organization := "nl.gideondk",
-      scalaVersion := "2.11.0",
+      scalaVersion := "2.11.1",
       parallelExecution in Test := false,
       resolvers ++= Seq(Resolver.mavenLocal,
         "gideondk-repo" at "https://raw.github.com/gideondk/gideondk-mvn-repo/master",
@@ -24,10 +24,10 @@ object ApplicationBuild extends Build {
   val appDependencies = Seq(
     "org.scalatest" %% "scalatest" % "2.1.4" % "test",
 
-    "com.typesafe.play" % "play-iteratees_2.10" % "2.2.0",
+    "com.typesafe.play" % "play-iteratees_2.10" % "2.3.0",
 
-    "com.typesafe.akka" %% "akka-actor" % "2.3.2",
-    "com.typesafe.akka" %% "akka-testkit" % "2.3.2"
+    "com.typesafe.akka" %% "akka-actor" % "2.3.4",
+    "com.typesafe.akka" %% "akka-testkit" % "2.3.4"
   )
 
   lazy val root = Project(id = "sentinel",
diff --git a/src/main/scala/nl/gideondk/sentinel/Antenna.scala b/src/main/scala/nl/gideondk/sentinel/Antenna.scala
index 772f40b..cc6bec6 100644
--- a/src/main/scala/nl/gideondk/sentinel/Antenna.scala
+++ b/src/main/scala/nl/gideondk/sentinel/Antenna.scala
@@ -1,15 +1,14 @@
 package nl.gideondk.sentinel
 
-import scala.concurrent.Future
-
 import akka.actor._
-
-import akka.io._
 import akka.io.TcpPipelineHandler.{ Init, WithinActorContext }
+import akka.io._
+import nl.gideondk.sentinel.processors._
+import scala.collection.immutable.Queue
 
-import processors._
+import scala.concurrent.Future
 
-class Antenna[Cmd, Evt](init: Init[WithinActorContext, Cmd, Evt], Resolver: Resolver[Evt, Cmd]) extends Actor with ActorLogging with Stash {
+class Antenna[Cmd, Evt](init: Init[WithinActorContext, Cmd, Evt], resolver: Resolver[Evt, Cmd], allowPipelining: Boolean = true) extends Actor with ActorLogging with Stash {
 
   import context.dispatcher
 
@@ -17,6 +16,9 @@ class Antenna[Cmd, Evt](init: Init[WithinActorContext, Cmd, Evt], Resolver: Reso
     val consumer = context.actorOf(Props(new Consumer(init)), name = "resolver")
     val producer = context.actorOf(Props(new Producer(init)).withDispatcher("nl.gideondk.sentinel.sentinel-dispatcher"), name = "producer")
 
+    var commandQueue = Queue.empty[init.Command]
+    var commandInProcess = false
+
     context watch tcpHandler
     context watch producer
     context watch consumer
@@ -33,14 +35,38 @@ class Antenna[Cmd, Evt](init: Init[WithinActorContext, Cmd, Evt], Resolver: Reso
           stash()
       }
 
+      def popCommand() = if (!commandQueue.isEmpty) {
+        val cmd = commandQueue.head
+        commandQueue = commandQueue.tail
+        tcpHandler ! cmd
+      } else {
+        commandInProcess = false
+      }
+
       def handleCommands: Receive = {
         case x: Command.Ask[Cmd, Evt] ⇒
           consumer ! x.registration
-          tcpHandler ! init.Command(x.payload)
+
+          val cmd = init.Command(x.payload)
+          if (allowPipelining) tcpHandler ! cmd
+          else if (commandInProcess) {
+            commandQueue :+= cmd
+          } else {
+            commandInProcess = true
+            tcpHandler ! cmd
+          }
 
         case x: Command.AskStream[Cmd, Evt] ⇒
           consumer ! x.registration
-          tcpHandler ! init.Command(x.payload)
+
+          val cmd = init.Command(x.payload)
+          if (allowPipelining) tcpHandler ! cmd
+          else if (commandInProcess) {
+            commandQueue :+= cmd
+          } else {
+            commandInProcess = true
+            tcpHandler ! cmd
+          }
 
         case x: Command.SendStream[Cmd, Evt] ⇒
           consumer ! x.registration
@@ -60,10 +86,17 @@ class Antenna[Cmd, Evt](init: Init[WithinActorContext, Cmd, Evt], Resolver: Reso
         consumer ! x
 
       case init.Event(data) ⇒ {
-        Resolver.process(data) match {
+        resolver.process(data) match {
           case x: ProducerAction[Evt, Cmd] ⇒ producer ! ProducerActionAndData[Evt, Cmd](x, data)
-          case x: ConsumerAction           ⇒ consumer ! ConsumerActionAndData[Evt](x, data)
+
+          case ConsumerAction.ConsumeStreamChunk ⇒
+            consumer ! ConsumerActionAndData[Evt](ConsumerAction.ConsumeStreamChunk, data)
+
+          case x: ConsumerAction ⇒
+            consumer ! ConsumerActionAndData[Evt](x, data)
+            if (!allowPipelining) popCommand()
         }
+
       }
 
       case BackpressureBuffer.HighWatermarkReached ⇒ {
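
The `commandQueue`/`commandInProcess`/`popCommand` additions above implement a queue-or-send policy: with pipelining disabled, a command is written to the TCP handler immediately only when the antenna is idle; otherwise it is buffered and written once the response to the in-flight command has been consumed. A stand-alone sketch of that policy (illustrative only, not part of the patch; the class and method names are made up):

import scala.collection.immutable.Queue

// Sketch of the non-pipelined send policy used in Antenna above.
// Single-threaded use is assumed (it runs inside an actor), so no locking is needed.
final class SerializingSender[Cmd](write: Cmd => Unit) {
  private var queue = Queue.empty[Cmd]
  private var inFlight = false

  def send(cmd: Cmd): Unit =
    if (inFlight) queue :+= cmd          // a command is already in flight: buffer this one
    else { inFlight = true; write(cmd) } // idle: write immediately and mark as busy

  // Call when the response to the in-flight command has been consumed (cf. popCommand above).
  def onResponseConsumed(): Unit = queue.dequeueOption match {
    case Some((next, rest)) => queue = rest; write(next) // pop and write the next buffered command
    case None               => inFlight = false          // nothing left: become idle again
  }
}
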
diff --git a/src/main/scala/nl/gideondk/sentinel/Client.scala b/src/main/scala/nl/gideondk/sentinel/Client.scala
index 326a210..7b6f5b2 100644
--- a/src/main/scala/nl/gideondk/sentinel/Client.scala
+++ b/src/main/scala/nl/gideondk/sentinel/Client.scala
@@ -59,24 +59,24 @@ object Client {
   }
 
   def apply[Cmd, Evt](serverHost: String, serverPort: Int, routerConfig: RouterConfig,
-                      description: String = "Sentinel Client", stages: ⇒ PipelineStage[PipelineContext, Cmd, ByteString, Evt, ByteString], workerReconnectTime: FiniteDuration = 2 seconds, resolver: Resolver[Evt, Cmd] = Client.defaultResolver[Cmd, Evt], lowBytes: Long = 100L, highBytes: Long = 5000L, maxBufferSize: Long = 20000L)(implicit system: ActorSystem) = {
-    val core = system.actorOf(Props(new ClientCore[Cmd, Evt](routerConfig, description, workerReconnectTime, stages, resolver)(lowBytes, highBytes, maxBufferSize)).withDispatcher("nl.gideondk.sentinel.sentinel-dispatcher"), name = "sentinel-client-" + java.util.UUID.randomUUID.toString)
+                      description: String = "Sentinel Client", stages: ⇒ PipelineStage[PipelineContext, Cmd, ByteString, Evt, ByteString], workerReconnectTime: FiniteDuration = 2 seconds, resolver: Resolver[Evt, Cmd] = Client.defaultResolver[Cmd, Evt], allowPipelining: Boolean = true, lowBytes: Long = 100L, highBytes: Long = 5000L, maxBufferSize: Long = 20000L)(implicit system: ActorSystem) = {
+    val core = system.actorOf(Props(new ClientCore[Cmd, Evt](routerConfig, description, workerReconnectTime, stages, resolver, allowPipelining)(lowBytes, highBytes, maxBufferSize)).withDispatcher("nl.gideondk.sentinel.sentinel-dispatcher"), name = "sentinel-client-" + java.util.UUID.randomUUID.toString)
     core ! Client.ConnectToServer(new InetSocketAddress(serverHost, serverPort))
     new Client[Cmd, Evt] {
       val actor = core
     }
   }
 
-  def randomRouting[Cmd, Evt](serverHost: String, serverPort: Int, numberOfConnections: Int, description: String = "Sentinel Client", stages: ⇒ PipelineStage[PipelineContext, Cmd, ByteString, Evt, ByteString], workerReconnectTime: FiniteDuration = 2 seconds, resolver: Resolver[Evt, Cmd] = Client.defaultResolver[Cmd, Evt], lowBytes: Long = 100L, highBytes: Long = 5000L, maxBufferSize: Long = 20000L)(implicit system: ActorSystem) = {
-    apply(serverHost, serverPort, RandomRouter(numberOfConnections), description, stages, workerReconnectTime, resolver, lowBytes, highBytes, maxBufferSize)
+  def randomRouting[Cmd, Evt](serverHost: String, serverPort: Int, numberOfConnections: Int, description: String = "Sentinel Client", stages: ⇒ PipelineStage[PipelineContext, Cmd, ByteString, Evt, ByteString], workerReconnectTime: FiniteDuration = 2 seconds, resolver: Resolver[Evt, Cmd] = Client.defaultResolver[Cmd, Evt], allowPipelining: Boolean = true, lowBytes: Long = 100L, highBytes: Long = 5000L, maxBufferSize: Long = 20000L)(implicit system: ActorSystem) = {
+    apply(serverHost, serverPort, RandomRouter(numberOfConnections), description, stages, workerReconnectTime, resolver, allowPipelining, lowBytes, highBytes, maxBufferSize)
   }
 
-  def roundRobinRouting[Cmd, Evt](serverHost: String, serverPort: Int, numberOfConnections: Int, description: String = "Sentinel Client", stages: ⇒ PipelineStage[PipelineContext, Cmd, ByteString, Evt, ByteString], workerReconnectTime: FiniteDuration = 2 seconds, resolver: Resolver[Evt, Cmd] = Client.defaultResolver[Cmd, Evt], lowBytes: Long = 100L, highBytes: Long = 5000L, maxBufferSize: Long = 20000L)(implicit system: ActorSystem) = {
-    apply(serverHost, serverPort, RoundRobinRouter(numberOfConnections), description, stages, workerReconnectTime, resolver, lowBytes, highBytes, maxBufferSize)
+  def roundRobinRouting[Cmd, Evt](serverHost: String, serverPort: Int, numberOfConnections: Int, description: String = "Sentinel Client", stages: ⇒ PipelineStage[PipelineContext, Cmd, ByteString, Evt, ByteString], workerReconnectTime: FiniteDuration = 2 seconds, resolver: Resolver[Evt, Cmd] = Client.defaultResolver[Cmd, Evt], allowPipelining: Boolean = true, lowBytes: Long = 100L, highBytes: Long = 5000L, maxBufferSize: Long = 20000L)(implicit system: ActorSystem) = {
+    apply(serverHost, serverPort, RoundRobinRouter(numberOfConnections), description, stages, workerReconnectTime, resolver, allowPipelining, lowBytes, highBytes, maxBufferSize)
   }
 }
 
-class ClientAntennaManager[Cmd, Evt](address: InetSocketAddress, stages: ⇒ PipelineStage[PipelineContext, Cmd, ByteString, Evt, ByteString], Resolver: Resolver[Evt, Cmd])(lowBytes: Long, highBytes: Long, maxBufferSize: Long) extends Actor with ActorLogging with Stash {
+class ClientAntennaManager[Cmd, Evt](address: InetSocketAddress, stages: ⇒ PipelineStage[PipelineContext, Cmd, ByteString, Evt, ByteString], resolver: Resolver[Evt, Cmd], allowPipelining: Boolean = true)(lowBytes: Long, highBytes: Long, maxBufferSize: Long) extends Actor with ActorLogging with Stash {
   val tcp = akka.io.IO(Tcp)(context.system)
 
   override def preStart = tcp ! Tcp.Connect(address)
@@ -97,7 +97,7 @@ class ClientAntennaManager[Cmd, Evt](address: InetSocketAddress, stages: ⇒ Pip
           new TcpReadWriteAdapter >>
           new BackpressureBuffer(lowBytes, highBytes, maxBufferSize))
 
-      val antenna = context.actorOf(Props(new Antenna(init, Resolver)).withDispatcher("nl.gideondk.sentinel.sentinel-dispatcher"))
+      val antenna = context.actorOf(Props(new Antenna(init, resolver, allowPipelining)).withDispatcher("nl.gideondk.sentinel.sentinel-dispatcher"))
       val handler = context.actorOf(TcpPipelineHandler.props(init, sender, antenna).withDeploy(Deploy.local))
       context watch handler
 
@@ -120,7 +120,7 @@ class ClientAntennaManager[Cmd, Evt](address: InetSocketAddress, stages: ⇒ Pip
 }
 
 class ClientCore[Cmd, Evt](routerConfig: RouterConfig, description: String, reconnectDuration: FiniteDuration,
-                           stages: ⇒ PipelineStage[PipelineContext, Cmd, ByteString, Evt, ByteString], Resolver: Resolver[Evt, Cmd], workerDescription: String = "Sentinel Client Worker")(lowBytes: Long, highBytes: Long, maxBufferSize: Long) extends Actor with ActorLogging with Stash {
+                           stages: ⇒ PipelineStage[PipelineContext, Cmd, ByteString, Evt, ByteString], resolver: Resolver[Evt, Cmd], allowPipelining: Boolean = true, workerDescription: String = "Sentinel Client Worker")(lowBytes: Long, highBytes: Long, maxBufferSize: Long) extends Actor with ActorLogging with Stash {
 
   import context.dispatcher
 
@@ -133,7 +133,7 @@ class ClientCore[Cmd, Evt](routerConfig: RouterConfig, description: String, reco
   var reconnecting = false
 
   def antennaManagerProto(address: InetSocketAddress) =
-    new ClientAntennaManager(address, stages, Resolver)(lowBytes, highBytes, maxBufferSize)
+    new ClientAntennaManager(address, stages, resolver, allowPipelining)(lowBytes, highBytes, maxBufferSize)
 
   def routerProto(address: InetSocketAddress) =
     context.actorOf(Props(antennaManagerProto(address)).withRouter(routerConfig).withDispatcher("nl.gideondk.sentinel.sentinel-dispatcher"))
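
The extended `apply`/`randomRouting`/`roundRobinRouting` signatures above simply thread the new `allowPipelining` flag down to the `Antenna`. A minimal construction sketch, assuming the test protocol helpers from the specs (`SimpleMessage.stages`, `SimpleServerHandler`) are on the classpath; host, port and actor-system name are placeholders:

import scala.concurrent.duration._
import akka.actor.ActorSystem
import nl.gideondk.sentinel._
import nl.gideondk.sentinel.protocols._

object ClientExample {
  implicit val system = ActorSystem("sentinel-example")

  // Default behaviour: commands are pipelined over each connection.
  val pipelined =
    Client.randomRouting("localhost", 9999, 1, "Worker",
      SimpleMessage.stages, 0.5.seconds, SimpleServerHandler)

  // allowPipelining = false: each connection carries one outstanding command at a time,
  // queueing further commands until the previous response has been handled.
  val nonPipelined =
    Client.randomRouting("localhost", 9999, 1, "Worker",
      SimpleMessage.stages, 0.5.seconds, SimpleServerHandler, allowPipelining = false)
}
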
diff --git a/src/test/scala/nl/gideondk/sentinel/StreamingSpec.scala b/src/test/scala/nl/gideondk/sentinel/StreamingSpec.scala
index 9c49763..0f08092 100644
--- a/src/test/scala/nl/gideondk/sentinel/StreamingSpec.scala
+++ b/src/test/scala/nl/gideondk/sentinel/StreamingSpec.scala
@@ -22,6 +22,7 @@ class StreamingSpec extends WordSpec {
   implicit val duration = Duration(5, SECONDS)
 
   def client(portNumber: Int)(implicit system: ActorSystem) = Client.randomRouting("localhost", portNumber, 1, "Worker", SimpleMessage.stages, 0.5 seconds, SimpleServerHandler)(system)
+  def nonPipelinedClient(portNumber: Int)(implicit system: ActorSystem) = Client.randomRouting("localhost", portNumber, 1, "Worker", SimpleMessage.stages, 0.5 seconds, SimpleServerHandler, false)(system)
 
   def server(portNumber: Int)(implicit system: ActorSystem) = {
     val s = Server(portNumber, SimpleServerHandler, stages = SimpleMessage.stages)(system)
@@ -93,6 +94,24 @@ class StreamingSpec extends WordSpec {
       result.isSuccess should equal(true)
     }
 
+    "be able to receive multiple streams and normal commands simultaneously from a server in a non-pipelined environment" in new TestKitSpec {
+      val portNumber = TestHelpers.portNumber.getAndIncrement()
+      val s = server(portNumber)
+      val c = nonPipelinedClient(portNumber)
+
+      val count = 500
+      val numberOfActions = 8
+
+      val streamAction = Future.sequence(List.fill(numberOfActions)((c ?->> SimpleCommand(GENERATE_NUMBERS, count.toString)).flatMap(x ⇒ x |>>> Iteratee.getChunks)))
+      val action = Future.sequence(List.fill(count)(c ? SimpleCommand(PING_PONG, "")))
+
+      val actions = Future.sequence(List(streamAction, action))
+
+      val result = Try(Await.result(actions.map(_.flatten), 5 seconds))
+
+      result.isSuccess should equal(true)
+    }
+
     "be able to handle slow or idle consumers while retrieving streams from a server" in new TestKitSpec {
       val portNumber = TestHelpers.portNumber.getAndIncrement()
       val s = server(portNumber)

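
The new StreamingSpec case above exercises the non-pipelined path by interleaving streaming and plain requests. In application code the same pattern looks roughly as follows; this reuses the hypothetical `nonPipelined` client from the earlier sketch and the test protocol's command constants as the spec does:

import scala.concurrent.Await
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
import play.api.libs.iteratee.Iteratee
import nl.gideondk.sentinel.protocols._
import SimpleMessage._
import ClientExample.nonPipelined

object StreamingExample extends App {
  // Plain request/response ask; without pipelining these are queued transparently.
  val pong = nonPipelined ? SimpleCommand(PING_PONG, "")

  // Streaming ask: drain the returned enumerator into a list of chunks,
  // the same way the spec above uses Iteratee.getChunks.
  val numbers = (nonPipelined ?->> SimpleCommand(GENERATE_NUMBERS, "100"))
    .flatMap(stream => stream |>>> Iteratee.getChunks)

  println(Await.result(pong, 5.seconds))
  println(Await.result(numbers, 5.seconds).length)
}
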
From 9728be35a7499774282210f56dbfb7d9ffc06ea4 Mon Sep 17 00:00:00 2001
From: Gideon de Kok 
Date: Mon, 7 Jul 2014 12:14:29 +0200
Subject: [PATCH 28/54] Update README

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 472f8e0..429ed77 100644
--- a/README.md
+++ b/README.md
@@ -46,7 +46,7 @@ Or by adding the repo:
 to your SBT configuration and adding Sentinel to your library dependencies (currently only built against Scala 2.11):
 
 
libraryDependencies ++= Seq(
-  "nl.gideondk" %% "sentinel" % "0.7.4"
+  "nl.gideondk" %% "sentinel" % "0.7.5"
 )
 
From d4ae034bf4ec1797ec4ca9564296a775aa478086 Mon Sep 17 00:00:00 2001 From: crispy Date: Sun, 13 Jul 2014 21:53:41 -0700 Subject: [PATCH 29/54] Use play-iteratees for 2.11 and update to current release. move akka-testkit to test config. update scalatest to current. finally bump version ever so slightly. --- project/Build.scala | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/project/Build.scala b/project/Build.scala index 0f650d9..3afd4c0 100755 --- a/project/Build.scala +++ b/project/Build.scala @@ -5,7 +5,7 @@ object ApplicationBuild extends Build { override lazy val settings = super.settings ++ Seq( name := "sentinel", - version := "0.7.5", + version := "0.7.5.1", organization := "nl.gideondk", scalaVersion := "2.11.1", parallelExecution in Test := false, @@ -22,12 +22,12 @@ object ApplicationBuild extends Build { ) val appDependencies = Seq( - "org.scalatest" %% "scalatest" % "2.1.4" % "test", + "org.scalatest" %% "scalatest" % "2.2.0" % "test", - "com.typesafe.play" % "play-iteratees_2.10" % "2.3.0", + "com.typesafe.play" %% "play-iteratees" % "2.3.1", "com.typesafe.akka" %% "akka-actor" % "2.3.4", - "com.typesafe.akka" %% "akka-testkit" % "2.3.4" + "com.typesafe.akka" %% "akka-testkit" % "2.3.4" % "test" ) lazy val root = Project(id = "sentinel", From b57c44719b60c25fef981c536cb5e75df57cb1d6 Mon Sep 17 00:00:00 2001 From: Gideon de Kok Date: Mon, 14 Jul 2014 08:08:16 +0200 Subject: [PATCH 30/54] Update README --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 429ed77..26a63d6 100644 --- a/README.md +++ b/README.md @@ -46,7 +46,7 @@ Or by adding the repo: to your SBT configuration and adding Sentinel to your library dependencies (currently only build against Scala 2.11):
libraryDependencies ++= Seq(
-  "nl.gideondk" %% "sentinel" % "0.7.5"
+  "nl.gideondk" %% "sentinel" % "0.7.5.1"
 )
 
From 9717611e76e7e18eda7f147bc9711cad13968d27 Mon Sep 17 00:00:00 2001 From: Gideon de Kok Date: Thu, 30 Jun 2016 14:27:15 +0200 Subject: [PATCH 31/54] Update to newest Scala, Akka versions --- project/Build.scala | 8 ++++---- project/build.properties | 2 +- src/main/scala/nl/gideondk/sentinel/Client.scala | 8 ++++---- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/project/Build.scala b/project/Build.scala index 3afd4c0..8ea4602 100755 --- a/project/Build.scala +++ b/project/Build.scala @@ -5,9 +5,9 @@ object ApplicationBuild extends Build { override lazy val settings = super.settings ++ Seq( name := "sentinel", - version := "0.7.5.1", + version := "0.8-SNAPSHOT", organization := "nl.gideondk", - scalaVersion := "2.11.1", + scalaVersion := "2.11.8", parallelExecution in Test := false, resolvers ++= Seq(Resolver.mavenLocal, "gideondk-repo" at "https://raw.github.com/gideondk/gideondk-mvn-repo/master", @@ -26,8 +26,8 @@ object ApplicationBuild extends Build { "com.typesafe.play" %% "play-iteratees" % "2.3.1", - "com.typesafe.akka" %% "akka-actor" % "2.3.4", - "com.typesafe.akka" %% "akka-testkit" % "2.3.4" % "test" + "com.typesafe.akka" %% "akka-actor" % "2.4.6", + "com.typesafe.akka" %% "akka-testkit" % "2.4.6" % "test" ) lazy val root = Project(id = "sentinel", diff --git a/project/build.properties b/project/build.properties index 37b489c..43b8278 100644 --- a/project/build.properties +++ b/project/build.properties @@ -1 +1 @@ -sbt.version=0.13.1 +sbt.version=0.13.11 diff --git a/src/main/scala/nl/gideondk/sentinel/Client.scala b/src/main/scala/nl/gideondk/sentinel/Client.scala index 7b6f5b2..5328ee3 100644 --- a/src/main/scala/nl/gideondk/sentinel/Client.scala +++ b/src/main/scala/nl/gideondk/sentinel/Client.scala @@ -68,11 +68,11 @@ object Client { } def randomRouting[Cmd, Evt](serverHost: String, serverPort: Int, numberOfConnections: Int, description: String = "Sentinel Client", stages: ⇒ PipelineStage[PipelineContext, Cmd, ByteString, Evt, ByteString], workerReconnectTime: FiniteDuration = 2 seconds, resolver: Resolver[Evt, Cmd] = Client.defaultResolver[Cmd, Evt], allowPipelining: Boolean = true, lowBytes: Long = 100L, highBytes: Long = 5000L, maxBufferSize: Long = 20000L)(implicit system: ActorSystem) = { - apply(serverHost, serverPort, RandomRouter(numberOfConnections), description, stages, workerReconnectTime, resolver, allowPipelining, lowBytes, highBytes, maxBufferSize) + apply(serverHost, serverPort, RandomPool(numberOfConnections), description, stages, workerReconnectTime, resolver, allowPipelining, lowBytes, highBytes, maxBufferSize) } def roundRobinRouting[Cmd, Evt](serverHost: String, serverPort: Int, numberOfConnections: Int, description: String = "Sentinel Client", stages: ⇒ PipelineStage[PipelineContext, Cmd, ByteString, Evt, ByteString], workerReconnectTime: FiniteDuration = 2 seconds, resolver: Resolver[Evt, Cmd] = Client.defaultResolver[Cmd, Evt], allowPipelining: Boolean = true, lowBytes: Long = 100L, highBytes: Long = 5000L, maxBufferSize: Long = 20000L)(implicit system: ActorSystem) = { - apply(serverHost, serverPort, RoundRobinRouter(numberOfConnections), description, stages, workerReconnectTime, resolver, allowPipelining, lowBytes, highBytes, maxBufferSize) + apply(serverHost, serverPort, RoundRobinPool(numberOfConnections), description, stages, workerReconnectTime, resolver, allowPipelining, lowBytes, highBytes, maxBufferSize) } } @@ -149,7 +149,7 @@ class ClientCore[Cmd, Evt](routerConfig: RouterConfig, description: String, reco val router = 
routerProto(x.addr) context.watch(router) addresses = addresses ++ List(x.addr -> Some(router)) - coreRouter = Some(context.system.actorOf(Props.empty.withRouter(RoundRobinRouter(routees = addresses.map(_._2).flatten)))) + coreRouter = Some(context.system.actorOf(Props.empty.withRouter(RoundRobinGroup(addresses.map(_._2).flatten.map(_.path.toString))))) reconnecting = false unstashAll() } else { @@ -162,7 +162,7 @@ class ClientCore[Cmd, Evt](routerConfig: RouterConfig, description: String, reco terminatedRouter match { case Some(r) ⇒ addresses = addresses diff addresses.find(_._2 == Some(actor)).toList - coreRouter = Some(context.system.actorOf(Props.empty.withRouter(RoundRobinRouter(routees = addresses.map(_._2).flatten)))) + coreRouter = Some(context.system.actorOf(Props.empty.withRouter(RoundRobinGroup(addresses.map(_._2).flatten.map(_.path.toString))))) log.error("Router for: " + r._1 + " died, restarting in: " + reconnectDuration.toString()) reconnecting = true context.system.scheduler.scheduleOnce(reconnectDuration, self, Client.ConnectToServer(r._1)) From f77fe9bde06eef3c3e301c3773ef557c5c9abd6c Mon Sep 17 00:00:00 2001 From: Gideon de Kok Date: Tue, 13 Sep 2016 21:57:54 +0200 Subject: [PATCH 32/54] Start migration to Akka Streams version --- project/Build.scala | 24 +- project/plugins.sbt | 9 +- src/main/scala/akka/io/Pipelines.scala | 1168 ----------------- .../scala/akka/io/TcpPipelineHandler.scala | 174 --- .../scala/nl/gideondk/sentinel/Action.scala | 29 +- .../scala/nl/gideondk/sentinel/Antenna.scala | 112 -- .../scala/nl/gideondk/sentinel/Client.scala | 180 --- .../scala/nl/gideondk/sentinel/Command.scala | 47 +- .../scala/nl/gideondk/sentinel/Config.scala | 11 + .../scala/nl/gideondk/sentinel/Pipeline.scala | 12 + .../nl/gideondk/sentinel/Processor.scala | 37 + .../scala/nl/gideondk/sentinel/Resolver.scala | 99 +- .../scala/nl/gideondk/sentinel/Server.scala | 143 -- .../sentinel/processors/Consumer.scala | 244 ---- .../sentinel/processors/Producer.scala | 150 --- 15 files changed, 240 insertions(+), 2199 deletions(-) delete mode 100644 src/main/scala/akka/io/Pipelines.scala delete mode 100644 src/main/scala/akka/io/TcpPipelineHandler.scala delete mode 100644 src/main/scala/nl/gideondk/sentinel/Antenna.scala delete mode 100644 src/main/scala/nl/gideondk/sentinel/Client.scala create mode 100644 src/main/scala/nl/gideondk/sentinel/Config.scala create mode 100644 src/main/scala/nl/gideondk/sentinel/Pipeline.scala create mode 100644 src/main/scala/nl/gideondk/sentinel/Processor.scala delete mode 100644 src/main/scala/nl/gideondk/sentinel/Server.scala delete mode 100644 src/main/scala/nl/gideondk/sentinel/processors/Consumer.scala delete mode 100644 src/main/scala/nl/gideondk/sentinel/processors/Producer.scala diff --git a/project/Build.scala b/project/Build.scala index 8ea4602..3615ead 100755 --- a/project/Build.scala +++ b/project/Build.scala @@ -1,5 +1,6 @@ +import sbt.Keys._ import sbt._ -import Keys._ +import org.ensime.EnsimePlugin object ApplicationBuild extends Build { override lazy val settings = super.settings ++ @@ -26,20 +27,27 @@ object ApplicationBuild extends Build { "com.typesafe.play" %% "play-iteratees" % "2.3.1", - "com.typesafe.akka" %% "akka-actor" % "2.4.6", - "com.typesafe.akka" %% "akka-testkit" % "2.4.6" % "test" + "com.typesafe.akka" %% "akka-stream" % "2.4.8", + "com.typesafe.akka" %% "akka-stream-testkit" % "2.4.8", + + "com.typesafe.akka" %% "akka-actor" % "2.4.8", + "com.typesafe.akka" %% "akka-testkit" % "2.4.8" % "test", + + "com.typesafe" % 
"config" % "1.3.0" ) - lazy val root = Project(id = "sentinel", - base = file("."), - settings = Project.defaultSettings ++ Seq( + lazy val root = Project( + id = "sentinel", + base = file(".") + ).settings(Project.defaultSettings ++ Seq( libraryDependencies ++= appDependencies, mainClass := Some("Main") - ) ++ Format.settings - ) + ) ++ EnsimePlugin.projectSettings ++ Format.settings) + } object Format { + import com.typesafe.sbt.SbtScalariform._ lazy val settings = scalariformSettings ++ Seq( diff --git a/project/plugins.sbt b/project/plugins.sbt index 337ed97..6bc7c29 100755 --- a/project/plugins.sbt +++ b/project/plugins.sbt @@ -3,4 +3,11 @@ resolvers ++= Seq( "Typesafe Releases" at "http://repo.typesafe.com/typesafe/releases/" ) -addSbtPlugin("com.typesafe.sbt" % "sbt-scalariform" % "1.2.0") \ No newline at end of file +addSbtPlugin("com.typesafe.sbt" % "sbt-scalariform" % "1.2.0") + +addSbtPlugin("io.get-coursier" % "sbt-coursier" % "1.0.0-M12") + +// or clone this repo and type `sbt publishLocal` +resolvers += Resolver.sonatypeRepo("snapshots") + +addSbtPlugin("org.ensime" % "sbt-ensime" % "1.0.0") diff --git a/src/main/scala/akka/io/Pipelines.scala b/src/main/scala/akka/io/Pipelines.scala deleted file mode 100644 index 54144aa..0000000 --- a/src/main/scala/akka/io/Pipelines.scala +++ /dev/null @@ -1,1168 +0,0 @@ -/** Copyright (C) 2009-2013 Typesafe Inc. - */ - -package akka.io - -import java.lang.{ Iterable ⇒ JIterable } -import scala.annotation.tailrec -import scala.util.{ Try, Success, Failure } -import java.nio.ByteOrder -import akka.util.ByteString -import scala.collection.mutable -import akka.actor.{ NoSerializationVerificationNeeded, ActorContext } -import scala.concurrent.duration.FiniteDuration -import scala.collection.mutable.WrappedArray -import scala.concurrent.duration.Deadline -import scala.beans.BeanProperty -import akka.event.LoggingAdapter - -/** Scala API: A pair of pipes, one for commands and one for events, plus a - * management port. Commands travel from top to bottom, events from bottom to - * top. All messages which need to be handled “in-order” (e.g. top-down or - * bottom-up) need to be either events or commands; management messages are - * processed in no particular order. - * - * Java base classes are provided in the form of [[AbstractPipePair]] - * and [[AbstractSymmetricPipePair]] since the Scala function types can be - * awkward to handle in Java. - * - * @see [[PipelineStage]] - * @see [[AbstractPipePair]] - * @see [[AbstractSymmetricPipePair]] - * @see [[PipePairFactory]] - */ -trait PipePair[CmdAbove, CmdBelow, EvtAbove, EvtBelow] { - - type Result = Either[EvtAbove, CmdBelow] - type Mgmt = PartialFunction[AnyRef, Iterable[Result]] - - /** The command pipeline transforms injected commands from the upper stage - * into commands for the stage below, but it can also emit events for the - * upper stage. Any number of each can be generated. - */ - def commandPipeline: CmdAbove ⇒ Iterable[Result] - - /** The event pipeline transforms injected event from the lower stage - * into event for the stage above, but it can also emit commands for the - * stage below. Any number of each can be generated. - */ - def eventPipeline: EvtBelow ⇒ Iterable[Result] - - /** The management port allows sending broadcast messages to all stages - * within this pipeline. This can be used to communicate with stages in the - * middle without having to thread those messages through the surrounding - * stages. 
Each stage can generate events and commands in response to a - * command, and the aggregation of all those is returned. - * - * The default implementation ignores all management commands. - */ - def managementPort: Mgmt = PartialFunction.empty -} - -/** A convenience type for expressing a [[PipePair]] which has the same types - * for commands and events. - */ -trait SymmetricPipePair[Above, Below] extends PipePair[Above, Below, Above, Below] - -/** Java API: A pair of pipes, one for commands and one for events. Commands travel from - * top to bottom, events from bottom to top. - * - * @see [[PipelineStage]] - * @see [[AbstractSymmetricPipePair]] - * @see [[PipePairFactory]] - */ -abstract class AbstractPipePair[CmdAbove, CmdBelow, EvtAbove, EvtBelow] { - - /** Commands reaching this pipe pair are transformed into a sequence of - * commands for the next or events for the previous stage. - * - * Throwing exceptions within this method will abort processing of the whole - * pipeline which this pipe pair is part of. - * - * @param cmd the incoming command - * @return an Iterable of elements which are either events or commands - * - * @see [[#makeCommand]] - * @see [[#makeEvent]] - */ - def onCommand(cmd: CmdAbove): JIterable[Either[EvtAbove, CmdBelow]] - - /** Events reaching this pipe pair are transformed into a sequence of - * commands for the next or events for the previous stage. - * - * Throwing exceptions within this method will abort processing of the whole - * pipeline which this pipe pair is part of. - * - * @param cmd the incoming command - * @return an Iterable of elements which are either events or commands - * - * @see [[#makeCommand]] - * @see [[#makeEvent]] - */ - def onEvent(event: EvtBelow): JIterable[Either[EvtAbove, CmdBelow]] - - /** Management commands are sent to all stages in a broadcast fashion, - * conceptually in parallel (but not actually executing a stage - * reentrantly in case of events or commands being generated in response - * to a management command). - */ - def onManagementCommand(cmd: AnyRef): JIterable[Either[EvtAbove, CmdBelow]] = - java.util.Collections.emptyList() - - /** Helper method for wrapping a command which shall be emitted. - */ - def makeCommand(cmd: CmdBelow): Either[EvtAbove, CmdBelow] = Right(cmd) - - /** Helper method for wrapping an event which shall be emitted. - */ - def makeEvent(event: EvtAbove): Either[EvtAbove, CmdBelow] = Left(event) - - /** INTERNAL API: do not touch! - */ - private[io] val _internal$cmd = { - val l = new java.util.ArrayList[AnyRef](1) - l add null - l - } - /** INTERNAL API: do not touch! - */ - private[io] val _internal$evt = { - val l = new java.util.ArrayList[AnyRef](1) - l add null - l - } - - /** Wrap a single command for efficient return to the pipeline’s machinery. - * This method avoids allocating a [[scala.util.Right]] and an [[java.lang.Iterable]] by reusing - * one such instance within the AbstractPipePair, hence it can be used ONLY ONCE by - * each pipeline stage. Prototypic and safe usage looks like this: - * - * {{{ - * final MyResult result = ... ; - * return singleCommand(result); - * }}} - * - * @see PipelineContext#singleCommand - */ - def singleCommand(cmd: CmdBelow): JIterable[Either[EvtAbove, CmdBelow]] = { - _internal$cmd.set(0, cmd.asInstanceOf[AnyRef]) - _internal$cmd.asInstanceOf[JIterable[Either[EvtAbove, CmdBelow]]] - } - - /** Wrap a single event for efficient return to the pipeline’s machinery. 
- * This method avoids allocating a [[scala.util.Left]] and an [[java.lang.Iterable]] by reusing - * one such instance within the AbstractPipePair, hence it can be used ONLY ONCE by - * each pipeline stage. Prototypic and safe usage looks like this: - * - * {{{ - * final MyResult result = ... ; - * return singleEvent(result); - * }}} - * - * @see PipelineContext#singleEvent - */ - def singleEvent(evt: EvtAbove): JIterable[Either[EvtAbove, CmdBelow]] = { - _internal$evt.set(0, evt.asInstanceOf[AnyRef]) - _internal$evt.asInstanceOf[JIterable[Either[EvtAbove, CmdBelow]]] - } - - /** INTERNAL API: Dealias a possibly optimized return value such that it can - * be safely used; this is never needed when only using public API. - */ - def dealias[Cmd, Evt](msg: JIterable[Either[Evt, Cmd]]): JIterable[Either[Evt, Cmd]] = { - import java.util.Collections.singletonList - if (msg eq _internal$cmd) singletonList(Right(_internal$cmd.get(0).asInstanceOf[Cmd])) - else if (msg eq _internal$evt) singletonList(Left(_internal$evt.get(0).asInstanceOf[Evt])) - else msg - } -} - -/** A convenience type for expressing a [[AbstractPipePair]] which has the same types - * for commands and events. - */ -abstract class AbstractSymmetricPipePair[Above, Below] extends AbstractPipePair[Above, Below, Above, Below] - -/** This class contains static factory methods which produce [[PipePair]] - * instances; those are needed within the implementation of [[PipelineStage#apply]]. - */ -object PipePairFactory { - - /** Scala API: construct a [[PipePair]] from the two given functions; useful for not capturing `$outer` references. - */ - def apply[CmdAbove, CmdBelow, EvtAbove, EvtBelow] // - (commandPL: CmdAbove ⇒ Iterable[Either[EvtAbove, CmdBelow]], - eventPL: EvtBelow ⇒ Iterable[Either[EvtAbove, CmdBelow]], - management: PartialFunction[AnyRef, Iterable[Either[EvtAbove, CmdBelow]]] = PartialFunction.empty) = - new PipePair[CmdAbove, CmdBelow, EvtAbove, EvtBelow] { - override def commandPipeline = commandPL - override def eventPipeline = eventPL - override def managementPort = management - } - - private abstract class Converter[CmdAbove <: AnyRef, CmdBelow <: AnyRef, EvtAbove <: AnyRef, EvtBelow <: AnyRef] // - (val ap: AbstractPipePair[CmdAbove, CmdBelow, EvtAbove, EvtBelow], ctx: PipelineContext) { - import scala.collection.JavaConverters._ - protected def normalize(output: JIterable[Either[EvtAbove, CmdBelow]]): Iterable[Either[EvtAbove, CmdBelow]] = - if (output == java.util.Collections.EMPTY_LIST) Nil - else if (output eq ap._internal$cmd) ctx.singleCommand(ap._internal$cmd.get(0).asInstanceOf[CmdBelow]) - else if (output eq ap._internal$evt) ctx.singleEvent(ap._internal$evt.get(0).asInstanceOf[EvtAbove]) - else output.asScala - } - - /** Java API: construct a [[PipePair]] from the given [[AbstractPipePair]]. - */ - def create[CmdAbove <: AnyRef, CmdBelow <: AnyRef, EvtAbove <: AnyRef, EvtBelow <: AnyRef] // - (ctx: PipelineContext, ap: AbstractPipePair[CmdAbove, CmdBelow, EvtAbove, EvtBelow]) // - : PipePair[CmdAbove, CmdBelow, EvtAbove, EvtBelow] = - new Converter(ap, ctx) with PipePair[CmdAbove, CmdBelow, EvtAbove, EvtBelow] { - override val commandPipeline = { cmd: CmdAbove ⇒ normalize(ap.onCommand(cmd)) } - override val eventPipeline = { evt: EvtBelow ⇒ normalize(ap.onEvent(evt)) } - override val managementPort: Mgmt = { case x ⇒ normalize(ap.onManagementCommand(x)) } - } - - /** Java API: construct a [[PipePair]] from the given [[AbstractSymmetricPipePair]]. 
- */ - def create[Above <: AnyRef, Below <: AnyRef] // - (ctx: PipelineContext, ap: AbstractSymmetricPipePair[Above, Below]): SymmetricPipePair[Above, Below] = - new Converter(ap, ctx) with SymmetricPipePair[Above, Below] { - override val commandPipeline = { cmd: Above ⇒ normalize(ap.onCommand(cmd)) } - override val eventPipeline = { evt: Below ⇒ normalize(ap.onEvent(evt)) } - override val managementPort: Mgmt = { case x ⇒ normalize(ap.onManagementCommand(x)) } - } -} - -case class PipelinePorts[CmdAbove, CmdBelow, EvtAbove, EvtBelow]( - commands: CmdAbove ⇒ (Iterable[EvtAbove], Iterable[CmdBelow]), - events: EvtBelow ⇒ (Iterable[EvtAbove], Iterable[CmdBelow]), - management: PartialFunction[AnyRef, (Iterable[EvtAbove], Iterable[CmdBelow])]) - -/** This class contains static factory methods which turn a pipeline context - * and a [[PipelineStage]] into readily usable pipelines. - */ -object PipelineFactory { - - /** Scala API: build the pipeline and return a pair of functions representing - * the command and event pipelines. Each function returns the commands and - * events resulting from running the pipeline on the given input, where the - * the sequence of events is the first element of the returned pair and the - * sequence of commands the second element. - * - * Exceptions thrown by the pipeline stages will not be caught. - * - * @param ctx The context object for this pipeline - * @param stage The (composite) pipeline stage from whcih to build the pipeline - * @return a pair of command and event pipeline functions - */ - def buildFunctionTriple[Ctx <: PipelineContext, CmdAbove, CmdBelow, EvtAbove, EvtBelow] // - (ctx: Ctx, stage: PipelineStage[Ctx, CmdAbove, CmdBelow, EvtAbove, EvtBelow]) // - : PipelinePorts[CmdAbove, CmdBelow, EvtAbove, EvtBelow] = { - val pp = stage apply ctx - val split: (Iterable[Either[EvtAbove, CmdBelow]]) ⇒ (Iterable[EvtAbove], Iterable[CmdBelow]) = { in ⇒ - if (in.isEmpty) (Nil, Nil) - else if (in eq ctx.cmd) (Nil, Seq[CmdBelow](ctx.cmd(0))) - else if (in eq ctx.evt) (Seq[EvtAbove](ctx.evt(0)), Nil) - else { - val cmds = Vector.newBuilder[CmdBelow] - val evts = Vector.newBuilder[EvtAbove] - in foreach { - case Right(cmd) ⇒ cmds += cmd - case Left(evt) ⇒ evts += evt - } - (evts.result, cmds.result) - } - } - PipelinePorts(pp.commandPipeline andThen split, pp.eventPipeline andThen split, pp.managementPort andThen split) - } - - /** Scala API: build the pipeline attaching the given command and event sinks - * to its outputs. Exceptions thrown within the pipeline stages will abort - * processing (i.e. will not be processed in following stages) but will be - * caught and passed as [[scala.util.Failure]] into the respective sink. - * - * Exceptions thrown while processing management commands are not caught. 
- * - * @param ctx The context object for this pipeline - * @param stage The (composite) pipeline stage from whcih to build the pipeline - * @param commandSink The function to invoke for commands or command failures - * @param eventSink The function to invoke for events or event failures - * @return a handle for injecting events or commands into the pipeline - */ - def buildWithSinkFunctions[Ctx <: PipelineContext, CmdAbove, CmdBelow, EvtAbove, EvtBelow] // - (ctx: Ctx, - stage: PipelineStage[Ctx, CmdAbove, CmdBelow, EvtAbove, EvtBelow])( - commandSink: Try[CmdBelow] ⇒ Unit, - eventSink: Try[EvtAbove] ⇒ Unit): PipelineInjector[CmdAbove, EvtBelow] = - new PipelineInjector[CmdAbove, EvtBelow] { - val pl = stage(ctx) - override def injectCommand(cmd: CmdAbove): Unit = { - Try(pl.commandPipeline(cmd)) match { - case f: Failure[_] ⇒ commandSink(f.asInstanceOf[Try[CmdBelow]]) - case Success(out) ⇒ - if (out.isEmpty) () // nothing - else if (out eq ctx.cmd) commandSink(Success(ctx.cmd(0))) - else if (out eq ctx.evt) eventSink(Success(ctx.evt(0))) - else out foreach { - case Right(cmd) ⇒ commandSink(Success(cmd)) - case Left(evt) ⇒ eventSink(Success(evt)) - } - } - } - override def injectEvent(evt: EvtBelow): Unit = { - Try(pl.eventPipeline(evt)) match { - case f: Failure[_] ⇒ eventSink(f.asInstanceOf[Try[EvtAbove]]) - case Success(out) ⇒ - if (out.isEmpty) () // nothing - else if (out eq ctx.cmd) commandSink(Success(ctx.cmd(0))) - else if (out eq ctx.evt) eventSink(Success(ctx.evt(0))) - else out foreach { - case Right(cmd) ⇒ commandSink(Success(cmd)) - case Left(evt) ⇒ eventSink(Success(evt)) - } - } - } - override def managementCommand(cmd: AnyRef): Unit = { - val out = pl.managementPort(cmd) - if (out.isEmpty) () // nothing - else if (out eq ctx.cmd) commandSink(Success(ctx.cmd(0))) - else if (out eq ctx.evt) eventSink(Success(ctx.evt(0))) - else out foreach { - case Right(cmd) ⇒ commandSink(Success(cmd)) - case Left(evt) ⇒ eventSink(Success(evt)) - } - } - } - - /** Java API: build the pipeline attaching the given callback object to its - * outputs. Exceptions thrown within the pipeline stages will abort - * processing (i.e. will not be processed in following stages) but will be - * caught and passed as [[scala.util.Failure]] into the respective sink. - * - * Exceptions thrown while processing management commands are not caught. - * - * @param ctx The context object for this pipeline - * @param stage The (composite) pipeline stage from whcih to build the pipeline - * @param callback The [[PipelineSink]] to attach to the built pipeline - * @return a handle for injecting events or commands into the pipeline - */ - def buildWithSink[Ctx <: PipelineContext, CmdAbove, CmdBelow, EvtAbove, EvtBelow] // - (ctx: Ctx, - stage: PipelineStage[Ctx, CmdAbove, CmdBelow, EvtAbove, EvtBelow], - callback: PipelineSink[CmdBelow, EvtAbove]): PipelineInjector[CmdAbove, EvtBelow] = - buildWithSinkFunctions[Ctx, CmdAbove, CmdBelow, EvtAbove, EvtBelow](ctx, stage)({ - case Failure(thr) ⇒ callback.onCommandFailure(thr) - case Success(cmd) ⇒ callback.onCommand(cmd) - }, { - case Failure(thr) ⇒ callback.onEventFailure(thr) - case Success(evt) ⇒ callback.onEvent(evt) - }) -} - -/** A handle for injecting commands and events into a pipeline. Commands travel - * down (or to the right) through the stages, events travel in the opposite - * direction. 
- * - * @see [[PipelineFactory#buildWithSinkFunctions]] - * @see [[PipelineFactory#buildWithSink]] - */ -trait PipelineInjector[Cmd, Evt] { - - /** Inject the given command into the connected pipeline. - */ - @throws(classOf[Exception]) - def injectCommand(cmd: Cmd): Unit - - /** Inject the given event into the connected pipeline. - */ - @throws(classOf[Exception]) - def injectEvent(event: Evt): Unit - - /** Send a management command to all stages (in an unspecified order). - */ - @throws(classOf[Exception]) - def managementCommand(cmd: AnyRef): Unit -} - -/** A sink which can be attached by [[PipelineFactory#buildWithSink]] to a - * pipeline when it is being built. The methods are called when commands, - * events or their failures occur during evaluation of the pipeline (i.e. - * when injection is triggered using the associated [[PipelineInjector]]). - */ -abstract class PipelineSink[Cmd, Evt] { - - /** This callback is invoked for every command generated by the pipeline. - * - * By default this does nothing. - */ - @throws(classOf[Throwable]) - def onCommand(cmd: Cmd): Unit = () - - /** This callback is invoked if an exception occurred while processing an - * injected command. If this callback is invoked that no other callbacks will - * be invoked for the same injection. - * - * By default this will just throw the exception. - */ - @throws(classOf[Throwable]) - def onCommandFailure(thr: Throwable): Unit = throw thr - - /** This callback is invoked for every event generated by the pipeline. - * - * By default this does nothing. - */ - @throws(classOf[Throwable]) - def onEvent(event: Evt): Unit = () - - /** This callback is invoked if an exception occurred while processing an - * injected event. If this callback is invoked that no other callbacks will - * be invoked for the same injection. - * - * By default this will just throw the exception. - */ - @throws(classOf[Throwable]) - def onEventFailure(thr: Throwable): Unit = throw thr -} - -/** This base trait of each pipeline’s context provides optimized facilities - * for generating single commands or events (i.e. the fast common case of 1:1 - * message transformations). - * - * IMPORTANT NOTICE: - * - * A PipelineContext MUST NOT be shared between multiple pipelines, it contains mutable - * state without synchronization. You have been warned! - * - * @see AbstractPipelineContext see AbstractPipelineContext for a default implementation (Java) - */ -trait PipelineContext { - - /** INTERNAL API: do not touch! - */ - private val cmdHolder = new Array[AnyRef](1) - /** INTERNAL API: do not touch! - */ - private val evtHolder = new Array[AnyRef](1) - /** INTERNAL API: do not touch! - */ - private[io] val cmd = WrappedArray.make(cmdHolder) - /** INTERNAL API: do not touch! - */ - private[io] val evt = WrappedArray.make(evtHolder) - - /** Scala API: Wrap a single command for efficient return to the pipeline’s machinery. - * This method avoids allocating a [[scala.util.Right]] and an [[scala.collection.Iterable]] by reusing - * one such instance within the PipelineContext, hence it can be used ONLY ONCE by - * each pipeline stage. Prototypic and safe usage looks like this: - * - * {{{ - * override val commandPipeline = { cmd => - * val myResult = ... 
- * ctx.singleCommand(myResult) - * } - * }}} - * - * @see AbstractPipePair#singleCommand see AbstractPipePair for the Java API - */ - def singleCommand[Cmd <: AnyRef, Evt <: AnyRef](cmd: Cmd): Iterable[Either[Evt, Cmd]] = { - cmdHolder(0) = cmd - this.cmd - } - - /** Scala API: Wrap a single event for efficient return to the pipeline’s machinery. - * This method avoids allocating a [[scala.util.Left]] and an [[scala.collection.Iterable]] by reusing - * one such instance within the context, hence it can be used ONLY ONCE by - * each pipeline stage. Prototypic and safe usage looks like this: - * - * {{{ - * override val eventPipeline = { cmd => - * val myResult = ... - * ctx.singleEvent(myResult) - * } - * }}} - * - * @see AbstractPipePair#singleEvent see AbstractPipePair for the Java API - */ - def singleEvent[Cmd <: AnyRef, Evt <: AnyRef](evt: Evt): Iterable[Either[Evt, Cmd]] = { - evtHolder(0) = evt - this.evt - } - - /** A shared (and shareable) instance of an empty `Iterable[Either[EvtAbove, CmdBelow]]`. - * Use this when processing does not yield any commands or events as result. - */ - def nothing[Cmd, Evt]: Iterable[Either[Evt, Cmd]] = Nil - - /** INTERNAL API: Dealias a possibly optimized return value such that it can - * be safely used; this is never needed when only using public API. - */ - def dealias[Cmd, Evt](msg: Iterable[Either[Evt, Cmd]]): Iterable[Either[Evt, Cmd]] = { - if (msg.isEmpty) Nil - else if (msg eq cmd) Seq(Right(cmd(0))) - else if (msg eq evt) Seq(Left(evt(0))) - else msg - } -} - -/** This base trait of each pipeline’s context provides optimized facilities - * for generating single commands or events (i.e. the fast common case of 1:1 - * message transformations). - * - * IMPORTANT NOTICE: - * - * A PipelineContext MUST NOT be shared between multiple pipelines, it contains mutable - * state without synchronization. You have been warned! - */ -abstract class AbstractPipelineContext extends PipelineContext - -object PipelineStage { - - /** Java API: attach the two given stages such that the command output of the - * first is fed into the command input of the second, and the event output of - * the second is fed into the event input of the first. In other words: - * sequence the stages such that the left one is on top of the right one. - * - * @param left the left or upper pipeline stage - * @param right the right or lower pipeline stage - * @return a pipeline stage representing the sequence of the two stages - */ - def sequence[Ctx <: PipelineContext, CmdAbove, CmdBelow, CmdBelowBelow, EvtAbove, EvtBelow, EvtBelowBelow] // - (left: PipelineStage[_ >: Ctx, CmdAbove, CmdBelow, EvtAbove, EvtBelow], - right: PipelineStage[_ >: Ctx, CmdBelow, CmdBelowBelow, EvtBelow, EvtBelowBelow]) // - : PipelineStage[Ctx, CmdAbove, CmdBelowBelow, EvtAbove, EvtBelowBelow] = - left >> right - - /** Java API: combine the two stages such that the command pipeline of the - * left stage is used and the event pipeline of the right, discarding the - * other two sub-pipelines. 
- * - * @param left the command pipeline - * @param right the event pipeline - * @return a pipeline stage using the left command pipeline and the right event pipeline - */ - def combine[Ctx <: PipelineContext, CmdAbove, CmdBelow, EvtAbove, EvtBelow] // - (left: PipelineStage[Ctx, CmdAbove, CmdBelow, EvtAbove, EvtBelow], - right: PipelineStage[Ctx, CmdAbove, CmdBelow, EvtAbove, EvtBelow]) // - : PipelineStage[Ctx, CmdAbove, CmdBelow, EvtAbove, EvtBelow] = - left | right -} - -/** A [[PipelineStage]] which is symmetric in command and event types, i.e. it only - * has one command and event type above and one below. - */ -abstract class SymmetricPipelineStage[Context <: PipelineContext, Above, Below] extends PipelineStage[Context, Above, Below, Above, Below] - -/** A pipeline stage which can be combined with other stages to build a - * protocol stack. The main function of this class is to serve as a factory - * for the actual [[PipePair]] generated by the [[#apply]] method so that a - * context object can be passed in. - * - * @see [[PipelineFactory]] - */ -abstract class PipelineStage[Context <: PipelineContext, CmdAbove, CmdBelow, EvtAbove, EvtBelow] { left ⇒ - - /** Implement this method to generate this stage’s pair of command and event - * functions. - * - * INTERNAL API: do not use this method to instantiate a pipeline! - * - * @see [[PipelineFactory]] - * @see [[AbstractPipePair]] - * @see [[AbstractSymmetricPipePair]] - */ - protected[io] def apply(ctx: Context): PipePair[CmdAbove, CmdBelow, EvtAbove, EvtBelow] - - /** Scala API: attach the two given stages such that the command output of the - * first is fed into the command input of the second, and the event output of - * the second is fed into the event input of the first. In other words: - * sequence the stages such that the left one is on top of the right one. 
- * - * @param right the right or lower pipeline stage - * @return a pipeline stage representing the sequence of the two stages - */ - def >>[CmdBelowBelow, EvtBelowBelow, BelowContext <: Context] // - (right: PipelineStage[_ >: BelowContext, CmdBelow, CmdBelowBelow, EvtBelow, EvtBelowBelow]) // - : PipelineStage[BelowContext, CmdAbove, CmdBelowBelow, EvtAbove, EvtBelowBelow] = - new PipelineStage[BelowContext, CmdAbove, CmdBelowBelow, EvtAbove, EvtBelowBelow] { - - protected[io] override def apply(ctx: BelowContext): PipePair[CmdAbove, CmdBelowBelow, EvtAbove, EvtBelowBelow] = { - - val leftPL = left(ctx) - val rightPL = right(ctx) - - new PipePair[CmdAbove, CmdBelowBelow, EvtAbove, EvtBelowBelow] { - - type Output = Either[EvtAbove, CmdBelowBelow] - - import language.implicitConversions - @inline implicit def narrowRight[A, B, C](in: Right[A, B]): Right[C, B] = in.asInstanceOf[Right[C, B]] - @inline implicit def narrowLeft[A, B, C](in: Left[A, B]): Left[A, C] = in.asInstanceOf[Left[A, C]] - - def loopLeft(input: Iterable[Either[EvtAbove, CmdBelow]]): Iterable[Output] = { - if (input.isEmpty) Nil - else if (input eq ctx.cmd) loopRight(rightPL.commandPipeline(ctx.cmd(0))) - else if (input eq ctx.evt) ctx.evt - else { - val output = Vector.newBuilder[Output] - input foreach { - case Right(cmd) ⇒ output ++= ctx.dealias(loopRight(rightPL.commandPipeline(cmd))) - case l @ Left(_) ⇒ output += l - } - output.result - } - } - - def loopRight(input: Iterable[Either[EvtBelow, CmdBelowBelow]]): Iterable[Output] = { - if (input.isEmpty) Nil - else if (input eq ctx.cmd) ctx.cmd - else if (input eq ctx.evt) loopLeft(leftPL.eventPipeline(ctx.evt(0))) - else { - val output = Vector.newBuilder[Output] - input foreach { - case r @ Right(_) ⇒ output += r - case Left(evt) ⇒ output ++= ctx.dealias(loopLeft(leftPL.eventPipeline(evt))) - } - output.result - } - } - - override val commandPipeline = { a: CmdAbove ⇒ loopLeft(leftPL.commandPipeline(a)) } - - override val eventPipeline = { b: EvtBelowBelow ⇒ loopRight(rightPL.eventPipeline(b)) } - - override val managementPort: PartialFunction[AnyRef, Iterable[Either[EvtAbove, CmdBelowBelow]]] = { - case x ⇒ - val output = Vector.newBuilder[Output] - output ++= ctx.dealias(loopLeft(leftPL.managementPort.applyOrElse(x, (_: AnyRef) ⇒ Nil))) - output ++= ctx.dealias(loopRight(rightPL.managementPort.applyOrElse(x, (_: AnyRef) ⇒ Nil))) - output.result - } - } - } - } - - /** Scala API: combine the two stages such that the command pipeline of the - * left stage is used and the event pipeline of the right, discarding the - * other two sub-pipelines. 
- * - * @param right the event pipeline - * @return a pipeline stage using the left command pipeline and the right event pipeline - */ - def |[RightContext <: Context] // - (right: PipelineStage[_ >: RightContext, CmdAbove, CmdBelow, EvtAbove, EvtBelow]) // - : PipelineStage[RightContext, CmdAbove, CmdBelow, EvtAbove, EvtBelow] = - new PipelineStage[RightContext, CmdAbove, CmdBelow, EvtAbove, EvtBelow] { - override def apply(ctx: RightContext): PipePair[CmdAbove, CmdBelow, EvtAbove, EvtBelow] = - new PipePair[CmdAbove, CmdBelow, EvtAbove, EvtBelow] { - - val leftPL = left(ctx) - val rightPL = right(ctx) - - override val commandPipeline = leftPL.commandPipeline - override val eventPipeline = rightPL.eventPipeline - override val managementPort: Mgmt = { - case x ⇒ - val output = Vector.newBuilder[Either[EvtAbove, CmdBelow]] - output ++= ctx.dealias(leftPL.managementPort(x)) - output ++= ctx.dealias(rightPL.managementPort(x)) - output.result - } - } - } -} - -object BackpressureBuffer { - /** Message type which is sent when the buffer’s high watermark has been - * reached, which means that further write requests should not be sent - * until the low watermark has been reached again. - */ - trait HighWatermarkReached extends Tcp.Event - case object HighWatermarkReached extends HighWatermarkReached - - /** Message type which is sent when the buffer’s fill level falls below - * the low watermark, which means that writing can commence again. - */ - trait LowWatermarkReached extends Tcp.Event - case object LowWatermarkReached extends LowWatermarkReached - -} - -/** This pipeline stage implements a configurable buffer for transforming the - * per-write ACK/NACK-based backpressure model of a TCP connection actor into - * an edge-triggered back-pressure model: the upper stages will receive - * notification when the buffer runs full ([[BackpressureBuffer.HighWatermarkReached]]) and when - * it subsequently empties ([[BackpressureBuffer.LowWatermarkReached]]). The upper layers should - * respond by not generating more writes when the buffer is full. There is also - * a hard limit upon which this buffer will abort the connection. - * - * All limits are configurable and are given in number of bytes. - * The `highWatermark` should be set such that the - * amount of data generated before reception of the asynchronous - * [[BackpressureBuffer.HighWatermarkReached]] notification does not lead to exceeding the - * `maxCapacity` hard limit; if the writes may arrive in bursts then the - * difference between these two should allow for at least one burst to be sent - * after the high watermark has been reached. The `lowWatermark` must be less - * than or equal to the `highWatermark`, where the difference between these two - * defines the hysteresis, i.e. how often these notifications are sent out (i.e. - * if the difference is rather large then it will take some time for the buffer - * to empty below the low watermark, and that room is then available for data - * sent in response to the [[BackpressureBuffer.LowWatermarkReached]] notification; if the - * difference was small then the buffer would more quickly oscillate between - * these two limits). 
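For illustration, a hypothetical upper layer reacting to the two notifications described above: it stops emitting writes after `HighWatermarkReached` and resumes on `LowWatermarkReached`. The connection reference and the local buffering strategy are placeholders.

<pre><code>
import akka.actor.{ Actor, ActorRef }
import akka.io.{ BackpressureBuffer, Tcp }
import akka.util.ByteString

// Hypothetical upper layer: stop emitting writes after HighWatermarkReached
// and resume once LowWatermarkReached arrives.
class ThrottledWriter(connection: ActorRef) extends Actor {
  def receive = writing

  def writing: Receive = {
    case data: ByteString                        ⇒ connection ! Tcp.Write(data)
    case BackpressureBuffer.HighWatermarkReached ⇒ context.become(suspended)
  }

  def suspended: Receive = {
    case _: ByteString                           ⇒ () // buffer locally or drop until resumed
    case BackpressureBuffer.LowWatermarkReached  ⇒ context.become(writing)
  }
}
</code></pre>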
- */ -class BackpressureBuffer(lowBytes: Long, highBytes: Long, maxBytes: Long) - extends PipelineStage[HasLogging, Tcp.Command, Tcp.Command, Tcp.Event, Tcp.Event] { - - require(lowBytes >= 0, "lowWatermark needs to be non-negative") - require(highBytes >= lowBytes, "highWatermark needs to be at least as large as lowWatermark") - require(maxBytes >= highBytes, "maxCapacity needs to be at least as large as highWatermark") - - // WARNING: Closes over enclosing class -- cannot moved outside because of backwards binary compatibility - // Fixed in 2.3 - case class Ack(num: Int, ack: Tcp.Event) extends Tcp.Event with NoSerializationVerificationNeeded - - override def apply(ctx: HasLogging) = new PipePair[Tcp.Command, Tcp.Command, Tcp.Event, Tcp.Event] { - - import Tcp._ - import BackpressureBuffer._ - - private val log = ctx.getLogger - - private var storageOffset = 0 - private var storage = Vector.empty[Write] - private def currentOffset = storageOffset + storage.size - - private var stored = 0L - private var suspended = false - - private var behavior = writing - override def commandPipeline = behavior - override def eventPipeline = behavior - - private def become(f: Message ⇒ Iterable[Result]) { behavior = f } - - private lazy val writing: Message ⇒ Iterable[Result] = { - case Write(data, ack) ⇒ - buffer(Write(data, Ack(currentOffset, ack)), doWrite = true) - - case CommandFailed(Write(_, Ack(offset, _))) ⇒ - become(buffering(offset)) - ctx.singleCommand(ResumeWriting) - - case cmd: CloseCommand ⇒ cmd match { - case _ if storage.isEmpty ⇒ - become(finished) - ctx.singleCommand(cmd) - case Abort ⇒ - storage = Vector.empty - become(finished) - ctx.singleCommand(Abort) - case _ ⇒ - become(closing(cmd)) - ctx.nothing - } - - case Ack(seq, ack) ⇒ acknowledge(seq, ack) - - case cmd: Command ⇒ ctx.singleCommand(cmd) - case evt: Event ⇒ ctx.singleEvent(evt) - } - - private def buffering(nack: Int): Message ⇒ Iterable[Result] = { - var toAck = 10 - var closed: CloseCommand = null - - { - case Write(data, ack) ⇒ - buffer(Write(data, Ack(currentOffset, ack)), doWrite = false) - - case WritingResumed ⇒ - ctx.singleCommand(storage(0)) - - case cmd: CloseCommand ⇒ cmd match { - case Abort ⇒ - storage = Vector.empty - become(finished) - ctx.singleCommand(Abort) - case _ ⇒ - closed = cmd - ctx.nothing - } - - case Ack(seq, ack) if seq < nack ⇒ acknowledge(seq, ack) - - case Ack(seq, ack) ⇒ - val ackMsg = acknowledge(seq, ack) - if (storage.nonEmpty) { - if (toAck > 0) { - toAck -= 1 - ctx.dealias(ackMsg) ++ Seq(Right(storage(0))) - } else { - become(if (closed != null) closing(closed) else writing) - ctx.dealias(ackMsg) ++ storage.map(Right(_)) - } - } else if (closed != null) { - become(finished) - ctx.dealias(ackMsg) ++ Seq(Right(closed)) - } else { - become(writing) - ackMsg - } - - case CommandFailed(_: Write) ⇒ ctx.nothing - case cmd: Command ⇒ ctx.singleCommand(cmd) - case evt: Event ⇒ ctx.singleEvent(evt) - } - } - - private def closing(cmd: CloseCommand): Message ⇒ Iterable[Result] = { - case Ack(seq, ack) ⇒ - val result = acknowledge(seq, ack) - if (storage.isEmpty) { - become(finished) - ctx.dealias(result) ++ Seq(Right(cmd)) - } else result - - case CommandFailed(_: Write) ⇒ - become({ - case WritingResumed ⇒ - become(closing(cmd)) - storage.map(Right(_)) - case CommandFailed(_: Write) ⇒ ctx.nothing - case cmd: Command ⇒ ctx.singleCommand(cmd) - case evt: Event ⇒ ctx.singleEvent(evt) - }) - ctx.singleCommand(ResumeWriting) - - case cmd: Command ⇒ ctx.singleCommand(cmd) - case evt: Event ⇒ 
ctx.singleEvent(evt) - } - - private val finished: Message ⇒ Iterable[Result] = { - case _: Write ⇒ ctx.nothing - case CommandFailed(_: Write) ⇒ ctx.nothing - case cmd: Command ⇒ ctx.singleCommand(cmd) - case evt: Event ⇒ ctx.singleEvent(evt) - } - - private def buffer(w: Write, doWrite: Boolean): Iterable[Result] = { - storage :+= w - stored += w.data.size - - if (stored > maxBytes) { - log.warning("aborting connection (buffer overrun)") - become(finished) - ctx.singleCommand(Abort) - } else if (stored > highBytes && !suspended) { - log.debug("suspending writes") - suspended = true - if (doWrite) { - Seq(Right(w), Left(HighWatermarkReached)) - } else { - ctx.singleEvent(HighWatermarkReached) - } - } else if (doWrite) { - ctx.singleCommand(w) - } else Nil - } - - private def acknowledge(seq: Int, ack: Event): Iterable[Result] = { - require(seq == storageOffset, s"received ack $seq at $storageOffset") - require(storage.nonEmpty, s"storage was empty at ack $seq") - - val size = storage(0).data.size - stored -= size - - storageOffset += 1 - storage = storage drop 1 - - if (suspended && stored < lowBytes) { - log.debug("resuming writes") - suspended = false - if (ack == NoAck) ctx.singleEvent(LowWatermarkReached) - else Vector(Left(ack), Left(LowWatermarkReached)) - } else if (ack == NoAck) ctx.nothing - else ctx.singleEvent(ack) - } - } - -} - -//#length-field-frame -/** Pipeline stage for length-field encoded framing. It will prepend a - * four-byte length header to the message; the header contains the length of - * the resulting frame including header in big-endian representation. - * - * The `maxSize` argument is used to protect the communication channel sanity: - * larger frames will not be sent (silently dropped) or received (in which case - * stream decoding would be broken, hence throwing an IllegalArgumentException). - */ -class LengthFieldFrame(maxSize: Int, - byteOrder: ByteOrder = ByteOrder.BIG_ENDIAN, - headerSize: Int = 4, - lengthIncludesHeader: Boolean = true) - extends SymmetricPipelineStage[PipelineContext, ByteString, ByteString] { - - //#range-checks-omitted - require(byteOrder ne null, "byteOrder must not be null") - require(headerSize > 0 && headerSize <= 4, "headerSize must be in (0, 4]") - require(maxSize > 0, "maxSize must be positive") - require(maxSize <= (Int.MaxValue >> (4 - headerSize) * 8) * (if (headerSize == 4) 1 else 2), - "maxSize cannot exceed 256**headerSize") - //#range-checks-omitted - - override def apply(ctx: PipelineContext) = - new SymmetricPipePair[ByteString, ByteString] { - var buffer = None: Option[ByteString] - implicit val byteOrder = LengthFieldFrame.this.byteOrder - - /** Extract as many complete frames as possible from the given ByteString - * and return the remainder together with the extracted frames in reverse - * order. 
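For reference, a hand-rolled round trip of the default frame layout (`headerSize = 4`, `lengthIncludesHeader = true`, big-endian), using the same `ByteString` helpers the stage itself uses below:

<pre><code>
import java.nio.ByteOrder
import akka.util.ByteString

implicit val byteOrder = ByteOrder.BIG_ENDIAN

// Encode: a 5-byte payload is preceded by a 4-byte header containing 9,
// since the length field includes the header itself.
val payload = ByteString("hello")
val builder = ByteString.newBuilder
builder.putLongPart(payload.length + 4, 4)
builder ++= payload
val frame = builder.result

// Decode: read the length field, then slice the payload out of the frame.
val length = frame.iterator.getLongPart(4).toInt // 9
val body   = frame.slice(4, length)              // ByteString("hello")
</code></pre>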
- */ - @tailrec - def extractFrames(bs: ByteString, acc: List[ByteString]) // - : (Option[ByteString], Seq[ByteString]) = { - if (bs.isEmpty) { - (None, acc) - } else if (bs.length < headerSize) { - (Some(bs.compact), acc) - } else { - val length = bs.iterator.getLongPart(headerSize).toInt - if (length < 0 || length > maxSize) - throw new IllegalArgumentException( - s"received too large frame of size $length (max = $maxSize)") - val total = if (lengthIncludesHeader) length else length + headerSize - if (bs.length >= total) { - extractFrames(bs drop total, bs.slice(headerSize, total) :: acc) - } else { - (Some(bs.compact), acc) - } - } - } - - /* - * This is how commands (writes) are transformed: calculate length - * including header, write that to a ByteStringBuilder and append the - * payload data. The result is a single command (i.e. `Right(...)`). - */ - override def commandPipeline = - { bs: ByteString ⇒ - val length = if (lengthIncludesHeader) bs.length + headerSize else bs.length - - if (length < 0 || length > maxSize) - throw new IllegalArgumentException( - s"received too large frame of size $length (max = $maxSize)") - - else { - val bb = ByteString.newBuilder - bb.putLongPart(length, headerSize) - bb ++= bs - ctx.singleCommand(bb.result) - } - } - - /* - * This is how events (reads) are transformed: append the received - * ByteString to the buffer (if any) and extract the frames from the - * result. In the end store the new buffer contents and return the - * list of events (i.e. `Left(...)`). - */ - override def eventPipeline = - { bs: ByteString ⇒ - val data = if (buffer.isEmpty) bs else buffer.get ++ bs - val (nb, frames) = extractFrames(data, Nil) - buffer = nb - /* - * please note the specialized (optimized) facility for emitting - * just a single event - */ - frames match { - case Nil ⇒ Nil - case one :: Nil ⇒ ctx.singleEvent(one) - case many ⇒ many reverseMap (Left(_)) - } - } - } -} -//#length-field-frame - -/** Pipeline stage for delimiter byte based framing and de-framing. Useful for string oriented protocol using '\n' - * or 0 as delimiter values. - * - * @param maxSize The maximum size of the frame the pipeline is willing to decode. Not checked for encoding, as the - * sender might decide to pass through multiple chunks in one go (multiple lines in case of a line-based - * protocol) - * @param delimiter The sequence of bytes that will be used as the delimiter for decoding. - * @param includeDelimiter If enabled, the delmiter bytes will be part of the decoded messages. In the case of sends - * the delimiter has to be appended to the end of frames by the user. 
It is also possible - * to send multiple frames by embedding multiple delimiters in the passed ByteString - */ -class DelimiterFraming(maxSize: Int, delimiter: ByteString = ByteString('\n'), includeDelimiter: Boolean = false) - extends SymmetricPipelineStage[PipelineContext, ByteString, ByteString] { - - require(maxSize > 0, "maxSize must be positive") - require(delimiter.nonEmpty, "delimiter must not be empty") - - override def apply(ctx: PipelineContext) = new SymmetricPipePair[ByteString, ByteString] { - val singleByteDelimiter: Boolean = delimiter.size == 1 - var buffer: ByteString = ByteString.empty - var delimiterFragment: Option[ByteString] = None - val firstByteOfDelimiter = delimiter.head - - @tailrec - private def extractParts(nextChunk: ByteString, acc: List[ByteString]): List[ByteString] = delimiterFragment match { - case Some(fragment) if nextChunk.size < fragment.size && fragment.startsWith(nextChunk) ⇒ - buffer ++= nextChunk - delimiterFragment = Some(fragment.drop(nextChunk.size)) - acc - // We got the missing parts of the delimiter - case Some(fragment) if nextChunk.startsWith(fragment) ⇒ - val decoded = if (includeDelimiter) buffer ++ fragment else buffer.take(buffer.size - delimiter.size + fragment.size) - buffer = ByteString.empty - delimiterFragment = None - extractParts(nextChunk.drop(fragment.size), decoded :: acc) - case _ ⇒ - val matchPosition = nextChunk.indexOf(firstByteOfDelimiter) - if (matchPosition == -1) { - delimiterFragment = None - val minSize = buffer.size + nextChunk.size - if (minSize > maxSize) throw new IllegalArgumentException( - s"Received too large frame of size $minSize (max = $maxSize)") - buffer ++= nextChunk - acc - } else if (matchPosition + delimiter.size > nextChunk.size) { - val delimiterMatchLength = nextChunk.size - matchPosition - if (nextChunk.drop(matchPosition) == delimiter.take(delimiterMatchLength)) { - buffer ++= nextChunk - // we are expecting the other parts of the delimiter - delimiterFragment = Some(delimiter.drop(nextChunk.size - matchPosition)) - acc - } else { - // false positive - delimiterFragment = None - buffer ++= nextChunk.take(matchPosition + 1) - extractParts(nextChunk.drop(matchPosition + 1), acc) - } - } else { - delimiterFragment = None - val missingBytes: Int = if (includeDelimiter) matchPosition + delimiter.size else matchPosition - val expectedSize = buffer.size + missingBytes - if (expectedSize > maxSize) throw new IllegalArgumentException( - s"Received frame already of size $expectedSize (max = $maxSize)") - - if (singleByteDelimiter || nextChunk.slice(matchPosition, matchPosition + delimiter.size) == delimiter) { - val decoded = buffer ++ nextChunk.take(missingBytes) - buffer = ByteString.empty - extractParts(nextChunk.drop(matchPosition + delimiter.size), decoded :: acc) - } else { - buffer ++= nextChunk.take(matchPosition + 1) - extractParts(nextChunk.drop(matchPosition + 1), acc) - } - } - - } - - override val eventPipeline = { - bs: ByteString ⇒ - val parts = extractParts(bs, Nil) - buffer = buffer.compact // TODO: This should be properly benchmarked and memory profiled - parts match { - case Nil ⇒ Nil - case one :: Nil ⇒ ctx.singleEvent(one.compact) - case many ⇒ many reverseMap { frame ⇒ Left(frame.compact) } - } - } - - override val commandPipeline = { - bs: ByteString ⇒ ctx.singleCommand(bs) - } - } -} - -/** Simple convenience pipeline stage for turning Strings into ByteStrings and vice versa. 
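For illustration, a sketch of how such stages are sequenced with `>>` into a protocol stack: the string adapter described below on top of newline-delimited framing. The frame size limit is arbitrary.

<pre><code>
import akka.io._
import akka.util.ByteString

// Hypothetical line-based protocol stack: Strings above, delimiter-framed
// ByteStrings below, sequenced with the `>>` combinator.
def lineProtocol: PipelineStage[PipelineContext, String, ByteString, String, ByteString] =
  new StringByteStringAdapter("utf-8") >>
    new DelimiterFraming(maxSize = 4 * 1024, delimiter = ByteString('\n'), includeDelimiter = false)
</code></pre>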
- * - * @param charset The character set to be used for encoding and decoding the raw byte representation of the strings. - */ -class StringByteStringAdapter(charset: String = "utf-8") - extends PipelineStage[PipelineContext, String, ByteString, String, ByteString] { - - override def apply(ctx: PipelineContext) = new PipePair[String, ByteString, String, ByteString] { - - val commandPipeline = (str: String) ⇒ ctx.singleCommand(ByteString(str, charset)) - - val eventPipeline = (bs: ByteString) ⇒ ctx.singleEvent(bs.decodeString(charset)) - } -} - -/** This trait expresses that the pipeline’s context needs to provide a logging - * facility. - */ -trait HasLogging extends PipelineContext { - /** Retrieve the [[akka.event.LoggingAdapter]] for this pipeline’s context. - */ - def getLogger: LoggingAdapter -} - -//#tick-generator -/** This trait expresses that the pipeline’s context needs to live within an - * actor and provide its ActorContext. - */ -trait HasActorContext extends PipelineContext { - /** Retrieve the [[akka.actor.ActorContext]] for this pipeline’s context. - */ - def getContext: ActorContext -} - -object TickGenerator { - /** This message type is used by the TickGenerator to trigger - * the rescheduling of the next Tick. The actor hosting the pipeline - * which includes a TickGenerator must arrange for messages of this - * type to be injected into the management port of the pipeline. - */ - trait Trigger - - /** This message type is emitted by the TickGenerator to the whole - * pipeline, informing all stages about the time at which this Tick - * was emitted (relative to some arbitrary epoch). - */ - case class Tick(@BeanProperty timestamp: FiniteDuration) extends Trigger -} - -/** This pipeline stage does not alter the events or commands - */ -class TickGenerator[Cmd <: AnyRef, Evt <: AnyRef](interval: FiniteDuration) - extends PipelineStage[HasActorContext, Cmd, Cmd, Evt, Evt] { - import TickGenerator._ - - override def apply(ctx: HasActorContext) = - new PipePair[Cmd, Cmd, Evt, Evt] { - - // use unique object to avoid double-activation on actor restart - private val trigger: Trigger = { - val path = ctx.getContext.self.path - - new Trigger { - override def toString = s"Tick[$path]" - } - } - - private def schedule() = - ctx.getContext.system.scheduler.scheduleOnce( - interval, ctx.getContext.self, trigger)(ctx.getContext.dispatcher) - - // automatically activate this generator - schedule() - - override val commandPipeline = (cmd: Cmd) ⇒ ctx.singleCommand(cmd) - - override val eventPipeline = (evt: Evt) ⇒ ctx.singleEvent(evt) - - override val managementPort: Mgmt = { - case `trigger` ⇒ - ctx.getContext.self ! Tick(Deadline.now.time) - schedule() - Nil - } - } -} -//#tick-generator - diff --git a/src/main/scala/akka/io/TcpPipelineHandler.scala b/src/main/scala/akka/io/TcpPipelineHandler.scala deleted file mode 100644 index abd9e79..0000000 --- a/src/main/scala/akka/io/TcpPipelineHandler.scala +++ /dev/null @@ -1,174 +0,0 @@ -/** Copyright (C) 2009-2013 Typesafe Inc. - */ - -package akka.io - -import scala.beans.BeanProperty -import scala.util.{ Failure, Success } -import akka.actor._ -import akka.dispatch.{ RequiresMessageQueue, UnboundedMessageQueueSemantics } -import akka.util.ByteString -import akka.event.Logging -import akka.event.LoggingAdapter - -object TcpPipelineHandler { - - /** This class wraps up a pipeline with its external (i.e. 
“top”) command and - * event types and providing unique wrappers for sending commands and - * receiving events (nested and non-static classes which are specific to each - * instance of [[Init]]). All events emitted by the pipeline will be sent to - * the registered handler wrapped in an Event. - */ - abstract class Init[Ctx <: PipelineContext, Cmd, Evt]( - val stages: PipelineStage[_ >: Ctx <: PipelineContext, Cmd, Tcp.Command, Evt, Tcp.Event]) { - - /** This method must be implemented to return the [[PipelineContext]] - * necessary for the operation of the given [[PipelineStage]]. - */ - def makeContext(actorContext: ActorContext): Ctx - - /** Java API: construct a command to be sent to the [[TcpPipelineHandler]] - * actor. - */ - def command(cmd: Cmd): Command = Command(cmd) - - /** Java API: extract a wrapped event received from the [[TcpPipelineHandler]] - * actor. - * - * @throws MatchError if the given object is not an Event matching this - * specific Init instance. - */ - def event(evt: AnyRef): Evt = evt match { - case Event(evt) ⇒ evt - } - - /** Wrapper class for commands to be sent to the [[TcpPipelineHandler]] actor. - */ - case class Command(@BeanProperty cmd: Cmd) extends NoSerializationVerificationNeeded - - /** Wrapper class for events emitted by the [[TcpPipelineHandler]] actor. - */ - case class Event(@BeanProperty evt: Evt) extends NoSerializationVerificationNeeded - } - - /** This interface bundles logging and ActorContext for Java. - */ - trait WithinActorContext extends HasLogging with HasActorContext - - def withLogger[Cmd, Evt](log: LoggingAdapter, - stages: PipelineStage[_ >: WithinActorContext <: PipelineContext, Cmd, Tcp.Command, Evt, Tcp.Event]): Init[WithinActorContext, Cmd, Evt] = - new Init[WithinActorContext, Cmd, Evt](stages) { - override def makeContext(ctx: ActorContext): WithinActorContext = new WithinActorContext { - override def getLogger = log - override def getContext = ctx - } - } - - /** Wrapper class for management commands sent to the [[TcpPipelineHandler]] actor. - */ - case class Management(@BeanProperty cmd: AnyRef) - - /** This is a new Tcp.Command which the pipeline can emit to effect the - * sending a message to another actor. Using this instead of doing the send - * directly has the advantage that other pipeline stages can also see and - * possibly transform the send. - */ - case class Tell(receiver: ActorRef, msg: Any, sender: ActorRef) extends Tcp.Command - - /** The pipeline may want to emit a [[Tcp.Event]] to the registered handler - * actor, which is enabled by emitting this [[Tcp.Command]] wrapping an event - * instead. The [[TcpPipelineHandler]] actor will upon reception of this command - * forward the wrapped event to the handler. - */ - case class TcpEvent(@BeanProperty evt: Tcp.Event) extends Tcp.Command - - /** create [[akka.actor.Props]] for a pipeline handler - */ - def props[Ctx <: PipelineContext, Cmd, Evt](init: TcpPipelineHandler.Init[Ctx, Cmd, Evt], connection: ActorRef, handler: ActorRef) = - Props(classOf[TcpPipelineHandler[_, _, _]], init, connection, handler) - -} - -/** This actor wraps a pipeline and forwards commands and events between that - * one and a [[Tcp]] connection actor. In order to inject commands into the - * pipeline send an [[TcpPipelineHandler.Init.Command]] message to this actor; events will be sent - * to the designated handler wrapped in [[TcpPipelineHandler.Init.Event]] messages. - * - * When the designated handler terminates the TCP connection is aborted. 
When - * the connection actor terminates this actor terminates as well; the designated - * handler may want to watch this actor’s lifecycle. - * - * IMPORTANT: - * - * Proper function of this actor (and of other pipeline stages like [[TcpReadWriteAdapter]] - * depends on the fact that stages handling TCP commands and events pass unknown - * subtypes through unaltered. There are more commands and events than are declared - * within the [[Tcp]] object and you can even define your own. - */ -class TcpPipelineHandler[Ctx <: PipelineContext, Cmd, Evt]( - init: TcpPipelineHandler.Init[Ctx, Cmd, Evt], - connection: ActorRef, - handler: ActorRef) - extends Actor with RequiresMessageQueue[UnboundedMessageQueueSemantics] { - - import init._ - import TcpPipelineHandler._ - - // sign death pact - context watch connection - // watch so we can Close - context watch handler - - val ctx = init.makeContext(context) - - val pipes = PipelineFactory.buildWithSinkFunctions(ctx, init.stages)({ - case Success(cmd) ⇒ - cmd match { - case Tell(receiver, msg, sender) ⇒ receiver.tell(msg, sender) - case TcpEvent(ev) ⇒ handler ! ev - case _ ⇒ connection ! cmd - } - case Failure(ex) ⇒ throw ex - }, { - case Success(evt) ⇒ handler ! Event(evt) - case Failure(ex) ⇒ throw ex - }) - - def receive = { - case Command(cmd) ⇒ pipes.injectCommand(cmd) - case evt: Tcp.Event ⇒ pipes.injectEvent(evt) - case Management(cmd) ⇒ pipes.managementCommand(cmd) - case Terminated(`handler`) ⇒ connection ! Tcp.Abort - case Terminated(`connection`) ⇒ context.stop(self) - } - -} - -/** Adapts a ByteString oriented pipeline stage to a stage that communicates via Tcp Commands and Events. Every ByteString - * passed down to this stage will be converted to Tcp.Write commands, while incoming Tcp.Receive events will be unwrapped - * and their contents passed up as raw ByteStrings. This adapter should be used together with TcpPipelineHandler. - * - * While this adapter communicates to the stage above it via raw ByteStrings, it is possible to inject Tcp Command - * by sending them to the management port, and the adapter will simply pass them down to the stage below. Incoming Tcp Events - * that are not Receive events will be passed downwards wrapped in a [[TcpPipelineHandler.TcpEvent]]; the [[TcpPipelineHandler]] will - * send these notifications to the registered event handler actor. 
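For illustration, a condensed sketch of the wiring, modelled on the client/server code removed elsewhere in this patch: build an `Init`, spawn the `TcpPipelineHandler` in front of the connection actor, and register it. `stages`, `connection` and `handler` are placeholders; the buffer limits are arbitrary.

<pre><code>
import akka.actor.{ ActorContext, ActorRef, Deploy }
import akka.event.LoggingAdapter
import akka.io._
import akka.util.ByteString

// Hypothetical wiring: build an Init, spawn the TcpPipelineHandler in front of
// the connection actor, and register it so TCP traffic flows through the stages.
def wire[Cmd, Evt](log: LoggingAdapter, context: ActorContext, connection: ActorRef, handler: ActorRef,
                   stages: PipelineStage[PipelineContext, Cmd, ByteString, Evt, ByteString]): ActorRef = {
  val init = TcpPipelineHandler.withLogger(log,
    stages >>
      new TcpReadWriteAdapter >>
      new BackpressureBuffer(lowBytes = 100L, highBytes = 50 * 1024L, maxBytes = 500 * 1024L))

  val pipelineHandler = context.actorOf(TcpPipelineHandler.props(init, connection, handler).withDeploy(Deploy.local))
  connection ! Tcp.Register(pipelineHandler)
  pipelineHandler // send init.Command(...) to it; `handler` receives init.Event(...)
}
</code></pre>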
- */ -class TcpReadWriteAdapter extends PipelineStage[PipelineContext, ByteString, Tcp.Command, ByteString, Tcp.Event] { - import TcpPipelineHandler.TcpEvent - - override def apply(ctx: PipelineContext) = new PipePair[ByteString, Tcp.Command, ByteString, Tcp.Event] { - - override val commandPipeline = { - data: ByteString ⇒ ctx.singleCommand(Tcp.Write(data)) - } - - override val eventPipeline = (evt: Tcp.Event) ⇒ evt match { - case Tcp.Received(data) ⇒ ctx.singleEvent(data) - case ev: Tcp.Event ⇒ ctx.singleCommand(TcpEvent(ev)) - } - - override val managementPort: Mgmt = { - case cmd: Tcp.Command ⇒ ctx.singleCommand(cmd) - } - } -} diff --git a/src/main/scala/nl/gideondk/sentinel/Action.scala b/src/main/scala/nl/gideondk/sentinel/Action.scala index 194ff39..8ab7f5a 100644 --- a/src/main/scala/nl/gideondk/sentinel/Action.scala +++ b/src/main/scala/nl/gideondk/sentinel/Action.scala @@ -1,16 +1,20 @@ package nl.gideondk.sentinel -import scala.concurrent.Future +import akka.stream.scaladsl.Source +import scala.concurrent.Future import play.api.libs.iteratee._ trait Action trait ProducerAction[E, C] extends Action + trait ConsumerAction extends Action object ProducerAction { + trait Reaction[E, C] extends ProducerAction[E, C] + trait StreamReaction[E, C] extends Reaction[E, C] trait Signal[E, C] extends Reaction[E, C] { @@ -18,23 +22,29 @@ object ProducerAction { } object Signal { - def apply[E, C](fun: E ⇒ Future[C]): Signal[E, C] = new Signal[E, C] { val f = fun } + def apply[E, C](fun: E ⇒ Future[C]): Signal[E, C] = new Signal[E, C] { + val f = fun + } } trait ConsumeStream[E, C] extends StreamReaction[E, C] { - def f: E ⇒ Enumerator[E] ⇒ Future[C] + def f: E ⇒ Source[E, Any] ⇒ Future[C] } object ConsumeStream { - def apply[E, A <: E, B <: E, C](fun: A ⇒ Enumerator[B] ⇒ Future[C]): ConsumeStream[E, C] = new ConsumeStream[E, C] { val f = fun.asInstanceOf[E ⇒ Enumerator[E] ⇒ Future[C]] } // Yikes :/ + def apply[E, A <: E, B <: E, C](fun: A ⇒ Enumerator[B] ⇒ Future[C]): ConsumeStream[E, C] = new ConsumeStream[E, C] { + val f = fun.asInstanceOf[E ⇒ Source[E, Any] ⇒ Future[C]] + } } trait ProduceStream[E, C] extends StreamReaction[E, C] { - def f: E ⇒ Future[Enumerator[C]] + def f: E ⇒ Future[Source[C, Any]] } object ProduceStream { - def apply[E, C](fun: E ⇒ Future[Enumerator[C]]): ProduceStream[E, C] = new ProduceStream[E, C] { val f = fun } + def apply[E, C](fun: E ⇒ Future[Source[C, Any]]): ProduceStream[E, C] = new ProduceStream[E, C] { + val f = fun + } } } @@ -42,14 +52,21 @@ object ProducerAction { case class ProducerActionAndData[Evt, Cmd](action: ProducerAction[Evt, Cmd], data: Evt) object ConsumerAction { + case object AcceptSignal extends ConsumerAction + case object AcceptError extends ConsumerAction + case object StartStream extends ConsumerAction + case object ConsumeStreamChunk extends ConsumerAction + case object EndStream extends ConsumerAction + case object ConsumeChunkAndEndStream extends ConsumerAction case object Ignore extends ConsumerAction + } case class ConsumerActionAndData[Evt](action: ConsumerAction, data: Evt) \ No newline at end of file diff --git a/src/main/scala/nl/gideondk/sentinel/Antenna.scala b/src/main/scala/nl/gideondk/sentinel/Antenna.scala deleted file mode 100644 index cc6bec6..0000000 --- a/src/main/scala/nl/gideondk/sentinel/Antenna.scala +++ /dev/null @@ -1,112 +0,0 @@ -package nl.gideondk.sentinel - -import akka.actor._ -import akka.io.TcpPipelineHandler.{ Init, WithinActorContext } -import akka.io._ -import nl.gideondk.sentinel.processors._ -import 
scala.collection.immutable.Queue - -import scala.concurrent.Future - -class Antenna[Cmd, Evt](init: Init[WithinActorContext, Cmd, Evt], resolver: Resolver[Evt, Cmd], allowPipelining: Boolean = true) extends Actor with ActorLogging with Stash { - - import context.dispatcher - - def active(tcpHandler: ActorRef): Receive = { - val consumer = context.actorOf(Props(new Consumer(init)), name = "resolver") - val producer = context.actorOf(Props(new Producer(init)).withDispatcher("nl.gideondk.sentinel.sentinel-dispatcher"), name = "producer") - - var commandQueue = Queue.empty[init.Command] - var commandInProcess = false - - context watch tcpHandler - context watch producer - context watch consumer - - def handleTermination: Receive = { - case x: Terminated ⇒ context.stop(self) - } - - def highWaterMark: Receive = handleTermination orElse { - case BackpressureBuffer.LowWatermarkReached ⇒ - unstashAll() - context.unbecome() - case _ ⇒ - stash() - } - - def popCommand() = if (!commandQueue.isEmpty) { - val cmd = commandQueue.head - commandQueue = commandQueue.tail - tcpHandler ! cmd - } else { - commandInProcess = false - } - - def handleCommands: Receive = { - case x: Command.Ask[Cmd, Evt] ⇒ - consumer ! x.registration - - val cmd = init.Command(x.payload) - if (allowPipelining) tcpHandler ! cmd - else if (commandInProcess) { - commandQueue :+= cmd - } else { - commandInProcess = true - tcpHandler ! cmd - } - - case x: Command.AskStream[Cmd, Evt] ⇒ - consumer ! x.registration - - val cmd = init.Command(x.payload) - if (allowPipelining) tcpHandler ! cmd - else if (commandInProcess) { - commandQueue :+= cmd - } else { - commandInProcess = true - tcpHandler ! cmd - } - - case x: Command.SendStream[Cmd, Evt] ⇒ - consumer ! x.registration - producer ! ProducerActionAndData(ProducerAction.ProduceStream[Unit, Cmd](Unit ⇒ Future(x.stream)), ()) - } - - def handleReplies: Receive = { - case x: Reply.Response[Cmd] ⇒ - tcpHandler ! init.Command(x.payload) - - case x: Reply.StreamResponseChunk[Cmd] ⇒ - tcpHandler ! init.Command(x.payload) - } - - handleTermination orElse handleCommands orElse handleReplies orElse { - case x: Registration[Evt, _] ⇒ - consumer ! x - - case init.Event(data) ⇒ { - resolver.process(data) match { - case x: ProducerAction[Evt, Cmd] ⇒ producer ! ProducerActionAndData[Evt, Cmd](x, data) - - case ConsumerAction.ConsumeStreamChunk ⇒ - consumer ! ConsumerActionAndData[Evt](ConsumerAction.ConsumeStreamChunk, data) - - case x: ConsumerAction ⇒ - consumer ! 
ConsumerActionAndData[Evt](x, data) - if (!allowPipelining) popCommand() - } - - } - - case BackpressureBuffer.HighWatermarkReached ⇒ { - context.become(highWaterMark, false) - } - } - } - - def receive = { - case Management.RegisterTcpHandler(tcpHandler) ⇒ - context.become(active(tcpHandler)) - } -} \ No newline at end of file diff --git a/src/main/scala/nl/gideondk/sentinel/Client.scala b/src/main/scala/nl/gideondk/sentinel/Client.scala deleted file mode 100644 index 5328ee3..0000000 --- a/src/main/scala/nl/gideondk/sentinel/Client.scala +++ /dev/null @@ -1,180 +0,0 @@ -package nl.gideondk.sentinel - -import java.net.InetSocketAddress - -import scala.concurrent._ -import scala.concurrent.duration.{ DurationInt, FiniteDuration } - -import akka.actor._ -import akka.io._ -import akka.io.Tcp._ -import akka.routing._ - -import akka.util.ByteString - -import play.api.libs.iteratee._ - -trait Client[Cmd, Evt] { - import Registration._ - - def actor: ActorRef - - def ?(command: Cmd)(implicit context: ExecutionContext): Future[Evt] = ask(command) - - def ?->>(command: Cmd)(implicit context: ExecutionContext): Future[Enumerator[Evt]] = askStream(command) - - def ?<<-(command: Cmd, source: Enumerator[Cmd])(implicit context: ExecutionContext): Future[Evt] = sendStream(command, source) - - def ?<<-(source: Enumerator[Cmd])(implicit context: ExecutionContext): Future[Evt] = sendStream(source) - - def ask(command: Cmd)(implicit context: ExecutionContext): Future[Evt] = { - val promise = Promise[Evt]() - actor ! Command.Ask(command, ReplyRegistration(promise)) - promise.future - } - - def askStream(command: Cmd)(implicit context: ExecutionContext): Future[Enumerator[Evt]] = { - val promise = Promise[Enumerator[Evt]]() - actor ! Command.AskStream(command, StreamReplyRegistration(promise)) - promise.future - } - - def sendStream(command: Cmd, source: Enumerator[Cmd]): Future[Evt] = - sendStream(Enumerator(command) >>> source) - - def sendStream(source: Enumerator[Cmd]): Future[Evt] = { - val promise = Promise[Evt]() - actor ! Command.SendStream(source, ReplyRegistration(promise)) - promise.future - } -} - -object Client { - case class ConnectToServer(addr: InetSocketAddress) - - def defaultResolver[Cmd, Evt] = new Resolver[Evt, Cmd] { - def process = { - case _ ⇒ ConsumerAction.AcceptSignal - } - } - - def apply[Cmd, Evt](serverHost: String, serverPort: Int, routerConfig: RouterConfig, - description: String = "Sentinel Client", stages: ⇒ PipelineStage[PipelineContext, Cmd, ByteString, Evt, ByteString], workerReconnectTime: FiniteDuration = 2 seconds, resolver: Resolver[Evt, Cmd] = Client.defaultResolver[Cmd, Evt], allowPipelining: Boolean = true, lowBytes: Long = 100L, highBytes: Long = 5000L, maxBufferSize: Long = 20000L)(implicit system: ActorSystem) = { - val core = system.actorOf(Props(new ClientCore[Cmd, Evt](routerConfig, description, workerReconnectTime, stages, resolver, allowPipelining)(lowBytes, highBytes, maxBufferSize)).withDispatcher("nl.gideondk.sentinel.sentinel-dispatcher"), name = "sentinel-client-" + java.util.UUID.randomUUID.toString) - core ! 
Client.ConnectToServer(new InetSocketAddress(serverHost, serverPort)) - new Client[Cmd, Evt] { - val actor = core - } - } - - def randomRouting[Cmd, Evt](serverHost: String, serverPort: Int, numberOfConnections: Int, description: String = "Sentinel Client", stages: ⇒ PipelineStage[PipelineContext, Cmd, ByteString, Evt, ByteString], workerReconnectTime: FiniteDuration = 2 seconds, resolver: Resolver[Evt, Cmd] = Client.defaultResolver[Cmd, Evt], allowPipelining: Boolean = true, lowBytes: Long = 100L, highBytes: Long = 5000L, maxBufferSize: Long = 20000L)(implicit system: ActorSystem) = { - apply(serverHost, serverPort, RandomPool(numberOfConnections), description, stages, workerReconnectTime, resolver, allowPipelining, lowBytes, highBytes, maxBufferSize) - } - - def roundRobinRouting[Cmd, Evt](serverHost: String, serverPort: Int, numberOfConnections: Int, description: String = "Sentinel Client", stages: ⇒ PipelineStage[PipelineContext, Cmd, ByteString, Evt, ByteString], workerReconnectTime: FiniteDuration = 2 seconds, resolver: Resolver[Evt, Cmd] = Client.defaultResolver[Cmd, Evt], allowPipelining: Boolean = true, lowBytes: Long = 100L, highBytes: Long = 5000L, maxBufferSize: Long = 20000L)(implicit system: ActorSystem) = { - apply(serverHost, serverPort, RoundRobinPool(numberOfConnections), description, stages, workerReconnectTime, resolver, allowPipelining, lowBytes, highBytes, maxBufferSize) - } -} - -class ClientAntennaManager[Cmd, Evt](address: InetSocketAddress, stages: ⇒ PipelineStage[PipelineContext, Cmd, ByteString, Evt, ByteString], resolver: Resolver[Evt, Cmd], allowPipelining: Boolean = true)(lowBytes: Long, highBytes: Long, maxBufferSize: Long) extends Actor with ActorLogging with Stash { - val tcp = akka.io.IO(Tcp)(context.system) - - override def preStart = tcp ! Tcp.Connect(address) - - def connected(antenna: ActorRef): Receive = { - case x: Command[Cmd, Evt] ⇒ - antenna forward x - - case x: Terminated ⇒ - context.stop(self) - - } - - def disconnected: Receive = { - case Connected(remoteAddr, localAddr) ⇒ - val init = TcpPipelineHandler.withLogger(log, - stages >> - new TcpReadWriteAdapter >> - new BackpressureBuffer(lowBytes, highBytes, maxBufferSize)) - - val antenna = context.actorOf(Props(new Antenna(init, resolver, allowPipelining)).withDispatcher("nl.gideondk.sentinel.sentinel-dispatcher")) - val handler = context.actorOf(TcpPipelineHandler.props(init, sender, antenna).withDeploy(Deploy.local)) - context watch handler - - sender ! Register(handler) - antenna ! Management.RegisterTcpHandler(handler) - - unstashAll() - context.become(connected(antenna)) - - case CommandFailed(cmd: akka.io.Tcp.Command) ⇒ - context.stop(self) // Bit harsh at the moment, but should trigger reconnect and probably do better next time... 
- - // case x: nl.gideondk.sentinel.Command[Cmd, Evt] ⇒ - // x.registration.promise.failure(new Exception("Client has not yet been connected to a endpoint")) - - case _ ⇒ stash() - } - - def receive = disconnected -} - -class ClientCore[Cmd, Evt](routerConfig: RouterConfig, description: String, reconnectDuration: FiniteDuration, - stages: ⇒ PipelineStage[PipelineContext, Cmd, ByteString, Evt, ByteString], resolver: Resolver[Evt, Cmd], allowPipelining: Boolean = true, workerDescription: String = "Sentinel Client Worker")(lowBytes: Long, highBytes: Long, maxBufferSize: Long) extends Actor with ActorLogging with Stash { - - import context.dispatcher - - var addresses = List.empty[Tuple2[InetSocketAddress, Option[ActorRef]]] - - private case object InitializeRouter - private case class ReconnectRouter(address: InetSocketAddress) - - var coreRouter: Option[ActorRef] = None - var reconnecting = false - - def antennaManagerProto(address: InetSocketAddress) = - new ClientAntennaManager(address, stages, resolver, allowPipelining)(lowBytes, highBytes, maxBufferSize) - - def routerProto(address: InetSocketAddress) = - context.actorOf(Props(antennaManagerProto(address)).withRouter(routerConfig).withDispatcher("nl.gideondk.sentinel.sentinel-dispatcher")) - - override def preStart = { - self ! InitializeRouter - } - - def receive = { - case x: Client.ConnectToServer ⇒ - log.debug("Connecting to: " + x.addr) - if (!addresses.map(_._1).contains(x)) { - val router = routerProto(x.addr) - context.watch(router) - addresses = addresses ++ List(x.addr -> Some(router)) - coreRouter = Some(context.system.actorOf(Props.empty.withRouter(RoundRobinGroup(addresses.map(_._2).flatten.map(_.path.toString))))) - reconnecting = false - unstashAll() - } else { - log.debug("Client is already connected to: " + x.addr) - } - - case Terminated(actor) ⇒ - /* If router died, restart after a period of time */ - val terminatedRouter = addresses.find(_._2 == Some(actor)) - terminatedRouter match { - case Some(r) ⇒ - addresses = addresses diff addresses.find(_._2 == Some(actor)).toList - coreRouter = Some(context.system.actorOf(Props.empty.withRouter(RoundRobinGroup(addresses.map(_._2).flatten.map(_.path.toString))))) - log.error("Router for: " + r._1 + " died, restarting in: " + reconnectDuration.toString()) - reconnecting = true - context.system.scheduler.scheduleOnce(reconnectDuration, self, Client.ConnectToServer(r._1)) - case None ⇒ - } - - case x: Command[Cmd, Evt] ⇒ - coreRouter match { - case Some(r) ⇒ if (reconnecting) stash() else r forward x - case None ⇒ x.registration.promise.failure(new Exception("No connection(s) available")) - } - - case _ ⇒ - } -} diff --git a/src/main/scala/nl/gideondk/sentinel/Command.scala b/src/main/scala/nl/gideondk/sentinel/Command.scala index a2773fd..18109a2 100644 --- a/src/main/scala/nl/gideondk/sentinel/Command.scala +++ b/src/main/scala/nl/gideondk/sentinel/Command.scala @@ -1,18 +1,28 @@ package nl.gideondk.sentinel -import scala.concurrent.{ Future, Promise } - import akka.actor.ActorRef +import akka.stream.scaladsl.Source + +import scala.concurrent.Promise + +trait Response[Evt] + +case class SingularResponse[Evt](data: Evt) extends Response[Evt] -import play.api.libs.iteratee._ +case class SingularErrorResponse[Evt](data: Evt) extends Response[Evt] -trait Registration[Evt, A] { - def promise: Promise[A] +case class StreamResponse[Evt](source: Source[Evt, Any]) extends Response[Evt] + +trait Registration[Evt, Resp <: Response[Evt]] { + def promise: Promise[Resp] } object Registration 
{ - case class ReplyRegistration[Evt](promise: Promise[Evt]) extends Registration[Evt, Evt] - case class StreamReplyRegistration[Evt](promise: Promise[Enumerator[Evt]]) extends Registration[Evt, Enumerator[Evt]] + + case class SingularResponseRegistration[Evt](promise: Promise[SingularResponse[Evt]]) extends Registration[Evt, SingularResponse[Evt]] + + case class StreamReplyRegistration[Evt](promise: Promise[StreamResponse[Evt]]) extends Registration[Evt, StreamResponse[Evt]] + } trait Command[Cmd, Evt] { @@ -26,33 +36,50 @@ trait ServerMetric trait Reply[Cmd] object Command { + import Registration._ - case class Ask[Cmd, Evt](payload: Cmd, registration: ReplyRegistration[Evt]) extends Command[Cmd, Evt] - case class Tell[Cmd, Evt](payload: Cmd, registration: ReplyRegistration[Evt]) extends Command[Cmd, Evt] + case class Ask[Cmd, Evt](payload: Cmd, registration: SingularResponseRegistration[Evt]) extends Command[Cmd, Evt] + + case class Tell[Cmd, Evt](payload: Cmd, registration: SingularResponseRegistration[Evt]) extends Command[Cmd, Evt] case class AskStream[Cmd, Evt](payload: Cmd, registration: StreamReplyRegistration[Evt]) extends Command[Cmd, Evt] - case class SendStream[Cmd, Evt](stream: Enumerator[Cmd], registration: ReplyRegistration[Evt]) extends Command[Cmd, Evt] + + case class SendStream[Cmd, Evt](stream: Source[Cmd, Any], registration: StreamReplyRegistration[Evt]) extends Command[Cmd, Evt] + } object ServerCommand { + case class AskAll[Cmd, Evt](payload: Cmd, promise: Promise[List[Evt]]) extends ServerCommand[Cmd, Evt] + case class AskAllHosts[Cmd, Evt](payload: Cmd, promise: Promise[List[Evt]]) extends ServerCommand[Cmd, Evt] + case class AskAny[Cmd, Evt](payload: Cmd, promise: Promise[Evt]) extends ServerCommand[Cmd, Evt] + } object ServerMetric { + case object ConnectedSockets extends ServerMetric + case object ConnectedHosts extends ServerMetric + } object Reply { + case class Response[Cmd](payload: Cmd) extends Reply[Cmd] + case class StreamResponseChunk[Cmd](payload: Cmd) extends Reply[Cmd] + } object Management { + trait ManagementMessage + case class RegisterTcpHandler(h: ActorRef) extends ManagementMessage + } diff --git a/src/main/scala/nl/gideondk/sentinel/Config.scala b/src/main/scala/nl/gideondk/sentinel/Config.scala new file mode 100644 index 0000000..bcd4757 --- /dev/null +++ b/src/main/scala/nl/gideondk/sentinel/Config.scala @@ -0,0 +1,11 @@ +package nl.gideondk.sentinel + +import com.typesafe.config.ConfigFactory + +object Config { + private lazy val config = ConfigFactory.load().getConfig("sentinel") + + val parallelism = config.getInt("pipeline.parallelism") + val framesize = config.getInt("pipeline.framesize") + val buffersize = config.getInt("pipeline.buffersize") +} diff --git a/src/main/scala/nl/gideondk/sentinel/Pipeline.scala b/src/main/scala/nl/gideondk/sentinel/Pipeline.scala new file mode 100644 index 0000000..6c695f6 --- /dev/null +++ b/src/main/scala/nl/gideondk/sentinel/Pipeline.scala @@ -0,0 +1,12 @@ +package nl.gideondk.sentinel + +import akka.actor.ActorSystem +import akka.stream.OverflowStrategy +import akka.stream.scaladsl.{ BidiFlow, Flow } +import akka.util.ByteString +import scala.concurrent.Future + +object Pipeline { + + +} diff --git a/src/main/scala/nl/gideondk/sentinel/Processor.scala b/src/main/scala/nl/gideondk/sentinel/Processor.scala new file mode 100644 index 0000000..3b8c36c --- /dev/null +++ b/src/main/scala/nl/gideondk/sentinel/Processor.scala @@ -0,0 +1,37 @@ +package nl.gideondk.sentinel + +import akka.actor.{Actor, Props} 
+import akka.stream.BidiShape +import akka.stream.scaladsl.{BidiFlow, Flow, Sink, Source} +import akka.util.ByteString +import akka.pattern.ask + +//case object Processor { +// +//} +// +//class Processor[I, R <: Rx[I], O, T <: Tx[O]](rxProps: Props, txProps: Props, protocol: BidiFlow[I, ByteString, O, ByteString, Nothing], connection: Flow[ByteString, ByteString, _]) extends Actor { +// +// val rx = Sink.actorSubscriber[O](rxProps) +// val tx = Source.actorPublisher[I](txProps) +// +// protocol >> connection +//// val flow = tx.via(connection).to(rx) +//// flow.run() +// +// def receive = { +// case x: I => +// tx. ? x +// } +//} +// +// +///* +// +// +//--> Request --> Processor --> Tx (bp) --> TCP --> Rx (bp) --> Processor --> Response +// +// +// +// +// */ \ No newline at end of file diff --git a/src/main/scala/nl/gideondk/sentinel/Resolver.scala b/src/main/scala/nl/gideondk/sentinel/Resolver.scala index 8c1485f..32bc36e 100644 --- a/src/main/scala/nl/gideondk/sentinel/Resolver.scala +++ b/src/main/scala/nl/gideondk/sentinel/Resolver.scala @@ -1,6 +1,99 @@ package nl.gideondk.sentinel -trait Resolver[Evt, Cmd] { +import akka.stream.scaladsl.{Flow, Source} +import akka.stream._ +import akka.stream.stage._ +import akka.util.ByteString +import nl.gideondk.sentinel.ConsumerAction._ - def process: PartialFunction[Evt, Action] -} \ No newline at end of file +import scala.util.{Failure, Success, Try} + + +class ResponseStage[Evt, Cmd](resolver: Processor[Evt]) extends GraphStage[FanOutShape2[Evt, Cmd, Response[Evt]]] { + + private val events = Inlet[Evt]("EventIn") + private val responses = Outlet[Response[Evt]]("ResponseOut") + private val signals = Outlet[Cmd]("SignalOut") + + val shape = new FanOutShape2(events, responses, signals) + + override def createLogic(effectiveAttributes: Attributes) = new GraphStageLogic(shape) with InHandler with OutHandler { + private var chunkSource: SubSourceOutlet[Evt] = _ + + private def chunkSubStreamStarted = chunkSource != null + + private def idle = this + + def setInitialHandlers(): Unit = setHandlers(events, responses, idle) + + def startStream(): Unit = { + chunkSource = new SubSourceOutlet[Evt]("ChunkSource") + chunkSource.setHandler(substreamHandler) + setHandler(events, substreamHandler) + push(responses, StreamResponse(Source.fromGraph(chunkSource.source))) + } + + def onPush(): Unit = { + + val evt = grab(events) + resolver.process(evt) match { + case AcceptSignal ⇒ push(responses, SingularResponse(evt)) + + case AcceptError ⇒ push(responses, SingularErrorResponse(evt)) + + case StartStream => startStream() + + case ConsumeStreamChunk => startStream() + + case ConsumeChunkAndEndStream => push(responses, StreamResponse(Source.single(evt))) + + case Ignore ⇒ () + } + } + + def onPull(): Unit = { + if (!chunkSubStreamStarted) pull(events) + } + + private lazy val substreamHandler = new InHandler with OutHandler { + def endStream(): Unit = { + chunkSource.complete() + chunkSource = null + + if (isAvailable(responses)) pull(events) + setInitialHandlers() + } + + override def onPush(): Unit = { + val chunk = grab(events) + + resolver.process(chunk) match { + case ConsumeStreamChunk => chunkSource.push(chunk) + + case EndStream => endStream() + + case ConsumeChunkAndEndStream => chunkSource.push(chunk); endStream() + + case Ignore ⇒ () + } + } + + override def onPull(): Unit = pull(events) + + override def onUpstreamFinish(): Unit = { + chunkSource.complete() + completeStage() + } + + override def onUpstreamFailure(reason: Throwable): Unit = { + 
chunkSource.fail(reason) + failStage(reason) + } + } + } +} + + +trait Processor[In] { + def process: PartialFunction[In, Action] +} diff --git a/src/main/scala/nl/gideondk/sentinel/Server.scala b/src/main/scala/nl/gideondk/sentinel/Server.scala deleted file mode 100644 index 90e9f75..0000000 --- a/src/main/scala/nl/gideondk/sentinel/Server.scala +++ /dev/null @@ -1,143 +0,0 @@ -package nl.gideondk.sentinel - -import java.net.InetSocketAddress - -import akka.actor._ -import akka.io._ -import akka.io.Tcp._ -import akka.util.{ Timeout, ByteString } - -import scala.concurrent.{ Future, Promise, ExecutionContext } -import scala.util.Random - -import akka.pattern.ask - -trait Server[Cmd, Evt] { - def actor: ActorRef - - def ?**(command: Cmd)(implicit context: ExecutionContext): Future[List[Evt]] = askAll(command) - - def ?*(command: Cmd)(implicit context: ExecutionContext): Future[List[Evt]] = askAllHosts(command) - - def ?(command: Cmd)(implicit context: ExecutionContext): Future[Evt] = askAny(command) - - def askAll(command: Cmd)(implicit context: ExecutionContext): Future[List[Evt]] = { - val promise = Promise[List[Evt]]() - actor ! ServerCommand.AskAll(command, promise) - promise.future - } - - def askAllHosts(command: Cmd)(implicit context: ExecutionContext): Future[List[Evt]] = { - val promise = Promise[List[Evt]]() - actor ! ServerCommand.AskAllHosts(command, promise) - promise.future - } - - def askAny(command: Cmd)(implicit context: ExecutionContext): Future[Evt] = { - val promise = Promise[Evt]() - actor ! ServerCommand.AskAny(command, promise) - promise.future - } - - def connectedSockets(implicit timeout: Timeout): Future[Int] = { - (actor ? ServerMetric.ConnectedSockets).mapTo[Int] - } - - def connectedHosts(implicit timeout: Timeout): Future[Int] = { - (actor ? ServerMetric.ConnectedHosts).mapTo[Int] - } -} - -class ServerCore[Cmd, Evt](port: Int, description: String, stages: ⇒ PipelineStage[PipelineContext, Cmd, ByteString, Evt, ByteString], - resolver: Resolver[Evt, Cmd], workerDescription: String = "Sentinel Client Worker")(lowBytes: Long, highBytes: Long, maxBufferSize: Long) extends Actor with ActorLogging { - - import context.dispatcher - - def wrapAtenna(a: ActorRef) = new Client[Cmd, Evt] { - val actor = a - } - - val tcp = akka.io.IO(Tcp)(context.system) - val address = new InetSocketAddress(port) - - var connections = Map[String, List[ActorRef]]() - - override def preStart = { - tcp ! Bind(self, address) - } - - def receiveCommands: Receive = { - case x: ServerCommand.AskAll[Cmd, Evt] ⇒ - if (connections.values.toList.length > 0) { - val futures = Future.sequence(connections.values.toList.flatten.map(wrapAtenna).map(_ ? x.payload)) - x.promise.completeWith(futures) - } else x.promise.failure(new Exception("No clients connected")) - - case x: ServerCommand.AskAllHosts[Cmd, Evt] ⇒ - if (connections.values.toList.length > 0) { - val futures = Future.sequence(connections.values.toList.map(x ⇒ Random.shuffle(x.toList).head).map(wrapAtenna).map(_ ? x.payload)) - x.promise.completeWith(futures) - } else x.promise.failure(new Exception("No clients connected")) - - case x: ServerCommand.AskAny[Cmd, Evt] ⇒ - if (connections.values.toList.length > 0) { - val future = (wrapAtenna(Random.shuffle(connections.values.toList.flatten).head) ? x.payload) - x.promise.completeWith(future) - } else x.promise.failure(new Exception("No clients connected")) - - case ServerMetric.ConnectedSockets ⇒ - sender ! 
connections.values.flatten.toList.length - - case ServerMetric.ConnectedHosts ⇒ - sender ! connections.keys.toList.length - } - - def receive = receiveCommands orElse { - case x: Terminated ⇒ - val antenna = x.getActor - connections = connections.foldLeft(Map[String, List[ActorRef]]()) { - case (c, i) ⇒ - i._2.contains(antenna) match { - case true ⇒ if (i._2.length == 1) c else c + (i._1 -> i._2.filter(_ != antenna)) - case false ⇒ c + i - } - } - - case Bound ⇒ - log.debug(description + " bound to " + address) - - case CommandFailed(cmd) ⇒ - cmd match { - case x: Bind ⇒ - log.error(description + " failed to bind to " + address) - } - - case req @ Connected(remoteAddr, localAddr) ⇒ - val init = - TcpPipelineHandler.withLogger(log, - stages >> - new TcpReadWriteAdapter >> - new BackpressureBuffer(lowBytes, highBytes, maxBufferSize)) - - val connection = sender - - val antenna = context.actorOf(Props(new Antenna(init, resolver)).withDispatcher("nl.gideondk.sentinel.sentinel-dispatcher")) - context.watch(antenna) - - val currentAtennas = connections.get(remoteAddr.getHostName).getOrElse(List[ActorRef]()) - connections = connections + (remoteAddr.getHostName -> (currentAtennas ++ List(antenna))) - - val tcpHandler = context.actorOf(TcpPipelineHandler.props(init, connection, antenna).withDeploy(Deploy.local)) - - antenna ! Management.RegisterTcpHandler(tcpHandler) - connection ! Tcp.Register(tcpHandler) - } -} - -object Server { - def apply[Evt, Cmd](serverPort: Int, resolver: Resolver[Evt, Cmd], description: String = "Sentinel Server", stages: ⇒ PipelineStage[PipelineContext, Cmd, ByteString, Evt, ByteString], lowBytes: Long = 100L, highBytes: Long = 50 * 1024L, maxBufferSize: Long = 1000L * 1024L)(implicit system: ActorSystem) = { - new Server[Evt, Cmd] { - val actor = system.actorOf(Props(new ServerCore(serverPort, description, stages, resolver)(lowBytes, highBytes, maxBufferSize)).withDispatcher("nl.gideondk.sentinel.sentinel-dispatcher"), name = "sentinel-server-" + java.util.UUID.randomUUID.toString) - } - } -} diff --git a/src/main/scala/nl/gideondk/sentinel/processors/Consumer.scala b/src/main/scala/nl/gideondk/sentinel/processors/Consumer.scala deleted file mode 100644 index c336996..0000000 --- a/src/main/scala/nl/gideondk/sentinel/processors/Consumer.scala +++ /dev/null @@ -1,244 +0,0 @@ -package nl.gideondk.sentinel.processors - -import scala.collection.immutable.Queue -import scala.concurrent._ -import scala.concurrent.duration.DurationInt - -import akka.actor._ -import akka.io.TcpPipelineHandler.{ Init, WithinActorContext } -import akka.pattern.ask -import akka.util.Timeout - -import play.api.libs.iteratee._ - -import nl.gideondk.sentinel._ - -object Consumer { - - trait StreamConsumerMessage - - case object ReadyForStream extends StreamConsumerMessage - - case object StartingWithStream extends StreamConsumerMessage - - case object AskNextChunk extends StreamConsumerMessage - - case object RegisterStreamConsumer extends StreamConsumerMessage - - case object ReleaseStreamConsumer extends StreamConsumerMessage - - case object TimeoutStreamConsumer extends StreamConsumerMessage - - trait ConsumerData[Evt] - - case class ConsumerException[Evt](cause: Evt) extends Exception { - override def toString() = "ConsumerException(" + cause + ")" - } - - case class DataChunk[Evt](c: Evt) extends ConsumerData[Evt] - - case class StreamChunk[Evt](c: Evt) extends ConsumerData[Evt] - - case class ErrorChunk[Evt](c: Evt) extends ConsumerData[Evt] - - case class EndOfStream[Evt]() extends 
ConsumerData[Evt] - -} - -class StreamHandler[Cmd, Evt](streamConsumerTimeout: Timeout = Timeout(10 seconds)) extends Actor with ActorLogging { - import Registration._ - import Consumer._ - import ConsumerAction._ - import context.dispatcher - - context.setReceiveTimeout(streamConsumerTimeout.duration) - - var hook: Option[Promise[ConsumerData[Evt]]] = None - var buffer = Queue[ConsumerData[Evt]]() - - override def postStop() = { - hook.foreach(_.failure(new Exception("Actor quit unexpectedly"))) - } - - def receive: Receive = { - case ReleaseStreamConsumer ⇒ - context.stop(self) - sender ! () - - case AskNextChunk ⇒ - sender ! nextStreamChunk - - case chunk: ConsumerData[Evt] ⇒ - hook match { - case Some(x) ⇒ - x.success(chunk) - hook = None - case None ⇒ - buffer :+= chunk - } - - case ReceiveTimeout ⇒ { - context.stop(self) - } - - } - - def nextStreamChunk = { - buffer.headOption match { - case Some(c) ⇒ - buffer = buffer.tail - Promise[ConsumerData[Evt]]().success(c) - case None ⇒ - val p = Promise[ConsumerData[Evt]]() - hook = Some(p) - p - } - } -} - -class Consumer[Cmd, Evt](init: Init[WithinActorContext, Cmd, Evt], - streamChunkTimeout: Timeout = Timeout(120 seconds), - streamConsumerTimeout: Timeout = Timeout(10 seconds)) extends Actor with ActorLogging { - import Registration._ - import Consumer._ - import ConsumerAction._ - - import context.dispatcher - - implicit val timeout = streamChunkTimeout - - var registrations = Queue[Registration[Evt, _]]() - - var streamBuffer = Queue[ConsumerData[Evt]]() - - var currentRunningStream: Option[ActorRef] = None - - override def postStop() = { - registrations.foreach(_.promise.failure(new Exception("Actor quit unexpectedly"))) - } - - def processAction(data: Evt, action: ConsumerAction) = { - def handleConsumerData(cd: ConsumerData[Evt]) = { - val registration = registrations.head - registrations = registrations.tail - - registration match { - case r: ReplyRegistration[_] ⇒ - r.promise.completeWith(cd match { - case x: DataChunk[Evt] ⇒ - Future.successful(x.c) - case x: ErrorChunk[Evt] ⇒ - Future.failed(ConsumerException(x.c)) - }) - - case r: StreamReplyRegistration[_] ⇒ - r.promise.completeWith(cd match { - case x: DataChunk[Evt] ⇒ - Future.failed(new Exception("Unexpectedly received a normal chunk instead of stream chunk")) - case x: ErrorChunk[Evt] ⇒ - Future.failed(ConsumerException(x.c)) - }) - } - } - - def handleStreamData(cd: ConsumerData[Evt]) = { - currentRunningStream match { - case Some(x) ⇒ - cd match { - case x: EndOfStream[Evt] ⇒ currentRunningStream = None - case _ ⇒ () - } - - x ! cd - - case None ⇒ - registrations.headOption match { - case Some(registration) ⇒ - registration match { - case r: ReplyRegistration[_] ⇒ - throw new Exception("Unexpectedly received a stream chunk instead of normal reply") // TODO: use specific exception classes - case r: StreamReplyRegistration[_] ⇒ { - val streamHandler = context.actorOf(Props(new StreamHandler(streamConsumerTimeout)), name = "streamHandler-" + java.util.UUID.randomUUID.toString) - currentRunningStream = Some(streamHandler) - - val worker = streamHandler - - // TODO: handle stream chunk timeout better - val resource = Enumerator.generateM[Evt] { - (worker ? AskNextChunk).mapTo[Promise[ConsumerData[Evt]]].flatMap(_.future).flatMap { - _ match { - case x: EndOfStream[Evt] ⇒ (worker ? ReleaseStreamConsumer) flatMap (u ⇒ Future(None)) - case x: StreamChunk[Evt] ⇒ Future(Some(x.c)) - case x: ErrorChunk[Evt] ⇒ (worker ? 
ReleaseStreamConsumer) flatMap (u ⇒ Future.failed(ConsumerException(x.c))) - } - } - } - - def dequeueStreamBuffer(): Unit = { - streamBuffer.headOption match { - case Some(x) ⇒ - streamBuffer = streamBuffer.tail - x match { - case x: EndOfStream[Evt] ⇒ - worker ! x - case x ⇒ - worker ! x - dequeueStreamBuffer() - } - case None ⇒ () - } - } - - dequeueStreamBuffer() - worker ! cd - - registrations = registrations.tail - r.promise success resource - } - - } - - case None ⇒ - streamBuffer :+= cd - } - } - } - - action match { - case AcceptSignal ⇒ - handleConsumerData(DataChunk(data)) - case AcceptError ⇒ - currentRunningStream match { - case Some(x) ⇒ handleStreamData(ErrorChunk(data)) - case None ⇒ handleConsumerData(ErrorChunk(data)) - } - - case ConsumeStreamChunk ⇒ - handleStreamData(StreamChunk(data)) - case EndStream ⇒ - handleStreamData(EndOfStream[Evt]()) - case ConsumeChunkAndEndStream ⇒ - handleStreamData(StreamChunk(data)) - handleStreamData(EndOfStream[Evt]()) - - case Ignore ⇒ () - } - } - - def handleRegistrations: Receive = { - case rc: ReplyRegistration[Evt] ⇒ - registrations :+= rc - - case rc: StreamReplyRegistration[Evt] ⇒ - registrations :+= rc - - } - - var behavior: Receive = handleRegistrations orElse { - case x: ConsumerActionAndData[Evt] ⇒ - processAction(x.data, x.action) - - } - - def receive = behavior -} \ No newline at end of file diff --git a/src/main/scala/nl/gideondk/sentinel/processors/Producer.scala b/src/main/scala/nl/gideondk/sentinel/processors/Producer.scala deleted file mode 100644 index aba73ec..0000000 --- a/src/main/scala/nl/gideondk/sentinel/processors/Producer.scala +++ /dev/null @@ -1,150 +0,0 @@ -package nl.gideondk.sentinel.processors - -import scala.collection.immutable.Queue -import scala.concurrent.{ ExecutionContext, Future, Promise } -import scala.concurrent.duration.DurationInt -import scala.util.{ Failure, Success } - -import akka.actor._ -import akka.io.TcpPipelineHandler.{ Init, WithinActorContext } -import akka.pattern.ask -import akka.util.Timeout - -import play.api.libs.iteratee._ - -import nl.gideondk.sentinel._ - -object Producer { - trait HandleResult - case class HandleAsyncResult[Cmd](response: Cmd) extends HandleResult - case class HandleStreamResult[Cmd](stream: Enumerator[Cmd]) extends HandleResult - - trait StreamProducerMessage - case class StreamProducerChunk[Cmd](c: Cmd) extends StreamProducerMessage - - case object StartStreamHandling extends StreamProducerMessage - case object ReadyForStream extends StreamProducerMessage - case object StreamProducerEnded extends StreamProducerMessage - case object StreamProducerChunkReceived extends StreamProducerMessage - - case object DequeueResponse -} - -class Producer[Cmd, Evt](init: Init[WithinActorContext, Cmd, Evt], streamChunkTimeout: Timeout = Timeout(5 seconds)) extends Actor with ActorLogging with Stash { - import Producer._ - import ProducerAction._ - import context.dispatcher - - var responseQueue = Queue.empty[Promise[HandleResult]] - - def produceAsyncResult(data: Evt, f: Evt ⇒ Future[Cmd]) = { - val worker = self - val promise = Promise[HandleResult]() - responseQueue :+= promise - - for { - response ← f(data) map (result ⇒ HandleAsyncResult(result)) - } yield { - promise.success(response) - worker ! 
DequeueResponse - } - } - - def produceStreamResult(data: Evt, f: Evt ⇒ Future[Enumerator[Cmd]]) = { - val worker = self - val promise = Promise[HandleResult]() - responseQueue :+= promise - - for { - response ← f(data) map (result ⇒ HandleStreamResult(result)) - } yield { - promise.success(response) - worker ! DequeueResponse - } - } - - val initSignal = produceAsyncResult(_, _) - val initStreamConsumer = produceAsyncResult(_, _) - val initStreamProducer = produceStreamResult(_, _) - - def processAction(data: Evt, action: ProducerAction[Evt, Cmd]) = { - val worker = self - val future = action match { - case x: Signal[Evt, Cmd] ⇒ initSignal(data, x.f) - - case x: ProduceStream[Evt, Cmd] ⇒ initStreamProducer(data, x.f) - - case x: ConsumeStream[Evt, Cmd] ⇒ - val incomingStreamPromise = Promise[Enumerator[Evt]]() - context.parent ! Registration.StreamReplyRegistration(incomingStreamPromise) - incomingStreamPromise.future flatMap ((s) ⇒ initStreamConsumer(data, x.f(_)(s))) - } - - future.onFailure { - case e ⇒ - log.error(e, e.getMessage) - context.stop(self) - } - } - - def handleRequest: Receive = { - case x: ProducerActionAndData[Evt, Cmd] ⇒ - processAction(x.data, x.action) - } - - def handleDequeue: Receive = { - case DequeueResponse ⇒ { - def dequeueAndSend: Unit = { - if (!responseQueue.isEmpty && responseQueue.front.isCompleted) { - // TODO: Should be handled a lot safer! - val promise = responseQueue.head - responseQueue = responseQueue.tail - promise.future.value match { - case Some(Success(v)) ⇒ - self ! v - dequeueAndSend - case Some(Failure(e)) ⇒ // Would normally not occur... - log.error(e, e.getMessage) - context.stop(self) - } - } - - } - dequeueAndSend - } - } - - def handleRequestAndResponse: Receive = handleRequest orElse handleDequeue orElse { - case x: HandleAsyncResult[Cmd] ⇒ context.parent ! Reply.Response(x.response) - case x: HandleStreamResult[Cmd] ⇒ - val worker = self - // TODO: What to do when producing Enumerator times out, send error, close stream and continue producing? - implicit val timeout = streamChunkTimeout - - val consumer = (x.stream |>>> Iteratee.foldM(())((a, b) ⇒ (worker ? StreamProducerChunk(b)).map(x ⇒ ()))).flatMap(x ⇒ (worker ? StreamProducerEnded)) - consumer.onFailure { - case e ⇒ - log.error(e, e.getMessage) - context.stop(self) - } - - context.become(handleRequestAndStreamResponse) - - case x: StreamProducerMessage ⇒ - log.error("Internal leakage in stream: received unexpected stream chunk") - context.stop(self) - } - - def handleRequestAndStreamResponse: Receive = handleRequest orElse { - case StreamProducerChunk(c) ⇒ - sender ! StreamProducerChunkReceived - context.parent ! Reply.StreamResponseChunk(c) - case StreamProducerEnded ⇒ - sender ! 
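The DequeueResponse/dequeueAndSend loop above preserves response ordering by queueing one Promise per request and only emitting completed results from the head of the queue; the same idea reappears later in ClientStage.Connection's contexts queue. A standalone sketch of that pattern (plain Scala, not part of the patch, names invented):

// FIFO promise queue: responses may complete out of order, but are emitted in request order.
import scala.collection.mutable
import scala.concurrent.Promise
import scala.util.Success

object FifoPromiseQueueSketch extends App {
  val queue = mutable.Queue.empty[Promise[String]]

  def enqueue(): Promise[String] = {
    val p = Promise[String]()
    queue.enqueue(p)
    p
  }

  // Emit completed responses strictly from the head of the queue.
  def drain(emit: String => Unit): Unit =
    while (queue.nonEmpty && queue.front.isCompleted) {
      queue.dequeue().future.value match {
        case Some(Success(v)) => emit(v)
        case _                => () // a failure would terminate the connection in the real code
      }
    }

  val first  = enqueue()
  val second = enqueue()

  second.success("second") // completes first, but is held back
  drain(println)           // prints nothing: the head is still pending
  first.success("first")
  drain(println)           // prints "first", then "second"
}

Out-of-order completions are held back until everything queued before them has completed, which is what keeps pipelined responses aligned with their requests.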
StreamProducerChunkReceived - context.become(handleRequestAndResponse) - unstashAll() - case _ ⇒ stash() - } - - def receive = handleRequestAndResponse -} From ba46b44fd829e9ffe0e387135a55be31be2c589f Mon Sep 17 00:00:00 2001 From: Gideon de Kok Date: Tue, 13 Sep 2016 23:10:43 +0200 Subject: [PATCH 33/54] Make client flow working --- project/Build.scala | 12 +- project/plugins.sbt | 2 +- .../scala/nl/gideondk/sentinel/Action.scala | 4 +- .../scala/nl/gideondk/sentinel/Client.scala | 270 ++++++++++++++++++ .../scala/nl/gideondk/sentinel/Command.scala | 55 ++-- .../nl/gideondk/sentinel/ConsumerStage.scala | 124 ++++++++ .../scala/nl/gideondk/sentinel/Pipeline.scala | 13 +- .../nl/gideondk/sentinel/Processor.scala | 75 ++--- .../nl/gideondk/sentinel/ProducerStage.scala | 70 +++++ .../scala/nl/gideondk/sentinel/Protocol.scala | 14 + .../scala/nl/gideondk/sentinel/Resolver.scala | 97 +------ .../gideondk/sentinel/ClientStageSpec.scala | 87 ++++++ .../gideondk/sentinel/ConsumerStageSpec.scala | 235 +++++++++++++++ .../nl/gideondk/sentinel/FullDuplexSpec.scala | 73 ----- .../nl/gideondk/sentinel/ProcessorSpec.scala | 59 ++++ .../gideondk/sentinel/ProducerStageSpec.scala | 48 ++++ .../gideondk/sentinel/RequestResponse.scala | 93 ------ .../gideondk/sentinel/ServerRequestSpec.scala | 99 ------- .../nl/gideondk/sentinel/StreamingSpec.scala | 155 ---------- .../nl/gideondk/sentinel/TestHelpers.scala | 125 +++++++- .../sentinel/protocol/SimpleMessage.scala | 104 +++++++ .../sentinel/protocols/SimpleMessage.scala | 103 ------- 22 files changed, 1217 insertions(+), 700 deletions(-) create mode 100644 src/main/scala/nl/gideondk/sentinel/Client.scala create mode 100644 src/main/scala/nl/gideondk/sentinel/ConsumerStage.scala create mode 100644 src/main/scala/nl/gideondk/sentinel/ProducerStage.scala create mode 100644 src/main/scala/nl/gideondk/sentinel/Protocol.scala create mode 100644 src/test/scala/nl/gideondk/sentinel/ClientStageSpec.scala create mode 100644 src/test/scala/nl/gideondk/sentinel/ConsumerStageSpec.scala delete mode 100644 src/test/scala/nl/gideondk/sentinel/FullDuplexSpec.scala create mode 100644 src/test/scala/nl/gideondk/sentinel/ProcessorSpec.scala create mode 100644 src/test/scala/nl/gideondk/sentinel/ProducerStageSpec.scala delete mode 100644 src/test/scala/nl/gideondk/sentinel/RequestResponse.scala delete mode 100644 src/test/scala/nl/gideondk/sentinel/ServerRequestSpec.scala delete mode 100644 src/test/scala/nl/gideondk/sentinel/StreamingSpec.scala create mode 100644 src/test/scala/nl/gideondk/sentinel/protocol/SimpleMessage.scala delete mode 100644 src/test/scala/nl/gideondk/sentinel/protocols/SimpleMessage.scala diff --git a/project/Build.scala b/project/Build.scala index 3615ead..4f72ab6 100755 --- a/project/Build.scala +++ b/project/Build.scala @@ -22,16 +22,18 @@ object ApplicationBuild extends Build { publishTo := Some(Resolver.file("file", new File("/Users/gideondk/Development/gideondk-mvn-repo"))) ) + val akkaVersion = "2.4.11" + val appDependencies = Seq( - "org.scalatest" %% "scalatest" % "2.2.0" % "test", + "org.scalatest" %% "scalatest" % "3.0.0" % "test", "com.typesafe.play" %% "play-iteratees" % "2.3.1", - "com.typesafe.akka" %% "akka-stream" % "2.4.8", - "com.typesafe.akka" %% "akka-stream-testkit" % "2.4.8", + "com.typesafe.akka" %% "akka-stream" % akkaVersion, + "com.typesafe.akka" %% "akka-stream-testkit" % akkaVersion, - "com.typesafe.akka" %% "akka-actor" % "2.4.8", - "com.typesafe.akka" %% "akka-testkit" % "2.4.8" % "test", + "com.typesafe.akka" %% "akka-actor" 
% akkaVersion, + "com.typesafe.akka" %% "akka-testkit" % akkaVersion % "test", "com.typesafe" % "config" % "1.3.0" ) diff --git a/project/plugins.sbt b/project/plugins.sbt index 6bc7c29..ecdd4fa 100755 --- a/project/plugins.sbt +++ b/project/plugins.sbt @@ -10,4 +10,4 @@ addSbtPlugin("io.get-coursier" % "sbt-coursier" % "1.0.0-M12") // or clone this repo and type `sbt publishLocal` resolvers += Resolver.sonatypeRepo("snapshots") -addSbtPlugin("org.ensime" % "sbt-ensime" % "1.0.0") +addSbtPlugin("org.ensime" % "sbt-ensime" % "1.11.1") diff --git a/src/main/scala/nl/gideondk/sentinel/Action.scala b/src/main/scala/nl/gideondk/sentinel/Action.scala index 8ab7f5a..30dd279 100644 --- a/src/main/scala/nl/gideondk/sentinel/Action.scala +++ b/src/main/scala/nl/gideondk/sentinel/Action.scala @@ -17,8 +17,8 @@ object ProducerAction { trait StreamReaction[E, C] extends Reaction[E, C] - trait Signal[E, C] extends Reaction[E, C] { - def f: E ⇒ Future[C] + trait Signal[In, Out] extends Reaction[In, Out] { + def f: In ⇒ Future[Out] } object Signal { diff --git a/src/main/scala/nl/gideondk/sentinel/Client.scala b/src/main/scala/nl/gideondk/sentinel/Client.scala new file mode 100644 index 0000000..014c56a --- /dev/null +++ b/src/main/scala/nl/gideondk/sentinel/Client.scala @@ -0,0 +1,270 @@ +package nl.gideondk.sentinel + +import akka.{Done, NotUsed, stream} +import akka.actor.ActorSystem +import akka.event.Logging +import akka.stream._ +import akka.stream.scaladsl.{BidiFlow, Flow, GraphDSL, RunnableGraph, Sink, Source, Tcp} +import akka.stream.stage._ +import akka.util.ByteString + +import scala.collection.mutable +import scala.concurrent._ +import scala.concurrent.duration._ +import scala.util.{Failure, Success, Try} + +case class Host(host: String, port: Int) + +object ClientStage { + + trait ConnectionClosedException + + case class ConnectionClosedWithReasonException(message: String, cause: Throwable) extends Exception(message, cause) with ConnectionClosedException + + case class ConnectionClosedWithoutReasonException(message: String) extends Exception(message) with ConnectionClosedException + + trait ConnectionEvent { + def host: Host + } + + case class LinkUp(host: Host) extends ConnectionEvent + + case class LinkDown(host: Host) extends ConnectionEvent + + case object NoConnectionsAvailableException extends Exception + +} + +import ClientStage._ + +class ClientStage[Cmd, Evt](connectionsPerHost: Int, maximumFailuresPerHost: Int, recoveryPeriod: FiniteDuration, processor: () => Processor[Cmd, Evt], protocol: () => BidiFlow[ByteString, Evt, Cmd, ByteString, Any]) + (implicit system: ActorSystem, mat: ActorMaterializer) extends GraphStage[FanInShape2[ConnectionEvent, (Command[Cmd], Promise[Event[Evt]]), (Try[Event[Evt]], Promise[Event[Evt]])]] { + + type Context = Promise[Event[Evt]] + + val connectionEventIn = Inlet[ConnectionEvent]("ClientStage.ConnectionEvent.In") + val commandIn = Inlet[(Command[Cmd], Context)]("ClientStage.Command.In") + val eventOut = Outlet[(Try[Event[Evt]], Context)]("ClientStage.Event.Out") + + override def shape = new FanInShape2(connectionEventIn, commandIn, eventOut) + + override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new TimerGraphStageLogic(shape) { + private val hosts = mutable.Map.empty[Host, Int] + private val hostFailures = mutable.Map.empty[Host, Int] + private val connectionPool = mutable.Queue.empty[Connection] + private val failures = mutable.Queue.empty[(Try[Event[Evt]], Context)] + private var antennaId = 0 + + override def preStart() 
= { + pull(connectionEventIn) + pull(commandIn) + schedulePeriodically(Done, recoveryPeriod) + } + + def nextId() = { + antennaId += 1; + antennaId + } + + def addHost(host: Host) = { + if (!hosts.contains(host)) { + hosts += (host -> 0) + pullCommand(true) + } + } + + def ensureConnections() = { + hosts + .find(_._2 < connectionsPerHost) + .foreach { case (host, connectionCount) => + val connection = Connection(host, nextId()) + connection.initialize() + connectionPool.enqueue(connection) + hosts(connection.host) = connectionCount + 1 + } + + pullCommand(false) + } + + def pullCommand(shouldInitializeConnection: Boolean): Unit = + if (hosts.isEmpty && isAvailable(commandIn)) { + val (_, context) = grab(commandIn) + failures.enqueue((Failure(NoConnectionsAvailableException), context)) + + if (isAvailable(eventOut) && failures.nonEmpty) { + push(eventOut, failures.dequeue()) + } + + pull(commandIn) + } else if (isAvailable(commandIn)) { + connectionPool.dequeueFirst(_.canBePushedForCommand) match { + case Some(connection) => + val (command, context) = grab(commandIn) + connection.pushCommand(command, context) + connectionPool.enqueue(connection) + pull(commandIn) + + case None => if (shouldInitializeConnection) ensureConnections() + } + } + + + def connectionFailed(connection: Connection, cause: Throwable) = { + val host = connection.host + val totalFailure = hostFailures.getOrElse(host, 0) + 1 + hostFailures(host) = totalFailure + + system.log.warning(s"Connection ${connection.connectionId} to $host failed due to ${cause.getMessage}") + + if (hostFailures(host) >= maximumFailuresPerHost) { + system.log.error(cause, s"Dropping $host, failed $totalFailure times") + removeHost(host, Some(cause)) + } else { + removeConnection(connection, Some(cause)) + } + } + + def removeHost(host: Host, cause: Option[Throwable] = None) = { + hosts.remove(host) + hostFailures.remove(host) + connectionPool.dequeueAll(_.host == host).foreach(_.close(cause)) + + if (isAvailable(eventOut) && failures.nonEmpty) { + push(eventOut, failures.dequeue()) + } + + pullCommand(true) + } + + def removeConnection(connection: Connection, cause: Option[Throwable]) = { + hosts(connection.host) = hosts(connection.host) - 1 + connectionPool.dequeueAll(_.connectionId == connection.connectionId).foreach(_.close(cause)) + + if (isAvailable(eventOut) && failures.nonEmpty) { + push(eventOut, failures.dequeue()) + } + + pullCommand(true) + } + + setHandler(connectionEventIn, new InHandler { + override def onPush() = { + grab(connectionEventIn) match { + case LinkUp(connection) => addHost(connection) + case LinkDown(connection) => removeHost(connection) + } + pull(connectionEventIn) + } + + override def onUpstreamFinish() = () + + override def onUpstreamFailure(ex: Throwable) = + failStage(throw new IllegalStateException(s"Stream for ConnectionEvents failed", ex)) + }) + + setHandler(commandIn, new InHandler { + override def onPush() = pullCommand(shouldInitializeConnection = true) + + override def onUpstreamFinish() = completeStage() + + override def onUpstreamFailure(ex: Throwable) = + failStage(throw new IllegalStateException(s"Requests stream failed", ex)) + }) + + setHandler(eventOut, new OutHandler { + override def onPull() = + if (failures.nonEmpty) push(eventOut, failures.dequeue()) + else { + connectionPool + .dequeueFirst(_.canBePulledForEvent) + .foreach(connection => { + if (isAvailable(eventOut)) { + push(eventOut, connection.pullEvent) + } + connectionPool.enqueue(connection) + }) + } + + override def onDownstreamFinish() 
= completeStage() + }) + + + override def onTimer(timerKey: Any) = hostFailures.clear() + + case class Connection(host: Host, connectionId: Int) { + connection => + private val connectionEventIn = new SubSinkInlet[Event[Evt]](s"Connection.[$host].[$connectionId].in") + private val connectionCommandOut = new SubSourceOutlet[Command[Cmd]](s"Connection.[$host].[$connectionId].out") + private val contexts = mutable.Queue.empty[Promise[Event[Evt]]] + + def canBePushedForCommand = connectionCommandOut.isAvailable + + def canBePulledForEvent = connectionEventIn.isAvailable + + def pushCommand(command: Command[Cmd], context: Promise[Event[Evt]]) = { + contexts.enqueue(context) + connectionCommandOut.push(command) + } + + def pullEvent() = { + val event = connectionEventIn.grab() + val context = contexts.dequeue() + connectionEventIn.pull() + (Success(event), context) + } + + def close(cause: Option[Throwable]) = { + val exception = cause match { + case Some(cause) => ConnectionClosedWithReasonException(s"Failure to process request to $host at antenna $connectionId", cause) + case None => ConnectionClosedWithoutReasonException(s"Failure to process request to $host antenna $connectionId") + } + + contexts.dequeueAll(_ => true).foreach(context => { + failures.enqueue((Failure(exception), context)) + }) + + connectionEventIn.cancel() + connectionCommandOut.complete() + } + + def initialize() = { + connectionEventIn.setHandler(new InHandler { + override def onPush() = if (isAvailable(eventOut)) { + push(eventOut, connection.pullEvent) + } + + override def onUpstreamFinish() = removeConnection(connection, None) + + override def onUpstreamFailure(reason: Throwable) = reason match { + case t: TimeoutException => removeConnection(connection, Some(t)) + case _ => connectionFailed(connection, reason) + } + }) + + connectionCommandOut.setHandler(new OutHandler { + override def onPull() = pullCommand(shouldInitializeConnection = true) + + override def onDownstreamFinish() = () + }) + + RunnableGraph.fromGraph(GraphDSL.create() { implicit b => + import GraphDSL.Implicits._ + + val pipeline = b.add(processor() + .flow + .atop(protocol().reversed) + .join(Tcp().outgoingConnection(host.host, host.port)) + ) + + connectionCommandOut.source ~> pipeline.in + pipeline.out ~> connectionEventIn.sink + + stream.ClosedShape + }).run()(subFusingMaterializer) + + connectionEventIn.pull() + } + } + + } +} diff --git a/src/main/scala/nl/gideondk/sentinel/Command.scala b/src/main/scala/nl/gideondk/sentinel/Command.scala index 18109a2..90bff7e 100644 --- a/src/main/scala/nl/gideondk/sentinel/Command.scala +++ b/src/main/scala/nl/gideondk/sentinel/Command.scala @@ -5,47 +5,46 @@ import akka.stream.scaladsl.Source import scala.concurrent.Promise -trait Response[Evt] +trait Event[A] -case class SingularResponse[Evt](data: Evt) extends Response[Evt] +case class SingularEvent[A](data: A) extends Event[A] -case class SingularErrorResponse[Evt](data: Evt) extends Response[Evt] +case class SingularErrorEvent[A](data: A) extends Event[A] -case class StreamResponse[Evt](source: Source[Evt, Any]) extends Response[Evt] +case class StreamEvent[A](chunks: Source[A, Any]) extends Event[A] -trait Registration[Evt, Resp <: Response[Evt]] { - def promise: Promise[Resp] +trait Registration[A, E <: Event[A]] { + def promise: Promise[E] } object Registration { + case class SingularResponseRegistration[A](promise: Promise[SingularEvent[A]]) extends Registration[A, SingularEvent[A]] - case class SingularResponseRegistration[Evt](promise: 
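The Event hierarchy introduced here (SingularEvent, SingularErrorEvent, StreamEvent) is what eventually completes the promises handed to the ClientStage. A hedged sketch of unwrapping such a result on the caller side; the helper name is invented and the code assumes it sits in (or imports) the nl.gideondk.sentinel package:

// Illustrative helper, not part of the patch: collapse an Event[A] into a Future of its payload(s).
object EventOps {
  import akka.stream.Materializer
  import akka.stream.scaladsl.Sink
  import scala.concurrent.{ ExecutionContext, Future }

  def toFutureList[A](event: Event[A])(implicit mat: Materializer, ec: ExecutionContext): Future[List[A]] =
    event match {
      case SingularEvent(data)      => Future.successful(List(data))          // single reply
      case SingularErrorEvent(data) => Future.failed(new Exception("error event: " + data))
      case StreamEvent(chunks)      => chunks.runWith(Sink.seq).map(_.toList) // drain the chunk stream
    }
}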
Promise[SingularResponse[Evt]]) extends Registration[Evt, SingularResponse[Evt]] - - case class StreamReplyRegistration[Evt](promise: Promise[StreamResponse[Evt]]) extends Registration[Evt, StreamResponse[Evt]] - + case class StreamReplyRegistration[A](promise: Promise[StreamEvent[A]]) extends Registration[A, StreamEvent[A]] } -trait Command[Cmd, Evt] { - def registration: Registration[Evt, _] -} +trait Command[Out] -trait ServerCommand[Cmd, Evt] +case class SingularCommand[Out](payload: Out) extends Command[Out] +case class StreamingCommand[Out](stream: Source[Out, Any]) extends Command[Out] -trait ServerMetric +trait ServerCommand[Out, In] -trait Reply[Cmd] +trait ServerMetric object Command { import Registration._ - case class Ask[Cmd, Evt](payload: Cmd, registration: SingularResponseRegistration[Evt]) extends Command[Cmd, Evt] +// case class Ask[Out](payload: Out) extends Command[Out] - case class Tell[Cmd, Evt](payload: Cmd, registration: SingularResponseRegistration[Evt]) extends Command[Cmd, Evt] + object Ask - case class AskStream[Cmd, Evt](payload: Cmd, registration: StreamReplyRegistration[Evt]) extends Command[Cmd, Evt] - - case class SendStream[Cmd, Evt](stream: Source[Cmd, Any], registration: StreamReplyRegistration[Evt]) extends Command[Cmd, Evt] +// case class Tell[Out](payload: Out) extends Command[Out] +// +// case class AskStream[Out](payload: Out) extends Command[Out] +// +// case class SendStream[Out](stream: Source[Out, Any]) extends Command[Out] } @@ -67,13 +66,13 @@ object ServerMetric { } -object Reply { - - case class Response[Cmd](payload: Cmd) extends Reply[Cmd] - - case class StreamResponseChunk[Cmd](payload: Cmd) extends Reply[Cmd] - -} +//object Reply { +// +// case class Response[Cmd](payload: Cmd) extends Reply[Cmd] +// +// case class StreamResponseChunk[Cmd](payload: Cmd) extends Reply[Cmd] +// +//} object Management { diff --git a/src/main/scala/nl/gideondk/sentinel/ConsumerStage.scala b/src/main/scala/nl/gideondk/sentinel/ConsumerStage.scala new file mode 100644 index 0000000..fbaabd5 --- /dev/null +++ b/src/main/scala/nl/gideondk/sentinel/ConsumerStage.scala @@ -0,0 +1,124 @@ +package nl.gideondk.sentinel + +import akka.stream._ +import akka.stream.scaladsl.{BidiFlow, Broadcast, Flow, GraphDSL, Merge, Source} +import akka.stream.stage.GraphStageLogic.EagerTerminateOutput +import akka.stream.stage.{GraphStage, GraphStageLogic, InHandler, OutHandler} + +import nl.gideondk.sentinel.ConsumerAction._ +import nl.gideondk.sentinel.Registration.SingularResponseRegistration + +import scala.concurrent.{ExecutionContext, Future, Promise} + +class ConsumerStage[Evt, Cmd](resolver: Resolver[Evt]) extends GraphStage[FanOutShape2[Evt, (Evt, ProducerAction[Evt, Cmd]), Event[Evt]]] { + + private val eventIn = Inlet[Evt]("ConsumerStage.Event.In") + private val actionOut = Outlet[(Evt, ProducerAction[Evt, Cmd])]("ConsumerStage.Action.Out") + private val signalOut = Outlet[Event[Evt]]("ConsumerStage.Signal.Out") + + val shape = new FanOutShape2(eventIn, actionOut, signalOut) + + override def createLogic(effectiveAttributes: Attributes) = new GraphStageLogic(shape) with InHandler with OutHandler { + private var chunkSource: SubSourceOutlet[Evt] = _ + + private def chunkSubStreamStarted = chunkSource != null + + private def idle = this + + def setInitialHandlers(): Unit = setHandlers(eventIn, signalOut, idle) + + /* + * + * Substream Logic + * + * */ + + val pullThroughHandler = new OutHandler { + override def onPull() = { + pull(eventIn) + } + } + + val substreamHandler = 
new InHandler with OutHandler { + def endStream(): Unit = { + chunkSource.complete() + chunkSource = null + + if (isAvailable(signalOut) && !hasBeenPulled(eventIn)) pull(eventIn) + setInitialHandlers() + } + + override def onPush(): Unit = { + val chunk = grab(eventIn) + resolver.process(chunk) match { + case ConsumeStreamChunk ⇒ + chunkSource.push(chunk) + + case EndStream ⇒ + endStream() + + case ConsumeChunkAndEndStream ⇒ + chunkSource.push(chunk) + endStream() + + case Ignore ⇒ () + } + } + + override def onPull(): Unit = { + // TODO: Recheck internal flow; checking should be obsolete + if (!hasBeenPulled(eventIn)) pull(eventIn) + } + + override def onUpstreamFinish(): Unit = { + chunkSource.complete() + completeStage() + } + + override def onUpstreamFailure(reason: Throwable): Unit = { + chunkSource.fail(reason) + failStage(reason) + } + } + + def startStream(initialChunk: Option[Evt]): Unit = { + chunkSource = new SubSourceOutlet[Evt]("ChunkSource") + chunkSource.setHandler(pullThroughHandler) + setHandler(eventIn, substreamHandler) + setHandler(signalOut, substreamHandler) + + initialChunk match { + case Some(x) ⇒ push(signalOut, StreamEvent(Source.single(x) ++ Source.fromGraph(chunkSource.source))) + case None ⇒ push(signalOut, StreamEvent(Source.fromGraph(chunkSource.source))) + } + } + + def onPush(): Unit = { + val evt = grab(eventIn) + + resolver.process(evt) match { + case x: ProducerAction[Evt, Cmd] ⇒ emit(actionOut, (evt, x)) + + case AcceptSignal ⇒ push(signalOut, SingularEvent(evt)) + + case AcceptError ⇒ push(signalOut, SingularErrorEvent(evt)) + + case StartStream ⇒ startStream(None) + + case ConsumeStreamChunk ⇒ startStream(Some(evt)) + + case ConsumeChunkAndEndStream ⇒ push(signalOut, StreamEvent(Source.single(evt))) + + case Ignore ⇒ () + } + } + + def onPull(): Unit = { + if (!chunkSubStreamStarted && !hasBeenPulled(eventIn)) pull(eventIn) + } + + setHandler(actionOut, this) + + setInitialHandlers() + } +} \ No newline at end of file diff --git a/src/main/scala/nl/gideondk/sentinel/Pipeline.scala b/src/main/scala/nl/gideondk/sentinel/Pipeline.scala index 6c695f6..b9b63db 100644 --- a/src/main/scala/nl/gideondk/sentinel/Pipeline.scala +++ b/src/main/scala/nl/gideondk/sentinel/Pipeline.scala @@ -1,12 +1,15 @@ package nl.gideondk.sentinel -import akka.actor.ActorSystem +import akka.actor.{Actor, ActorSystem} import akka.stream.OverflowStrategy -import akka.stream.scaladsl.{ BidiFlow, Flow } +import akka.stream.scaladsl.{BidiFlow, Flow, Tcp} import akka.util.ByteString -import scala.concurrent.Future - -object Pipeline { +import Protocol._ +import scala.concurrent.{ExecutionContext, Future} +object Pipeline { + def create[Cmd, Evt](protocol: BidiFlow[ByteString, Evt, Cmd, ByteString, Any], resolver: Resolver[Evt], parallelism: Int, shouldReact: Boolean)(implicit ec: ExecutionContext) = { + protocol >> Processor(resolver, parallelism, shouldReact).flow.reversed + } } diff --git a/src/main/scala/nl/gideondk/sentinel/Processor.scala b/src/main/scala/nl/gideondk/sentinel/Processor.scala index 3b8c36c..c082a81 100644 --- a/src/main/scala/nl/gideondk/sentinel/Processor.scala +++ b/src/main/scala/nl/gideondk/sentinel/Processor.scala @@ -1,37 +1,44 @@ package nl.gideondk.sentinel -import akka.actor.{Actor, Props} import akka.stream.BidiShape -import akka.stream.scaladsl.{BidiFlow, Flow, Sink, Source} -import akka.util.ByteString -import akka.pattern.ask - -//case object Processor { -// -//} -// -//class Processor[I, R <: Rx[I], O, T <: Tx[O]](rxProps: Props, txProps: Props, 
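Pipeline.create above composes a wire protocol with a Processor into a BidiFlow[ByteString, Event[Evt], Command[Cmd], ByteString, Any]. As a hedged sketch (not part of the patch), joining it with a pass-through application flow yields a Flow[ByteString, ByteString] that can be served over TCP. The example leans on the SimpleMessage test protocol and SimpleServerHandler from the test sources, assumes a reacting resolver (shouldReact = true) so surfaced events can be ignored, and uses an arbitrary port:

// Illustrative server wiring, assuming the nl.gideondk.sentinel packages are on the classpath.
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ Flow, Sink, Source, Tcp }
import nl.gideondk.sentinel._
import nl.gideondk.sentinel.protocol._

object PipelineServerSketch extends App {
  implicit val system = ActorSystem("pipeline-sketch")
  implicit val materializer = ActorMaterializer()
  import system.dispatcher

  // SimpleMessage.protocol serializes commands on its top flow, so its reversed form matches
  // the BidiFlow[ByteString, Evt, Cmd, ByteString, Any] orientation Pipeline.create expects.
  val serverFlow =
    Pipeline.create(SimpleMessage.protocol.reversed, SimpleServerHandler, parallelism = 1, shouldReact = true)
      .join(Flow.fromSinkAndSource(Sink.ignore, Source.maybe[Command[SimpleMessageFormat]]))

  Tcp().bind("localhost", 9001).runForeach(_.handleWith(serverFlow))
}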
protocol: BidiFlow[I, ByteString, O, ByteString, Nothing], connection: Flow[ByteString, ByteString, _]) extends Actor { -// -// val rx = Sink.actorSubscriber[O](rxProps) -// val tx = Source.actorPublisher[I](txProps) -// -// protocol >> connection -//// val flow = tx.via(connection).to(rx) -//// flow.run() -// -// def receive = { -// case x: I => -// tx. ? x -// } -//} -// -// -///* -// -// -//--> Request --> Processor --> Tx (bp) --> TCP --> Rx (bp) --> Processor --> Response -// -// -// -// -// */ \ No newline at end of file +import akka.stream.scaladsl.{BidiFlow, Broadcast, Flow, GraphDSL, Merge, Sink, Source} + +import scala.concurrent.{ExecutionContext, Promise} + +case class Processor[Cmd, Evt](flow: BidiFlow[Command[Cmd], Cmd, Evt, Event[Evt], Any]) + +object Processor { + def apply[Cmd, Evt](resolver: Resolver[Evt], producerParallism: Int, shouldReact: Boolean = false)(implicit ec: ExecutionContext): Processor[Cmd, Evt] = { + + val consumerStage = new ConsumerStage[Evt, Cmd](resolver) + val producerStage = new ProducerStage[Evt, Cmd]() + + val functionApply = Flow[(Evt, ProducerAction[Evt, Cmd])].mapAsync[Command[Cmd]](producerParallism) { + case (evt, x: ProducerAction.Signal[Evt, Cmd]) ⇒ x.f(evt).map(x ⇒ SingularCommand[Cmd](x)) + } + + Processor(BidiFlow.fromGraph[Command[Cmd], Cmd, Evt, Event[Evt], Any] { + GraphDSL.create() { implicit b => + import GraphDSL.Implicits._ + + val producer = b add producerStage + val consumer = b add consumerStage + + val commandIn = b add Flow[Command[Cmd]] + + if (shouldReact) { + val fa = b add functionApply + val merge = b add Merge[Command[Cmd]](2) + commandIn ~> merge.in(0) + consumer.out0 ~> fa ~> merge.in(1) + merge.out ~> producer + } else { + consumer.out0 ~> Sink.ignore + commandIn ~> producer + } + + BidiShape(commandIn.in, producer.out, consumer.in, consumer.out1) + } + }) + } +} diff --git a/src/main/scala/nl/gideondk/sentinel/ProducerStage.scala b/src/main/scala/nl/gideondk/sentinel/ProducerStage.scala new file mode 100644 index 0000000..182664b --- /dev/null +++ b/src/main/scala/nl/gideondk/sentinel/ProducerStage.scala @@ -0,0 +1,70 @@ +package nl.gideondk.sentinel + +import akka.stream._ +import akka.stream.scaladsl.{BidiFlow, Flow, GraphDSL, Source} +import akka.stream.stage.GraphStageLogic._ +import akka.stream.stage.{GraphStage, GraphStageLogic, InHandler, OutHandler} +import nl.gideondk.sentinel.ConsumerAction._ + +class ProducerStage[In, Out] extends GraphStage[FlowShape[Command[Out], Out]] { + private val in = Inlet[Command[Out]]("ProducerStage.Command.In") + private val out = Outlet[Out]("ProducerStage.Command.Out") + + var streaming = false + var closeAfterCompletion = false + + val shape = new FlowShape(in, out) + + override def createLogic(effectiveAttributes: Attributes) = new GraphStageLogic(shape) { + val defaultInHandler = new InHandler { + override def onPush(): Unit = grab(in) match { + case x: SingularCommand[Out] ⇒ push(out, x.payload) + case x: StreamingCommand[Out] => stream(x.stream) + } + + override def onUpstreamFinish(): Unit = { + if (streaming) closeAfterCompletion = true + else completeStage() + } + } + + val waitForDemandHandler = new OutHandler { + def onPull(): Unit = pull(in) + } + + setHandler(in, defaultInHandler) + setHandler(out, waitForDemandHandler) + + def stream(outStream: Source[Out, Any]): Unit = { + streaming = true + val sinkIn = new SubSinkInlet[Out]("RenderingSink") + sinkIn.setHandler(new InHandler { + override def onPush(): Unit = push(out, sinkIn.grab()) + + override def 
onUpstreamFinish(): Unit = { + if (closeAfterCompletion) { + completeStage() + } + else { + streaming = false + setHandler(out, waitForDemandHandler) + if (isAvailable(out)) pull(in) + } + } + }) + + setHandler(out, new OutHandler { + override def onPull(): Unit = sinkIn.pull() + + override def onDownstreamFinish(): Unit = { + completeStage() + sinkIn.cancel() + } + }) + + sinkIn.pull() + outStream.runWith(sinkIn.sink)(subFusingMaterializer) + } + + } +} diff --git a/src/main/scala/nl/gideondk/sentinel/Protocol.scala b/src/main/scala/nl/gideondk/sentinel/Protocol.scala new file mode 100644 index 0000000..0e672c7 --- /dev/null +++ b/src/main/scala/nl/gideondk/sentinel/Protocol.scala @@ -0,0 +1,14 @@ +package nl.gideondk.sentinel + +import akka.stream.{ BidiShape, Graph } +import akka.stream.scaladsl.{ BidiFlow, Framing } + +import scala.concurrent.Promise + +case class RequestContext[Cmd, Evt](request: Cmd, responsePromise: Promise[Evt]) + +object Protocol { + implicit class ProtocolChaining[IT, OT, IB, OB, Mat](bf: BidiFlow[IT, OT, IB, OB, Mat]) { + def >>[NextOT, NextIB, Mat2](bidi: Graph[BidiShape[OT, NextOT, NextIB, IB], Mat2]) = bf.atop(bidi) + } +} \ No newline at end of file diff --git a/src/main/scala/nl/gideondk/sentinel/Resolver.scala b/src/main/scala/nl/gideondk/sentinel/Resolver.scala index 32bc36e..447e2e5 100644 --- a/src/main/scala/nl/gideondk/sentinel/Resolver.scala +++ b/src/main/scala/nl/gideondk/sentinel/Resolver.scala @@ -1,99 +1,14 @@ package nl.gideondk.sentinel -import akka.stream.scaladsl.{Flow, Source} +import akka.stream.scaladsl.{ BidiFlow, Concat, Flow, GraphDSL, Source } import akka.stream._ -import akka.stream.stage._ +import akka.stream.actor.{ ActorPublisher, ActorSubscriber } +import akka.stream.stage.GraphStageLogic.EagerTerminateOutput +import akka.stream.stage.{ OutHandler, _ } import akka.util.ByteString import nl.gideondk.sentinel.ConsumerAction._ -import scala.util.{Failure, Success, Try} - - -class ResponseStage[Evt, Cmd](resolver: Processor[Evt]) extends GraphStage[FanOutShape2[Evt, Cmd, Response[Evt]]] { - - private val events = Inlet[Evt]("EventIn") - private val responses = Outlet[Response[Evt]]("ResponseOut") - private val signals = Outlet[Cmd]("SignalOut") - - val shape = new FanOutShape2(events, responses, signals) - - override def createLogic(effectiveAttributes: Attributes) = new GraphStageLogic(shape) with InHandler with OutHandler { - private var chunkSource: SubSourceOutlet[Evt] = _ - - private def chunkSubStreamStarted = chunkSource != null - - private def idle = this - - def setInitialHandlers(): Unit = setHandlers(events, responses, idle) - - def startStream(): Unit = { - chunkSource = new SubSourceOutlet[Evt]("ChunkSource") - chunkSource.setHandler(substreamHandler) - setHandler(events, substreamHandler) - push(responses, StreamResponse(Source.fromGraph(chunkSource.source))) - } - - def onPush(): Unit = { - - val evt = grab(events) - resolver.process(evt) match { - case AcceptSignal ⇒ push(responses, SingularResponse(evt)) - - case AcceptError ⇒ push(responses, SingularErrorResponse(evt)) - - case StartStream => startStream() - - case ConsumeStreamChunk => startStream() - - case ConsumeChunkAndEndStream => push(responses, StreamResponse(Source.single(evt))) - - case Ignore ⇒ () - } - } - - def onPull(): Unit = { - if (!chunkSubStreamStarted) pull(events) - } - - private lazy val substreamHandler = new InHandler with OutHandler { - def endStream(): Unit = { - chunkSource.complete() - chunkSource = null - - if (isAvailable(responses)) 
pull(events) - setInitialHandlers() - } - - override def onPush(): Unit = { - val chunk = grab(events) - - resolver.process(chunk) match { - case ConsumeStreamChunk => chunkSource.push(chunk) - - case EndStream => endStream() - - case ConsumeChunkAndEndStream => chunkSource.push(chunk); endStream() - - case Ignore ⇒ () - } - } - - override def onPull(): Unit = pull(events) - - override def onUpstreamFinish(): Unit = { - chunkSource.complete() - completeStage() - } - - override def onUpstreamFailure(reason: Throwable): Unit = { - chunkSource.fail(reason) - failStage(reason) - } - } - } -} - - -trait Processor[In] { +trait Resolver[In] { def process: PartialFunction[In, Action] } + diff --git a/src/test/scala/nl/gideondk/sentinel/ClientStageSpec.scala b/src/test/scala/nl/gideondk/sentinel/ClientStageSpec.scala new file mode 100644 index 0000000..5093d18 --- /dev/null +++ b/src/test/scala/nl/gideondk/sentinel/ClientStageSpec.scala @@ -0,0 +1,87 @@ +package nl.gideondk.sentinel + +import akka.actor.ActorSystem +import akka.event.Logging +import akka.stream.{ActorMaterializer, Attributes, ClosedShape, OverflowStrategy} +import akka.stream.scaladsl.{Broadcast, Flow, GraphDSL, Merge, RunnableGraph, Sink, Source, Tcp} +import akka.stream.testkit.{TestPublisher, TestSubscriber} +import akka.util.ByteString +import nl.gideondk.sentinel.protocol._ +import org.scalatest._ +import protocol.SimpleMessage._ + +import scala.concurrent._ +import duration._ +import scala.util.{Failure, Success, Try} + +object ClientStageSpec { + + val eventFlow = Flow[Event[SimpleMessageFormat]].flatMapConcat { + case x: StreamEvent[SimpleMessageFormat] => x.chunks + case x: SingularEvent[SimpleMessageFormat] => Source.single(x.data) + } + + val headSink = Sink.head[Event[SimpleMessageFormat]] + val seqSink = Sink.seq[SimpleMessageFormat] + val ignoreSink = Sink.ignore + + + def mockServer(system: ActorSystem, port: Int): Unit = { + implicit val sys = system + import system.dispatcher + implicit val materializer = ActorMaterializer() + + val handler = Sink.foreach[Tcp.IncomingConnection] { conn => + conn handleWith Flow[ByteString] + } + + val connections = Tcp().bind("localhost", port) + val binding = connections.to(handler).run() + + binding.onComplete { + case Success(b) => + println("Server started, listening on: " + b.localAddress) + case Failure(e) => + println(s"Server could not bind to localhost:$port: ${e.getMessage}") + system.terminate() + } + } +} + +class ClientStageSpec extends AkkaSpec { + + import ClientStageSpec._ + + "The ClientStage" should { + "Keep message order intact" in { + val server = mockServer(system, 9000) + implicit val materializer = ActorMaterializer() + + val numberOfMessages = 1024 + + val messages = (for (i <- 0 to numberOfMessages) yield (SingularCommand[SimpleMessageFormat](SimpleReply(i.toString)), Promise[Event[SimpleMessageFormat]]())).toList + val sink = Sink.foreach[(Try[Event[SimpleMessageFormat]], Promise[Event[SimpleMessageFormat]])] { case (event, context) => context.complete(event) } + + val g = RunnableGraph.fromGraph(GraphDSL.create(Source.queue[(Command[SimpleMessageFormat], Promise[Event[SimpleMessageFormat]])](numberOfMessages, OverflowStrategy.backpressure)) { implicit b ⇒ + source => + import GraphDSL.Implicits._ + + val s = b.add(new ClientStage[SimpleMessageFormat, SimpleMessageFormat](32, 8, 2 seconds, () => Processor(SimpleHandler, 1, false), () => SimpleMessage.protocol.reversed)) + + Source.single(ClientStage.LinkUp(Host("localhost", 9000))) ~> s.in0 + source.out 
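The new Resolver[In] trait above is the single extension point the ConsumerStage dispatches on. A minimal hypothetical implementation, mirroring the shape of the SimpleHandler used in the tests; the Demo* types are invented for the sketch and the code assumes the nl.gideondk.sentinel package is in scope:

// Illustrative resolver, not part of the patch.
sealed trait DemoEvent
case class DemoReply(payload: String)       extends DemoEvent
case class DemoStreamChunk(payload: String) extends DemoEvent
case class DemoError(reason: String)        extends DemoEvent

object DemoResolver extends Resolver[DemoEvent] {
  import ConsumerAction._

  def process: PartialFunction[DemoEvent, Action] = {
    case DemoStreamChunk("") => EndStream          // an empty chunk terminates the stream
    case _: DemoStreamChunk  => ConsumeStreamChunk // other chunks feed the sub-source
    case _: DemoReply        => AcceptSignal       // plain replies become SingularEvents
    case _: DemoError        => AcceptError        // errors surface as SingularErrorEvents
  }
}

Events not covered by the partial function would raise a MatchError, so a real resolver typically ends with a catch-all case (for instance returning Ignore).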
~> s.in1 + + s.out ~> b.add(sink) + + ClosedShape + }) + + val sourceQueue = g.run() + messages.foreach(sourceQueue.offer) + val results = Future.sequence(messages.map(_._2.future)) + + Await.result(results, 1 second) should be(messages.map(x => SingularEvent(x._1.payload))) + sourceQueue.complete() + } + } +} diff --git a/src/test/scala/nl/gideondk/sentinel/ConsumerStageSpec.scala b/src/test/scala/nl/gideondk/sentinel/ConsumerStageSpec.scala new file mode 100644 index 0000000..9069f74 --- /dev/null +++ b/src/test/scala/nl/gideondk/sentinel/ConsumerStageSpec.scala @@ -0,0 +1,235 @@ +package nl.gideondk.sentinel + +import akka.actor.ActorSystem +import akka.event.Logging +import akka.stream.{ActorMaterializer, Attributes, ClosedShape} +import akka.stream.scaladsl.{Broadcast, Flow, GraphDSL, Merge, RunnableGraph, Sink, Source} +import akka.stream.testkit.{TestPublisher, TestSubscriber} +import nl.gideondk.sentinel.protocol._ +import org.scalatest._ +import protocol.SimpleMessage._ + +import scala.concurrent._ +import duration._ + +object ConsumerStageSpec { + + val eventFlow = Flow[Event[SimpleMessageFormat]].flatMapConcat { + case x: StreamEvent[SimpleMessageFormat] => x.chunks + case x: SingularEvent[SimpleMessageFormat] => Source.single(x.data) + } + + def stage() = new ConsumerStage[SimpleMessageFormat, SimpleMessageFormat](SimpleHandler) + + val headSink = Sink.head[Event[SimpleMessageFormat]] + val seqSink = Sink.seq[SimpleMessageFormat] + val ignoreSink = Sink.ignore +} + +class ConsumerStageSpec extends AkkaSpec { + + import ConsumerStageSpec._ + + "The ConsumerStage" should { + "handle incoming events" in { + implicit val materializer = ActorMaterializer() + + val g = RunnableGraph.fromGraph(GraphDSL.create(headSink) { implicit b ⇒ + sink ⇒ + import GraphDSL.Implicits._ + + val s = b.add(stage()) + + Source.single(SimpleReply("")) ~> s.in + s.out1 ~> sink.in + s.out0 ~> ignoreSink + + ClosedShape + }) + + Await.result(g.run(), 300.millis) should be(SingularEvent(SimpleReply(""))) + } + + "handle multiple incoming events" in { + implicit val materializer = ActorMaterializer() + + val g = RunnableGraph.fromGraph(GraphDSL.create(Sink.seq[Event[SimpleMessageFormat]]) { implicit b ⇒ + sink ⇒ + import GraphDSL.Implicits._ + + val s = b.add(stage()) + + + Source(List(SimpleReply("A"), SimpleReply("B"), SimpleReply("C"))) ~> s.in + s.out1 ~> sink.in + s.out0 ~> ignoreSink + + ClosedShape + }) + + Await.result(g.run(), 300.millis) should be(Vector(SingularEvent(SimpleReply("A")), SingularEvent(SimpleReply("B")), SingularEvent(SimpleReply("C")))) + } + + "not lose demand that comes in while handling incoming streams" in { + implicit val materializer = ActorMaterializer() + + val inProbe = TestPublisher.manualProbe[SimpleMessageFormat]() + val responseProbe = TestSubscriber.manualProbe[Event[SimpleMessageFormat]] + + val g = RunnableGraph.fromGraph(GraphDSL.create(Sink.fromSubscriber(responseProbe)) { implicit b ⇒ + sink ⇒ + import GraphDSL.Implicits._ + + val s = b.add(stage()) + + Source.fromPublisher(inProbe) ~> s.in + s.out1 ~> sink.in + s.out0 ~> ignoreSink + + ClosedShape + }) + + g.withAttributes(Attributes.inputBuffer(1, 1)).run() + + val inSub = inProbe.expectSubscription() + val responseSub = responseProbe.expectSubscription() + + // Pull first response + responseSub.request(1) + + // Expect propagation towards source + inSub.expectRequest(1) + + // Push one element into stream + inSub.sendNext(SimpleStreamChunk("A")) + + // Expect element flow towards response output + val response 
= responseProbe.expectNext() + + val entityProbe = TestSubscriber.manualProbe[SimpleMessageFormat]() + response.asInstanceOf[StreamEvent[SimpleMessageFormat]].chunks.to(Sink.fromSubscriber(entityProbe)).run() + + // Expect a subscription is made for the sub-stream + val entitySub = entityProbe.expectSubscription() + + // Request the initial element from the sub-source + entitySub.request(1) + +// // Pull is coming from merged stream for initial element +// inSub.expectRequest(1) + + // Expect initial element to be available + entityProbe.expectNext() + + // Request an additional chunk + entitySub.request(1) + + // Merged stream is empty, so expect demand to be propagated towards the source + inSub.expectRequest(1) + + // Send successive element + inSub.sendNext(SimpleStreamChunk("B")) + + // Expect the element to be pushed directly into the sub-source + entityProbe.expectNext() + + responseSub.request(1) + + inSub.sendNext(SimpleStreamChunk("")) + entityProbe.expectComplete() + + // and that demand should go downstream + // since the chunk end was consumed by the stage + inSub.expectRequest(1) + } + + "correctly output stream responses" in { + implicit val materializer = ActorMaterializer() + + val chunkSource = Source(List(SimpleStreamChunk("A"), SimpleStreamChunk("B"), SimpleStreamChunk("C"), SimpleStreamChunk(""))) + + val g = RunnableGraph.fromGraph(GraphDSL.create(seqSink) { implicit b ⇒ + sink ⇒ + import GraphDSL.Implicits._ + + val s = b.add(stage()) + + chunkSource ~> s.in + s.out1 ~> eventFlow ~> sink.in + s.out0 ~> ignoreSink + + ClosedShape + }) + + Await.result(g.run(), 300.millis) should be(Seq(SimpleStreamChunk("A"), SimpleStreamChunk("B"), SimpleStreamChunk("C"))) + } + + "correctly output multiple stream responses" in { + implicit val materializer = ActorMaterializer() + + val items = List.fill(10)(List(SimpleStreamChunk("A"), SimpleStreamChunk("B"), SimpleStreamChunk("C"), SimpleStreamChunk(""))).flatten + val chunkSource = Source(items) + + val g = RunnableGraph.fromGraph(GraphDSL.create(seqSink) { implicit b ⇒ + sink ⇒ + import GraphDSL.Implicits._ + + val s = b.add(stage()) + + chunkSource ~> s.in + s.out1 ~> eventFlow ~> sink.in + s.out0 ~> ignoreSink + + ClosedShape + }) + + Await.result(g.run(), 300.millis) should be(items.filter(_.payload.length > 0)) + } + + "correctly handle asymmetrical message types" in { + implicit val materializer = ActorMaterializer() + + val a = List(SimpleReply("A"), SimpleReply("B"), SimpleReply("C")) + val b = List.fill(10)(List(SimpleStreamChunk("A"), SimpleStreamChunk("B"), SimpleStreamChunk("C"), SimpleStreamChunk(""))).flatten + val c = List(SimpleReply("A"), SimpleReply("B"), SimpleReply("C")) + + val chunkSource = Source(a ++ b ++ c) + + val g = RunnableGraph.fromGraph(GraphDSL.create(seqSink) { implicit b ⇒ + sink ⇒ + import GraphDSL.Implicits._ + + val s = b.add(stage()) + + chunkSource ~> s.in + s.out1 ~> ConsumerStageSpec.eventFlow ~> sink.in + s.out0 ~> ignoreSink + + ClosedShape + }) + + Await.result(g.run(), 300.millis) should be(a ++ b.filter(_.payload.length > 0) ++ c) + } + + "correctly output signals on event-out pipe" in { + implicit val materializer = ActorMaterializer() + + val a = List(SimpleCommand(PING_PONG, ""), SimpleCommand(PING_PONG, ""), SimpleCommand(PING_PONG, "")) + + val g = RunnableGraph.fromGraph(GraphDSL.create(Sink.seq[(SimpleMessageFormat, ProducerAction[SimpleMessageFormat, SimpleMessageFormat])]) { implicit b ⇒ + sink ⇒ + import GraphDSL.Implicits._ + + val s = b.add(stage()) + + Source(a) ~> s.in + 
s.out1 ~> ignoreSink + s.out0 ~> sink.in + + ClosedShape + }) + + Await.result(g.run(), 300.millis).map(_._1) should be(a) + } + } +} \ No newline at end of file diff --git a/src/test/scala/nl/gideondk/sentinel/FullDuplexSpec.scala b/src/test/scala/nl/gideondk/sentinel/FullDuplexSpec.scala deleted file mode 100644 index e044d20..0000000 --- a/src/test/scala/nl/gideondk/sentinel/FullDuplexSpec.scala +++ /dev/null @@ -1,73 +0,0 @@ -package nl.gideondk.sentinel - -import scala.concurrent.ExecutionContext.Implicits.global - -import org.scalatest.WordSpec -import org.scalatest.matchers.ShouldMatchers - -import akka.actor._ -import akka.routing._ - -import scala.concurrent._ -import scala.concurrent.duration._ - -import protocols._ - -class FullDuplexSpec extends WordSpec with ShouldMatchers { - - import SimpleMessage._ - - implicit val duration = Duration(25, SECONDS) - - def client(portNumber: Int)(implicit system: ActorSystem) = Client.randomRouting("localhost", portNumber, 1, "Worker", SimpleMessage.stages, 0.5 seconds, SimpleServerHandler)(system) - - def server(portNumber: Int)(implicit system: ActorSystem) = { - val s = Server(portNumber, SimpleServerHandler, stages = SimpleMessage.stages)(system) - s - } - - "A client and a server" should { - "be able to exchange requests simultaneously" in new TestKitSpec { - val portNumber = TestHelpers.portNumber.getAndIncrement() - val s = server(portNumber) - val c = client(portNumber) - - Thread.sleep(500) - val action = c ? SimpleCommand(PING_PONG, "") - val serverAction = (s ?* SimpleCommand(PING_PONG, "")).map(_.head) - - val responses = Future.sequence(List(action, serverAction)) - - val results = Await.result(responses, 5 seconds) - - results.length should equal(2) - results.distinct.length should equal(1) - } - - "be able to exchange multiple requests simultaneously" in new TestKitSpec { - val portNumber = TestHelpers.portNumber.getAndIncrement() - val s = server(portNumber) - Thread.sleep(1000) - - val c = client(portNumber) - val secC = client(portNumber) - Thread.sleep(1000) - - val numberOfRequests = 10 - - val actions = Future.sequence(List.fill(numberOfRequests)(c ? SimpleCommand(PING_PONG, ""))) - val secActions = Future.sequence(List.fill(numberOfRequests)(secC ? 
SimpleCommand(PING_PONG, ""))) - val serverActions = Future.sequence(List.fill(numberOfRequests)((s ?** SimpleCommand(PING_PONG, "")))) - - val combined = Future.sequence(List(actions, serverActions.map(_.flatten), secActions)) - - val aa = Await.result(actions, 5 seconds) - - val results = Await.result(combined, 5 seconds) - - results(0).length should equal(numberOfRequests) - results(2).length should equal(numberOfRequests) - results(1).length should equal(numberOfRequests * 2) - } - } -} diff --git a/src/test/scala/nl/gideondk/sentinel/ProcessorSpec.scala b/src/test/scala/nl/gideondk/sentinel/ProcessorSpec.scala new file mode 100644 index 0000000..3d447cc --- /dev/null +++ b/src/test/scala/nl/gideondk/sentinel/ProcessorSpec.scala @@ -0,0 +1,59 @@ +package nl.gideondk.sentinel + +import akka.event.Logging +import akka.stream.{ActorMaterializer, Attributes, ClosedShape} +import akka.stream.scaladsl.{Flow, GraphDSL, RunnableGraph, Sink, Source} +import akka.stream.testkit.{TestPublisher, TestSubscriber} +import nl.gideondk.sentinel.Command.Ask +import nl.gideondk.sentinel.Registration.SingularResponseRegistration +import nl.gideondk.sentinel.protocol._ + +import scala.concurrent.{Await, Promise} +import scala.concurrent.duration._ + +object ProcessorSpec { + +} + +class ProcessorSpec extends AkkaSpec { + val processor = Processor[SimpleMessageFormat, SimpleMessageFormat](SimpleHandler, 1) + val serverProcessor = Processor[SimpleMessageFormat, SimpleMessageFormat](SimpleServerHandler, 1, true) + + import ProcessorSpec._ + + "The AntennaStage" should { + "correctly flow in a client, server situation" in { + import SimpleCommand._ + import nl.gideondk.sentinel.protocol.SimpleMessage._ + + implicit val materializer = ActorMaterializer() + + val pingCommand = SingularCommand[SimpleMessageFormat](SimpleCommand(PING_PONG, "")) + val zeroCommand = SingularCommand[SimpleMessageFormat](SimpleCommand(0, "")) + + val source = Source[SingularCommand[SimpleMessageFormat]](List(pingCommand, zeroCommand, pingCommand, zeroCommand)) + + val flow = RunnableGraph.fromGraph(GraphDSL.create(Sink.seq[Event[SimpleMessageFormat]]) { implicit b => + sink => + import GraphDSL.Implicits._ + + val client = b.add(processor.flow) + val server = b.add(serverProcessor.flow.reversed) + + source ~> client.in1 + client.out1 ~> server.in1 + + server.out1 ~> b.add(Sink.ignore) + server.out2 ~> client.in2 + + client.out2 ~> sink.in + + Source.empty[SingularCommand[SimpleMessageFormat]] ~> server.in2 + + ClosedShape + }) + + Await.result(flow.run(), 5 seconds) shouldBe Vector(SingularEvent(SimpleReply("PONG")), SingularEvent(SimpleReply("PONG"))) + } + } +} \ No newline at end of file diff --git a/src/test/scala/nl/gideondk/sentinel/ProducerStageSpec.scala b/src/test/scala/nl/gideondk/sentinel/ProducerStageSpec.scala new file mode 100644 index 0000000..0beddcd --- /dev/null +++ b/src/test/scala/nl/gideondk/sentinel/ProducerStageSpec.scala @@ -0,0 +1,48 @@ +package nl.gideondk.sentinel + +import akka.stream.ActorMaterializer +import akka.stream._ +import akka.stream.scaladsl._ + +import nl.gideondk.sentinel.Registration.SingularResponseRegistration +import nl.gideondk.sentinel.protocol.{SimpleMessageFormat, SimpleReply} + +import scala.concurrent._ +import scala.concurrent.duration._ +import scala.concurrent.Promise + +object ProducerStageSpec { + def stage() = new ProducerStage[SimpleMessageFormat, SimpleMessageFormat]() +} + +class ProducerStageSpec extends AkkaSpec { + + import ProducerStageSpec._ + + "The ProducerStage" 
should { + "handle outgoing messages" in { + implicit val materializer = ActorMaterializer() + + val command = SingularCommand[SimpleMessageFormat](SimpleReply("A")) + val result = Await.result(Source(List(command)).via(stage()).runWith(Sink.seq), 5 seconds) + + result shouldBe Vector(SimpleReply("A")) + + val multiResult = Await.result(Source(List(command, command, command)).via(stage()).runWith(Sink.seq), 5 seconds) + multiResult shouldBe Vector(SimpleReply("A"), SimpleReply("A"), SimpleReply("A")) + } + + "handle outgoing streams" in { + implicit val materializer = ActorMaterializer() + + val items = List(SimpleReply("A"), SimpleReply("B"), SimpleReply("C"), SimpleReply("D")) + val command = StreamingCommand[SimpleMessageFormat](Source(items)) + + val result = Await.result(Source(List(command)).via(stage()).runWith(Sink.seq), 5 seconds) + result shouldBe items + + val multiResult = Await.result(Source(List(command, command, command)).via(stage()).runWith(Sink.seq), 5 seconds) + multiResult shouldBe (items ++ items ++ items) + } + } +} \ No newline at end of file diff --git a/src/test/scala/nl/gideondk/sentinel/RequestResponse.scala b/src/test/scala/nl/gideondk/sentinel/RequestResponse.scala deleted file mode 100644 index c807c43..0000000 --- a/src/test/scala/nl/gideondk/sentinel/RequestResponse.scala +++ /dev/null @@ -1,93 +0,0 @@ -package nl.gideondk.sentinel - -import scala.concurrent.ExecutionContext.Implicits.global - -import org.scalatest.WordSpec - -import akka.actor._ -import scala.concurrent.duration._ -import scala.concurrent._ - -import scala.util.Try - -import protocols._ - -class RequestResponseSpec extends WordSpec { - - import SimpleMessage._ - - implicit val duration = Duration(5, SECONDS) - - def client(portNumber: Int)(implicit system: ActorSystem) = Client.roundRobinRouting("localhost", portNumber, 16, "Worker", SimpleMessage.stages, 0.1 seconds, SimpleServerHandler, lowBytes = 1024L, highBytes = 1024 * 1024, maxBufferSize = 1024 * 1024 * 50)(system) - - def server(portNumber: Int)(implicit system: ActorSystem) = { - val s = Server(portNumber, SimpleServerHandler, stages = SimpleMessage.stages)(system) - Thread.sleep(100) - s - } - - "A client" should { - "be able to request a response from a server" in new TestKitSpec { - val portNumber = TestHelpers.portNumber.getAndIncrement() - val s = server(portNumber) - val c = client(portNumber) - - val action = c ? SimpleCommand(PING_PONG, "") - val result = Try(Await.result(action, 5 seconds)) - - result.isSuccess should equal(true) - } - - "be able to requests multiple requests from a server" in new TestKitSpec { - val portNumber = TestHelpers.portNumber.getAndIncrement() - val s = server(portNumber) - val c = client(portNumber) - Thread.sleep(100) - - val numberOfRequests = 1000 - - val action = Future.sequence(List.fill(numberOfRequests)(c ? SimpleCommand(ECHO, LargerPayloadTestHelper.randomBSForSize(1024 * 10)))) - val result = Try(Await.result(action, 5 seconds)) - - result.get.length should equal(numberOfRequests) - result.isSuccess should equal(true) - } - - "be able to receive responses in correct order" in new TestKitSpec { - val portNumber = TestHelpers.portNumber.getAndIncrement() - val s = server(portNumber) - val c = client(portNumber) - - val numberOfRequests = 20 * 1000 - - val items = List.range(0, numberOfRequests).map(_.toString) - val action = Future.sequence(items.map(x ⇒ (c ? 
SimpleCommand(ECHO, x)))) - val result = Await.result(action, 5 seconds) - - result.map(_.payload) should equal(items) - } - - "should automatically reconnect" in new TestKitSpec { - val portNumber = TestHelpers.portNumber.getAndIncrement() - val s = server(portNumber) - val c = client(portNumber) - Thread.sleep(500) - - val action = c ? SimpleCommand(PING_PONG, "") - val result = Try(Await.result(action, 5 seconds)) - - result.isSuccess should equal(true) - - system.stop(s.actor) - Thread.sleep(250) - - val secAction = c ? SimpleCommand(PING_PONG, "") - val ss = server(portNumber) - - Thread.sleep(250) - val endResult = Try(Await.result(secAction, 10 seconds)) - - endResult.isSuccess should equal(true) - } - } -} diff --git a/src/test/scala/nl/gideondk/sentinel/ServerRequestSpec.scala b/src/test/scala/nl/gideondk/sentinel/ServerRequestSpec.scala deleted file mode 100644 index f8db82c..0000000 --- a/src/test/scala/nl/gideondk/sentinel/ServerRequestSpec.scala +++ /dev/null @@ -1,99 +0,0 @@ -package nl.gideondk.sentinel - -import scala.concurrent.ExecutionContext.Implicits.global - -import org.scalatest.WordSpec -import org.scalatest.matchers.ShouldMatchers - -import akka.actor._ -import akka.routing._ -import scala.concurrent._ -import scala.concurrent.duration._ - -import protocols._ -import akka.util.Timeout - -class ServerRequestSpec extends WordSpec { - - import SimpleMessage._ - - implicit val duration = Duration(5, SECONDS) - implicit val timeout = Timeout(Duration(5, SECONDS)) - - val numberOfConnections = 16 - - def client(portNumber: Int)(implicit system: ActorSystem) = Client.randomRouting("localhost", portNumber, numberOfConnections, "Worker", SimpleMessage.stages, 0.5 seconds, SimpleServerHandler)(system) - - def server(portNumber: Int)(implicit system: ActorSystem) = { - val s = Server(portNumber, SimpleServerHandler, stages = SimpleMessage.stages)(system) - Thread.sleep(100) - s - } - - "A server" should { - "be able to send a request to a client" in new TestKitSpec { - val portNumber = TestHelpers.portNumber.getAndIncrement() - val s = server(portNumber) - val c = client(portNumber) - Thread.sleep(500) - - val action = (s ? 
SimpleCommand(PING_PONG, "")) - val result = Await.result(action, 5 seconds) - - result should equal(SimpleReply("PONG")) - } - - "be able to send a request to a all unique connected hosts" in new TestKitSpec { - val portNumber = TestHelpers.portNumber.getAndIncrement() - val s = server(portNumber) - - val numberOfClients = 5 - List.fill(numberOfClients)(client(portNumber)) - - Thread.sleep(500) - - val action = (s ?* SimpleCommand(PING_PONG, "")) - val result = Await.result(action, 5 seconds) - - result.length should equal(1) - } - - "be able to send a request to a all connected clients" in new TestKitSpec { - val portNumber = TestHelpers.portNumber.getAndIncrement() - val s = server(portNumber) - - val numberOfClients = 5 - List.fill(numberOfClients)(client(portNumber)) - - Thread.sleep(500) - - val action = (s ?** SimpleCommand(PING_PONG, "")) - val result = Await.result(action, 5 seconds) - - result.length should equal(numberOfClients * numberOfConnections) - } - - "be able to retrieve the correct number of connected sockets" in new TestKitSpec { - val portNumber = TestHelpers.portNumber.getAndIncrement() - val s = server(portNumber) - - val numberOfClients = 5 - val clients = List.fill(numberOfClients)(client(portNumber)) - - Thread.sleep(500) - - val connectedSockets = Await.result((s connectedSockets), 5 seconds) - connectedSockets should equal(numberOfClients * numberOfConnections) - - val connectedHosts = Await.result((s connectedHosts), 5 seconds) - connectedHosts should equal(1) - - val toBeKilledActors = clients.splitAt(3)._1.map(_.actor) - toBeKilledActors.foreach(x ⇒ x ! PoisonPill) - Thread.sleep(500) - - val stillConnectedSockets = Await.result((s connectedSockets), 5 seconds) - stillConnectedSockets should equal(2 * numberOfConnections) - } - } -} diff --git a/src/test/scala/nl/gideondk/sentinel/StreamingSpec.scala b/src/test/scala/nl/gideondk/sentinel/StreamingSpec.scala deleted file mode 100644 index 0f08092..0000000 --- a/src/test/scala/nl/gideondk/sentinel/StreamingSpec.scala +++ /dev/null @@ -1,155 +0,0 @@ -package nl.gideondk.sentinel - -import scala.concurrent.ExecutionContext.Implicits.global - -import org.scalatest.WordSpec -import org.scalatest.matchers.ShouldMatchers - -import akka.actor._ -import akka.routing._ -import scala.concurrent.duration._ -import scala.concurrent._ - -import scala.util.Try -import play.api.libs.iteratee._ - -import protocols._ - -class StreamingSpec extends WordSpec { - - import SimpleMessage._ - - implicit val duration = Duration(5, SECONDS) - - def client(portNumber: Int)(implicit system: ActorSystem) = Client.randomRouting("localhost", portNumber, 1, "Worker", SimpleMessage.stages, 0.5 seconds, SimpleServerHandler)(system) - def nonPipelinedClient(portNumber: Int)(implicit system: ActorSystem) = Client.randomRouting("localhost", portNumber, 1, "Worker", SimpleMessage.stages, 0.5 seconds, SimpleServerHandler, false)(system) - - def server(portNumber: Int)(implicit system: ActorSystem) = { - val s = Server(portNumber, SimpleServerHandler, stages = SimpleMessage.stages)(system) - Thread.sleep(100) - s - } - - "A client" should { - "be able to send a stream to a server" in new TestKitSpec { - val portNumber = TestHelpers.portNumber.getAndIncrement() - val s = server(portNumber) - val c = client(portNumber) - - val count = 500 - val chunks = List.fill(count)(SimpleStreamChunk("ABCDEF")) ++ List(SimpleStreamChunk("")) - val action = c ?<<- (SimpleCommand(TOTAL_CHUNK_SIZE, ""), Enumerator(chunks: _*)) - - val localLength = 
chunks.foldLeft(0)((b, a) ⇒ b + a.payload.length) - - val result = Try(Await.result(action, 5 seconds)) - - result.isSuccess should equal(true) - result.get.payload.toInt should equal(localLength) - } - - "be able to receive streams from a server" in new TestKitSpec { - val portNumber = TestHelpers.portNumber.getAndIncrement() - val s = server(portNumber) - val c = client(portNumber) - - val count = 500 - val action = c ?->> SimpleCommand(GENERATE_NUMBERS, count.toString) - - val f = action.flatMap(_ |>>> Iteratee.getChunks) - val result = Await.result(f, 5 seconds) - - result.length should equal(count) - } - - "be able to receive multiple streams simultaneously from a server" in new TestKitSpec { - val portNumber = TestHelpers.portNumber.getAndIncrement() - val s = server(portNumber) - val c = client(portNumber) - - val count = 500 - val numberOfActions = 8 - val actions = Future.sequence(List.fill(numberOfActions)((c ?->> SimpleCommand(GENERATE_NUMBERS, count.toString)).flatMap(x ⇒ x |>>> Iteratee.getChunks))) - - val result = Await.result(actions.map(_.flatten), 5 seconds) - - result.length should equal(count * numberOfActions) - } - - "be able to receive multiple streams and normal commands simultaneously from a server" in new TestKitSpec { - val portNumber = TestHelpers.portNumber.getAndIncrement() - val s = server(portNumber) - val c = client(portNumber) - - val count = 500 - val numberOfActions = 8 - - val streamAction = Future.sequence(List.fill(numberOfActions)((c ?->> SimpleCommand(GENERATE_NUMBERS, count.toString)).flatMap(x ⇒ x |>>> Iteratee.getChunks))) - val action = Future.sequence(List.fill(count)(c ? SimpleCommand(PING_PONG, ""))) - - val actions = Future.sequence(List(streamAction, action)) - - val result = Try(Await.result(actions.map(_.flatten), 5 seconds)) - - result.isSuccess should equal(true) - } - - "be able to receive multiple streams and normal commands simultaneously from a server in a non-pipelined environment" in new TestKitSpec { - val portNumber = TestHelpers.portNumber.getAndIncrement() - val s = server(portNumber) - val c = nonPipelinedClient(portNumber) - - val count = 500 - val numberOfActions = 8 - - val streamAction = Future.sequence(List.fill(numberOfActions)((c ?->> SimpleCommand(GENERATE_NUMBERS, count.toString)).flatMap(x ⇒ x |>>> Iteratee.getChunks))) - val action = Future.sequence(List.fill(count)(c ? SimpleCommand(PING_PONG, ""))) - - val actions = Future.sequence(List(streamAction, action)) - - val result = Try(Await.result(actions.map(_.flatten), 5 seconds)) - - result.isSuccess should equal(true) - } - - "be able to handle slow or idle consumers while retrieving streams from a server" in new TestKitSpec { - val portNumber = TestHelpers.portNumber.getAndIncrement() - val s = server(portNumber) - val c = client(portNumber) - - val count = 500 - val numberOfActions = 8 - - val newAct = for { - takSome ← (c ?->> SimpleCommand(GENERATE_NUMBERS, count.toString)).flatMap(x ⇒ x &> Enumeratee.take(1) |>>> Iteratee.getChunks) - takSome ← (c ?->> SimpleCommand(GENERATE_NUMBERS, count.toString)).flatMap(x ⇒ x &> Enumeratee.take(1) &> Enumeratee.map(x ⇒ throw new Exception("")) |>>> Iteratee.getChunks).recover { case e ⇒ () } - act ← c ? SimpleCommand(PING_PONG, "") - act ← c ? SimpleCommand(PING_PONG, "") - takSome ← (c ?->> SimpleCommand(GENERATE_NUMBERS, count.toString)).flatMap(x ⇒ x |>>> Iteratee.getChunks) - act ← c ? 
SimpleCommand(PING_PONG, "") - } yield act - - val result = Try(Await.result(newAct, 5 seconds)) - - result.isSuccess should equal(true) - } - - "be able to receive send streams simultaneously to a server" in new TestKitSpec { - val portNumber = TestHelpers.portNumber.getAndIncrement() - val s = server(portNumber) - val c = client(portNumber) - - val count = 500 - val chunks = List.fill(count)(SimpleStreamChunk("ABCDEF")) ++ List(SimpleStreamChunk("")) - val action = c ?<<- (SimpleCommand(TOTAL_CHUNK_SIZE, ""), Enumerator(chunks: _*)) - - val numberOfActions = 2 - val actions = Future.sequence(List.fill(numberOfActions)(c ?<<- (SimpleCommand(TOTAL_CHUNK_SIZE, ""), Enumerator(chunks: _*)))) - - val localLength = chunks.foldLeft(0)((b, a) ⇒ b + a.payload.length) - val result = Await.result(actions, 5 seconds) - - result.map(_.payload.toInt).sum should equal(localLength * numberOfActions) - } - } -} diff --git a/src/test/scala/nl/gideondk/sentinel/TestHelpers.scala b/src/test/scala/nl/gideondk/sentinel/TestHelpers.scala index caf2598..1d69c97 100644 --- a/src/test/scala/nl/gideondk/sentinel/TestHelpers.scala +++ b/src/test/scala/nl/gideondk/sentinel/TestHelpers.scala @@ -1,9 +1,7 @@ package nl.gideondk.sentinel import org.scalatest.{ Suite, BeforeAndAfterAll, WordSpec } -import org.scalatest.matchers.ShouldMatchers -import akka.io.SymmetricPipelineStage import akka.util.ByteString import akka.actor._ @@ -11,16 +9,121 @@ import akka.testkit._ import java.util.concurrent.atomic.AtomicInteger -import protocols._ +import org.scalactic.Constraint -abstract class TestKitSpec extends TestKit(ActorSystem(java.util.UUID.randomUUID.toString)) - with Suite - with ShouldMatchers - with BeforeAndAfterAll - with ImplicitSender { - override def afterAll = { - system.shutdown() +import language.postfixOps +import org.scalatest.{ BeforeAndAfterAll, WordSpecLike } +import org.scalatest.Matchers +import akka.actor.ActorSystem +import akka.event.{ Logging, LoggingAdapter } + +import scala.concurrent.duration._ +import scala.concurrent.Future +import com.typesafe.config.{ Config, ConfigFactory } +import akka.dispatch.Dispatchers +import akka.testkit.TestEvent._ +import org.scalactic.ConversionCheckedTripleEquals +import org.scalatest.concurrent.ScalaFutures +import org.scalatest.time.Span + +object AkkaSpec { + val testConf: Config = ConfigFactory.parseString(""" + akka { + loggers = ["akka.testkit.TestEventListener"] + loglevel = "WARNING" + stdout-loglevel = "WARNING" + actor { + default-dispatcher { + executor = "fork-join-executor" + fork-join-executor { + parallelism-min = 8 + parallelism-factor = 2.0 + parallelism-max = 8 + } + } + } + } + """) + + def mapToConfig(map: Map[String, Any]): Config = { + import scala.collection.JavaConverters._ + ConfigFactory.parseMap(map.asJava) + } + + def getCallerName(clazz: Class[_]): String = { + val s = (Thread.currentThread.getStackTrace map (_.getClassName) drop 1) + .dropWhile(_ matches "(java.lang.Thread|.*AkkaSpec.?$|.*StreamSpec.?$)") + val reduced = s.lastIndexWhere(_ == clazz.getName) match { + case -1 ⇒ s + case z ⇒ s drop (z + 1) + } + reduced.head.replaceFirst(""".*\.""", "").replaceAll("[^a-zA-Z_0-9]", "_") } + +} + + +abstract class AkkaSpec(_system: ActorSystem) + extends TestKit(_system) with WordSpecLike with Matchers with BeforeAndAfterAll + with ConversionCheckedTripleEquals with ScalaFutures { + + implicit val patience = PatienceConfig(testKitSettings.DefaultTimeout.duration, Span(100, org.scalatest.time.Millis)) + + implicit val ec = 
_system.dispatcher + + def this(config: Config) = this(ActorSystem( + AkkaSpec.getCallerName(getClass), + ConfigFactory.load(config.withFallback(AkkaSpec.testConf)))) + + def this(s: String) = this(ConfigFactory.parseString(s)) + + def this(configMap: Map[String, _]) = this(AkkaSpec.mapToConfig(configMap)) + + def this() = this(ActorSystem(AkkaSpec.getCallerName(getClass), AkkaSpec.testConf)) + + val log: LoggingAdapter = Logging(system, this.getClass) + + override val invokeBeforeAllAndAfterAllEvenIfNoTestsAreExpected = true + + final override def beforeAll { + atStartup() + } + + final override def afterAll { + beforeTermination() + shutdown() + afterTermination() + } + + protected def atStartup() {} + + protected def beforeTermination() {} + + protected def afterTermination() {} + + def spawn(dispatcherId: String = Dispatchers.DefaultDispatcherId)(body: ⇒ Unit): Unit = + Future(body)(system.dispatchers.lookup(dispatcherId)) + + def expectedTestDuration: FiniteDuration = 60 seconds + + def muteDeadLetters(messageClasses: Class[_]*)(sys: ActorSystem = system): Unit = + if (!sys.log.isDebugEnabled) { + def mute(clazz: Class[_]): Unit = + sys.eventStream.publish(Mute(DeadLettersFilter(clazz)(occurrences = Int.MaxValue))) + if (messageClasses.isEmpty) mute(classOf[AnyRef]) + else messageClasses foreach mute + } + + // for ScalaTest === compare of Class objects + implicit def classEqualityConstraint[A, B]: Constraint[Class[A], Class[B]] = + new Constraint[Class[A], Class[B]] { + def areEqual(a: Class[A], b: Class[B]) = a == b + } + + implicit def setEqualityConstraint[A, T <: Set[_ <: A]]: Constraint[Set[A], T] = + new Constraint[Set[A], T] { + def areEqual(a: Set[A], b: T) = a == b + } } object TestHelpers { @@ -54,7 +157,7 @@ object LargerPayloadTestHelper { val stringB = new StringBuilder(size) val paddingString = "abcdefghijklmnopqrs" - while (stringB.length() + paddingString.length() < size) stringB.append(paddingString) + while ((stringB.length + paddingString.length) < size) stringB.append(paddingString) stringB.toString() } diff --git a/src/test/scala/nl/gideondk/sentinel/protocol/SimpleMessage.scala b/src/test/scala/nl/gideondk/sentinel/protocol/SimpleMessage.scala new file mode 100644 index 0000000..553ae28 --- /dev/null +++ b/src/test/scala/nl/gideondk/sentinel/protocol/SimpleMessage.scala @@ -0,0 +1,104 @@ +package nl.gideondk.sentinel.protocol + +import akka.stream.scaladsl.{BidiFlow, Framing} +import akka.util.{ByteString, ByteStringBuilder} +import nl.gideondk.sentinel._ + +import scala.concurrent.Future +import scala.concurrent.ExecutionContext.Implicits.global + +sealed trait SimpleMessageFormat { + def payload: String +} + +case class SimpleCommand(cmd: Int, payload: String) extends SimpleMessageFormat + +// 1 +case class SimpleReply(payload: String) extends SimpleMessageFormat + +// 2 +case class SimpleStreamChunk(payload: String) extends SimpleMessageFormat + +// 3 +case class SimpleError(payload: String) extends SimpleMessageFormat + + +object SimpleMessage { + val PING_PONG = 1 + val TOTAL_CHUNK_SIZE = 2 + val GENERATE_NUMBERS = 3 + val CHUNK_LENGTH = 4 + val ECHO = 5 + + implicit val byteOrder = java.nio.ByteOrder.BIG_ENDIAN + + def deserialize(bs: ByteString): SimpleMessageFormat = { + val iter = bs.iterator + iter.getByte.toInt match { + case 1 ⇒ + SimpleCommand(iter.getInt, new String(iter.toByteString.toArray)) + case 2 ⇒ + SimpleReply(new String(iter.toByteString.toArray)) + case 3 ⇒ + SimpleStreamChunk(new String(iter.toByteString.toArray)) + case 4 ⇒ + 
SimpleError(new String(iter.toByteString.toArray)) + } + } + + def serialize(m: SimpleMessageFormat): ByteString = { + val bsb = new ByteStringBuilder() + m match { + case x: SimpleCommand ⇒ + bsb.putByte(1.toByte) + bsb.putInt(x.cmd) + bsb.putBytes(x.payload.getBytes) + case x: SimpleReply ⇒ + bsb.putByte(2.toByte) + bsb.putBytes(x.payload.getBytes) + case x: SimpleStreamChunk ⇒ + bsb.putByte(3.toByte) + bsb.putBytes(x.payload.getBytes) + case x: SimpleError ⇒ + bsb.putByte(4.toByte) + bsb.putBytes(x.payload.getBytes) + case _ ⇒ + } + bsb.result + } + + val flow = BidiFlow.fromFunctions(serialize, deserialize) + + def protocol = flow.atop(Framing.simpleFramingProtocol(1024)) +} + +import SimpleMessage._ + +object SimpleHandler extends Resolver[SimpleMessageFormat] { + def process: PartialFunction[SimpleMessageFormat, Action] = { + case SimpleStreamChunk(x) ⇒ if (x.length > 0) ConsumerAction.ConsumeStreamChunk else ConsumerAction.EndStream + case x: SimpleError ⇒ ConsumerAction.AcceptError + case x: SimpleReply ⇒ ConsumerAction.AcceptSignal + case SimpleCommand(PING_PONG, payload) ⇒ ProducerAction.Signal { x: SimpleCommand ⇒ Future(SimpleReply("PONG")) } + case x => println("Unhandled: " + x); ConsumerAction.Ignore + } +} + + +object SimpleServerHandler extends Resolver[SimpleMessageFormat] { + def process: PartialFunction[SimpleMessageFormat, Action] = { + case SimpleCommand(PING_PONG, payload) ⇒ ProducerAction.Signal { x: SimpleCommand ⇒ Future(SimpleReply("PONG")) } + case x => println("Unhandled: " + x); ConsumerAction.Ignore + + + // case SimpleCommand(TOTAL_CHUNK_SIZE, payload) ⇒ ProducerAction.ConsumeStream { x: SimpleCommand ⇒ + // s: Enumerator[SimpleStreamChunk] ⇒ + // s |>>> Iteratee.fold(0) { (b, a) ⇒ b + a.payload.length } map (x ⇒ SimpleReply(x.toString)) + // } + // case SimpleCommand(GENERATE_NUMBERS, payload) ⇒ ProducerAction.ProduceStream { x: SimpleCommand ⇒ + // val count = payload.toInt + // Future((Enumerator(List.range(0, count): _*) &> Enumeratee.map(x ⇒ SimpleStreamChunk(x.toString))) >>> Enumerator(SimpleStreamChunk(""))) + // } + // case SimpleCommand(ECHO, payload) ⇒ ProducerAction.Signal { x: SimpleCommand ⇒ Future(SimpleReply(x.payload)) } + } +} \ No newline at end of file diff --git a/src/test/scala/nl/gideondk/sentinel/protocols/SimpleMessage.scala b/src/test/scala/nl/gideondk/sentinel/protocols/SimpleMessage.scala deleted file mode 100644 index f041d8c..0000000 --- a/src/test/scala/nl/gideondk/sentinel/protocols/SimpleMessage.scala +++ /dev/null @@ -1,103 +0,0 @@ -package nl.gideondk.sentinel.protocols - -import scala.concurrent._ -import scala.concurrent.ExecutionContext.Implicits.global - -import akka.io._ -import akka.util.{ ByteString, ByteStringBuilder } - -import nl.gideondk.sentinel._ -import play.api.libs.iteratee._ - -trait SimpleMessageFormat { - def payload: String -} - -case class SimpleCommand(cmd: Int, payload: String) extends SimpleMessageFormat // 1 -case class SimpleReply(payload: String) extends SimpleMessageFormat // 2 -case class SimpleStreamChunk(payload: String) extends SimpleMessageFormat // 3 -case class SimpleError(payload: String) extends SimpleMessageFormat // 4 - -class PingPongMessageStage extends SymmetricPipelineStage[PipelineContext, SimpleMessageFormat, ByteString] { - override def apply(ctx: PipelineContext) = new SymmetricPipePair[SimpleMessageFormat, ByteString] { - implicit val byteOrder = java.nio.ByteOrder.BIG_ENDIAN - - override val commandPipeline = { - msg: SimpleMessageFormat ⇒ - { - val bsb = new 
ByteStringBuilder() - msg match { - case x: SimpleCommand ⇒ - bsb.putByte(1.toByte) - bsb.putInt(x.cmd) - bsb.putBytes(x.payload.getBytes) - case x: SimpleReply ⇒ - bsb.putByte(2.toByte) - bsb.putBytes(x.payload.getBytes) - case x: SimpleStreamChunk ⇒ - bsb.putByte(3.toByte) - bsb.putBytes(x.payload.getBytes) - case x: SimpleError ⇒ - bsb.putByte(4.toByte) - bsb.putBytes(x.payload.getBytes) - case _ ⇒ - } - Seq(Right(bsb.result)) - } - - } - - override val eventPipeline = { - bs: ByteString ⇒ - val iter = bs.iterator - iter.getByte.toInt match { - case 1 ⇒ - Seq(Left(SimpleCommand(iter.getInt, new String(iter.toByteString.toArray)))) - case 2 ⇒ - Seq(Left(SimpleReply(new String(iter.toByteString.toArray)))) - case 3 ⇒ - Seq(Left(SimpleStreamChunk(new String(iter.toByteString.toArray)))) - case 4 ⇒ - Seq(Left(SimpleError(new String(iter.toByteString.toArray)))) - } - - } - } -} - -object SimpleMessage { - val stages = new PingPongMessageStage >> new LengthFieldFrame(1024 * 1024) - - val PING_PONG = 1 - val TOTAL_CHUNK_SIZE = 2 - val GENERATE_NUMBERS = 3 - val CHUNK_LENGTH = 4 - val ECHO = 5 -} - -import SimpleMessage._ -trait DefaultSimpleMessageHandler extends Resolver[SimpleMessageFormat, SimpleMessageFormat] { - def process = { - case SimpleStreamChunk(x) ⇒ if (x.length > 0) ConsumerAction.ConsumeStreamChunk else ConsumerAction.EndStream - case x: SimpleError ⇒ ConsumerAction.AcceptError - case x: SimpleReply ⇒ ConsumerAction.AcceptSignal - } -} - -object SimpleClientHandler extends DefaultSimpleMessageHandler - -object SimpleServerHandler extends DefaultSimpleMessageHandler { - - override def process = super.process orElse { - case SimpleCommand(PING_PONG, payload) ⇒ ProducerAction.Signal { x: SimpleCommand ⇒ Future(SimpleReply("PONG")) } - case SimpleCommand(TOTAL_CHUNK_SIZE, payload) ⇒ ProducerAction.ConsumeStream { x: SimpleCommand ⇒ - s: Enumerator[SimpleStreamChunk] ⇒ - s |>>> Iteratee.fold(0) { (b, a) ⇒ b + a.payload.length } map (x ⇒ SimpleReply(x.toString)) - } - case SimpleCommand(GENERATE_NUMBERS, payload) ⇒ ProducerAction.ProduceStream { x: SimpleCommand ⇒ - val count = payload.toInt - Future((Enumerator(List.range(0, count): _*) &> Enumeratee.map(x ⇒ SimpleStreamChunk(x.toString))) >>> Enumerator(SimpleStreamChunk(""))) - } - case SimpleCommand(ECHO, payload) ⇒ ProducerAction.Signal { x: SimpleCommand ⇒ Future(SimpleReply(x.payload)) } - } -} \ No newline at end of file From b6b5cab076f85dd9b690abbf4b592209542fc459 Mon Sep 17 00:00:00 2001 From: Gideon de Kok Date: Sun, 25 Dec 2016 10:58:17 +0100 Subject: [PATCH 34/54] Clean-up --- sbt | 2 - .../scala/nl/gideondk/sentinel/Action.scala | 2 +- .../nl/gideondk/sentinel/ConsumerStage.scala | 28 ++++---- .../scala/nl/gideondk/sentinel/Pipeline.scala | 6 +- .../nl/gideondk/sentinel/Processor.scala | 9 +-- .../nl/gideondk/sentinel/ProducerStage.scala | 17 +++-- .../nl/gideondk/sentinel/client/Client.scala | 64 ++++++++++++++++++ .../ClientStage.scala} | 61 ++++++++--------- .../gideondk/sentinel/ClientStageSpec.scala | 36 ++++------ .../gideondk/sentinel/ConsumerStageSpec.scala | 66 ++++++++----------- .../nl/gideondk/sentinel/ProcessorSpec.scala | 18 ++--- .../gideondk/sentinel/ProducerStageSpec.scala | 2 +- .../sentinel/protocol/SimpleMessage.scala | 17 ++--- 13 files changed, 182 insertions(+), 146 deletions(-) create mode 100644 src/main/scala/nl/gideondk/sentinel/client/Client.scala rename src/main/scala/nl/gideondk/sentinel/{Client.scala => client/ClientStage.scala} (80%) diff --git a/sbt b/sbt index 4a8d430..5f343ac 100755 
--- a/sbt +++ b/sbt @@ -2,5 +2,3 @@ export SBT_OPTS="-XX:+UseNUMA -XX:-UseBiasedLocking -Xms3024M -Xmx3048M -Xss1M -XX:MaxPermSize=256m -XX:+UseParallelGC" sbt "$@" - - diff --git a/src/main/scala/nl/gideondk/sentinel/Action.scala b/src/main/scala/nl/gideondk/sentinel/Action.scala index 30dd279..4399376 100644 --- a/src/main/scala/nl/gideondk/sentinel/Action.scala +++ b/src/main/scala/nl/gideondk/sentinel/Action.scala @@ -32,7 +32,7 @@ object ProducerAction { } object ConsumeStream { - def apply[E, A <: E, B <: E, C](fun: A ⇒ Enumerator[B] ⇒ Future[C]): ConsumeStream[E, C] = new ConsumeStream[E, C] { + def apply[E, A <: E, B <: E, C](fun: A ⇒ Source[B, Any] ⇒ Future[C]): ConsumeStream[E, C] = new ConsumeStream[E, C] { val f = fun.asInstanceOf[E ⇒ Source[E, Any] ⇒ Future[C]] } } diff --git a/src/main/scala/nl/gideondk/sentinel/ConsumerStage.scala b/src/main/scala/nl/gideondk/sentinel/ConsumerStage.scala index fbaabd5..e3e3ab0 100644 --- a/src/main/scala/nl/gideondk/sentinel/ConsumerStage.scala +++ b/src/main/scala/nl/gideondk/sentinel/ConsumerStage.scala @@ -1,14 +1,14 @@ package nl.gideondk.sentinel import akka.stream._ -import akka.stream.scaladsl.{BidiFlow, Broadcast, Flow, GraphDSL, Merge, Source} +import akka.stream.scaladsl.{ BidiFlow, Broadcast, Flow, GraphDSL, Merge, Source } import akka.stream.stage.GraphStageLogic.EagerTerminateOutput -import akka.stream.stage.{GraphStage, GraphStageLogic, InHandler, OutHandler} +import akka.stream.stage.{ GraphStage, GraphStageLogic, InHandler, OutHandler } import nl.gideondk.sentinel.ConsumerAction._ import nl.gideondk.sentinel.Registration.SingularResponseRegistration -import scala.concurrent.{ExecutionContext, Future, Promise} +import scala.concurrent.{ ExecutionContext, Future, Promise } class ConsumerStage[Evt, Cmd](resolver: Resolver[Evt]) extends GraphStage[FanOutShape2[Evt, (Evt, ProducerAction[Evt, Cmd]), Event[Evt]]] { @@ -89,27 +89,33 @@ class ConsumerStage[Evt, Cmd](resolver: Resolver[Evt]) extends GraphStage[FanOut initialChunk match { case Some(x) ⇒ push(signalOut, StreamEvent(Source.single(x) ++ Source.fromGraph(chunkSource.source))) - case None ⇒ push(signalOut, StreamEvent(Source.fromGraph(chunkSource.source))) + case None ⇒ push(signalOut, StreamEvent(Source.fromGraph(chunkSource.source))) } } + def consumeStream(initialChunk: Evt): Unit = { + // emit(actionOut, (initialChunk, ProducerAction.ConsumeStream(Source.fromGraph(chunkSource.source)))) + } + def onPush(): Unit = { val evt = grab(eventIn) resolver.process(evt) match { - case x: ProducerAction[Evt, Cmd] ⇒ emit(actionOut, (evt, x)) + case x: ProducerAction.Signal[Evt, Cmd] ⇒ emit(actionOut, (evt, x)) + + // case x: ProducerAction.ProduceStream[Evt, Cmd] ⇒ emit(actionOut, (evt, x)) - case AcceptSignal ⇒ push(signalOut, SingularEvent(evt)) + case AcceptSignal ⇒ push(signalOut, SingularEvent(evt)) - case AcceptError ⇒ push(signalOut, SingularErrorEvent(evt)) + case AcceptError ⇒ push(signalOut, SingularErrorEvent(evt)) - case StartStream ⇒ startStream(None) + case StartStream ⇒ startStream(None) - case ConsumeStreamChunk ⇒ startStream(Some(evt)) + case ConsumeStreamChunk ⇒ startStream(Some(evt)) - case ConsumeChunkAndEndStream ⇒ push(signalOut, StreamEvent(Source.single(evt))) + case ConsumeChunkAndEndStream ⇒ push(signalOut, StreamEvent(Source.single(evt))) - case Ignore ⇒ () + case Ignore ⇒ () } } diff --git a/src/main/scala/nl/gideondk/sentinel/Pipeline.scala b/src/main/scala/nl/gideondk/sentinel/Pipeline.scala index b9b63db..bf18990 100644 --- 
a/src/main/scala/nl/gideondk/sentinel/Pipeline.scala +++ b/src/main/scala/nl/gideondk/sentinel/Pipeline.scala @@ -1,12 +1,12 @@ package nl.gideondk.sentinel -import akka.actor.{Actor, ActorSystem} +import akka.actor.{ Actor, ActorSystem } import akka.stream.OverflowStrategy -import akka.stream.scaladsl.{BidiFlow, Flow, Tcp} +import akka.stream.scaladsl.{ BidiFlow, Flow, Tcp } import akka.util.ByteString import Protocol._ -import scala.concurrent.{ExecutionContext, Future} +import scala.concurrent.{ ExecutionContext, Future } object Pipeline { def create[Cmd, Evt](protocol: BidiFlow[ByteString, Evt, Cmd, ByteString, Any], resolver: Resolver[Evt], parallelism: Int, shouldReact: Boolean)(implicit ec: ExecutionContext) = { diff --git a/src/main/scala/nl/gideondk/sentinel/Processor.scala b/src/main/scala/nl/gideondk/sentinel/Processor.scala index c082a81..a6edc03 100644 --- a/src/main/scala/nl/gideondk/sentinel/Processor.scala +++ b/src/main/scala/nl/gideondk/sentinel/Processor.scala @@ -1,9 +1,9 @@ package nl.gideondk.sentinel import akka.stream.BidiShape -import akka.stream.scaladsl.{BidiFlow, Broadcast, Flow, GraphDSL, Merge, Sink, Source} +import akka.stream.scaladsl.{ BidiFlow, Broadcast, Flow, GraphDSL, Merge, Sink, Source } -import scala.concurrent.{ExecutionContext, Promise} +import scala.concurrent.{ ExecutionContext, Promise } case class Processor[Cmd, Evt](flow: BidiFlow[Command[Cmd], Cmd, Evt, Event[Evt], Any]) @@ -14,11 +14,12 @@ object Processor { val producerStage = new ProducerStage[Evt, Cmd]() val functionApply = Flow[(Evt, ProducerAction[Evt, Cmd])].mapAsync[Command[Cmd]](producerParallism) { - case (evt, x: ProducerAction.Signal[Evt, Cmd]) ⇒ x.f(evt).map(x ⇒ SingularCommand[Cmd](x)) + case (evt, x: ProducerAction.Signal[Evt, Cmd]) ⇒ x.f(evt).map(SingularCommand[Cmd]) + case (evt, x: ProducerAction.ProduceStream[Evt, Cmd]) ⇒ x.f(evt).map(StreamingCommand[Cmd]) } Processor(BidiFlow.fromGraph[Command[Cmd], Cmd, Evt, Event[Evt], Any] { - GraphDSL.create() { implicit b => + GraphDSL.create() { implicit b ⇒ import GraphDSL.Implicits._ val producer = b add producerStage diff --git a/src/main/scala/nl/gideondk/sentinel/ProducerStage.scala b/src/main/scala/nl/gideondk/sentinel/ProducerStage.scala index 182664b..8169bf4 100644 --- a/src/main/scala/nl/gideondk/sentinel/ProducerStage.scala +++ b/src/main/scala/nl/gideondk/sentinel/ProducerStage.scala @@ -1,25 +1,25 @@ package nl.gideondk.sentinel import akka.stream._ -import akka.stream.scaladsl.{BidiFlow, Flow, GraphDSL, Source} +import akka.stream.scaladsl.{ BidiFlow, Flow, GraphDSL, Source } import akka.stream.stage.GraphStageLogic._ -import akka.stream.stage.{GraphStage, GraphStageLogic, InHandler, OutHandler} +import akka.stream.stage.{ GraphStage, GraphStageLogic, InHandler, OutHandler } import nl.gideondk.sentinel.ConsumerAction._ class ProducerStage[In, Out] extends GraphStage[FlowShape[Command[Out], Out]] { private val in = Inlet[Command[Out]]("ProducerStage.Command.In") private val out = Outlet[Out]("ProducerStage.Command.Out") - var streaming = false - var closeAfterCompletion = false - val shape = new FlowShape(in, out) override def createLogic(effectiveAttributes: Attributes) = new GraphStageLogic(shape) { + var streaming = false + var closeAfterCompletion = false + val defaultInHandler = new InHandler { override def onPush(): Unit = grab(in) match { - case x: SingularCommand[Out] ⇒ push(out, x.payload) - case x: StreamingCommand[Out] => stream(x.stream) + case x: SingularCommand[Out] ⇒ push(out, x.payload) + case x: 
StreamingCommand[Out] ⇒ stream(x.stream) } override def onUpstreamFinish(): Unit = { @@ -44,8 +44,7 @@ class ProducerStage[In, Out] extends GraphStage[FlowShape[Command[Out], Out]] { override def onUpstreamFinish(): Unit = { if (closeAfterCompletion) { completeStage() - } - else { + } else { streaming = false setHandler(out, waitForDemandHandler) if (isAvailable(out)) pull(in) diff --git a/src/main/scala/nl/gideondk/sentinel/client/Client.scala b/src/main/scala/nl/gideondk/sentinel/client/Client.scala new file mode 100644 index 0000000..fa123d3 --- /dev/null +++ b/src/main/scala/nl/gideondk/sentinel/client/Client.scala @@ -0,0 +1,64 @@ +package nl.gideondk.sentinel.client + +import akka.actor.ActorSystem +import akka.stream._ +import akka.stream.scaladsl.{ BidiFlow, GraphDSL, RunnableGraph, Sink, Source, Tcp } +import akka.stream.stage._ +import akka.util.ByteString +import akka.{ Done, NotUsed, stream } +import nl.gideondk.sentinel.client.ClientStage.ConnectionEvent +import nl.gideondk.sentinel.{ Command, Event, Processor } + +import scala.collection.mutable +import scala.concurrent._ +import scala.concurrent.duration._ +import scala.util.{ Failure, Success, Try } +import ClientStage._ +import Client._ + +object Client { + + trait ClientException + + case class InputQueueClosed() extends Exception with ClientException + + case class InputQueueUnavailable() extends Exception with ClientException + +} + +class Client[Cmd, Evt](hosts: Source[ConnectionEvent, NotUsed], + connectionsPerHost: Int, maximumFailuresPerHost: Int, recoveryPeriod: FiniteDuration, + inputBufferSize: Int, inputOverflowStrategy: OverflowStrategy, + processor: Processor[Cmd, Evt], protocol: BidiFlow[ByteString, Evt, Cmd, ByteString, Any])(implicit system: ActorSystem, mat: ActorMaterializer) { + + val eventHandler = Sink.foreach[(Try[Event[Evt]], Promise[Event[Evt]])] { + case (evt, context) ⇒ context.complete(evt) + } + + val g = RunnableGraph.fromGraph(GraphDSL.create(Source.queue[(Command[Cmd], Promise[Event[Evt]])](inputBufferSize, inputOverflowStrategy)) { implicit b ⇒ + source ⇒ + import GraphDSL.Implicits._ + + val s = b.add(new ClientStage[Cmd, Evt](connectionsPerHost, maximumFailuresPerHost, recoveryPeriod, processor, protocol)) + + b.add(hosts) ~> s.in0 + source.out ~> s.in1 + + s.out ~> b.add(eventHandler) + + ClosedShape + }) + + val input = g.run() + + def request(command: Command[Cmd])(implicit ec: ExecutionContext): Future[Event[Evt]] = { + val context = Promise[Event[Evt]]() + input.offer((command, context)).flatMap { + case QueueOfferResult.Dropped ⇒ Future.failed(InputQueueUnavailable()) + case QueueOfferResult.QueueClosed ⇒ Future.failed(InputQueueClosed()) + case QueueOfferResult.Failure(reason) ⇒ Future.failed(reason) + case QueueOfferResult.Enqueued ⇒ context.future + } + } + +} diff --git a/src/main/scala/nl/gideondk/sentinel/Client.scala b/src/main/scala/nl/gideondk/sentinel/client/ClientStage.scala similarity index 80% rename from src/main/scala/nl/gideondk/sentinel/Client.scala rename to src/main/scala/nl/gideondk/sentinel/client/ClientStage.scala index 014c56a..b2f366a 100644 --- a/src/main/scala/nl/gideondk/sentinel/Client.scala +++ b/src/main/scala/nl/gideondk/sentinel/client/ClientStage.scala @@ -1,17 +1,17 @@ -package nl.gideondk.sentinel +package nl.gideondk.sentinel.client -import akka.{Done, NotUsed, stream} import akka.actor.ActorSystem -import akka.event.Logging import akka.stream._ -import akka.stream.scaladsl.{BidiFlow, Flow, GraphDSL, RunnableGraph, Sink, Source, Tcp} +import 
akka.stream.scaladsl.{ BidiFlow, GraphDSL, RunnableGraph, Sink, Source, Tcp } import akka.stream.stage._ import akka.util.ByteString +import akka.{ Done, NotUsed, stream } +import nl.gideondk.sentinel.{ Command, Event, Processor } import scala.collection.mutable import scala.concurrent._ import scala.concurrent.duration._ -import scala.util.{Failure, Success, Try} +import scala.util.{ Failure, Success, Try } case class Host(host: String, port: Int) @@ -35,10 +35,9 @@ object ClientStage { } -import ClientStage._ +import nl.gideondk.sentinel.client.ClientStage._ -class ClientStage[Cmd, Evt](connectionsPerHost: Int, maximumFailuresPerHost: Int, recoveryPeriod: FiniteDuration, processor: () => Processor[Cmd, Evt], protocol: () => BidiFlow[ByteString, Evt, Cmd, ByteString, Any]) - (implicit system: ActorSystem, mat: ActorMaterializer) extends GraphStage[FanInShape2[ConnectionEvent, (Command[Cmd], Promise[Event[Evt]]), (Try[Event[Evt]], Promise[Event[Evt]])]] { +class ClientStage[Cmd, Evt](connectionsPerHost: Int, maximumFailuresPerHost: Int, recoveryPeriod: FiniteDuration, processor: Processor[Cmd, Evt], protocol: BidiFlow[ByteString, Evt, Cmd, ByteString, Any])(implicit system: ActorSystem, mat: ActorMaterializer) extends GraphStage[FanInShape2[ConnectionEvent, (Command[Cmd], Promise[Event[Evt]]), (Try[Event[Evt]], Promise[Event[Evt]])]] { type Context = Promise[Event[Evt]] @@ -76,11 +75,12 @@ class ClientStage[Cmd, Evt](connectionsPerHost: Int, maximumFailuresPerHost: Int def ensureConnections() = { hosts .find(_._2 < connectionsPerHost) - .foreach { case (host, connectionCount) => - val connection = Connection(host, nextId()) - connection.initialize() - connectionPool.enqueue(connection) - hosts(connection.host) = connectionCount + 1 + .foreach { + case (host, connectionCount) ⇒ + val connection = Connection(host, nextId()) + connection.initialize() + connectionPool.enqueue(connection) + hosts(connection.host) = connectionCount + 1 } pullCommand(false) @@ -98,17 +98,16 @@ class ClientStage[Cmd, Evt](connectionsPerHost: Int, maximumFailuresPerHost: Int pull(commandIn) } else if (isAvailable(commandIn)) { connectionPool.dequeueFirst(_.canBePushedForCommand) match { - case Some(connection) => + case Some(connection) ⇒ val (command, context) = grab(commandIn) connection.pushCommand(command, context) connectionPool.enqueue(connection) pull(commandIn) - case None => if (shouldInitializeConnection) ensureConnections() + case None ⇒ if (shouldInitializeConnection) ensureConnections() } } - def connectionFailed(connection: Connection, cause: Throwable) = { val host = connection.host val totalFailure = hostFailures.getOrElse(host, 0) + 1 @@ -150,8 +149,8 @@ class ClientStage[Cmd, Evt](connectionsPerHost: Int, maximumFailuresPerHost: Int setHandler(connectionEventIn, new InHandler { override def onPush() = { grab(connectionEventIn) match { - case LinkUp(connection) => addHost(connection) - case LinkDown(connection) => removeHost(connection) + case LinkUp(connection) ⇒ addHost(connection) + case LinkDown(connection) ⇒ removeHost(connection) } pull(connectionEventIn) } @@ -177,7 +176,7 @@ class ClientStage[Cmd, Evt](connectionsPerHost: Int, maximumFailuresPerHost: Int else { connectionPool .dequeueFirst(_.canBePulledForEvent) - .foreach(connection => { + .foreach(connection ⇒ { if (isAvailable(eventOut)) { push(eventOut, connection.pullEvent) } @@ -188,11 +187,10 @@ class ClientStage[Cmd, Evt](connectionsPerHost: Int, maximumFailuresPerHost: Int override def onDownstreamFinish() = completeStage() }) - 
override def onTimer(timerKey: Any) = hostFailures.clear() case class Connection(host: Host, connectionId: Int) { - connection => + connection ⇒ private val connectionEventIn = new SubSinkInlet[Event[Evt]](s"Connection.[$host].[$connectionId].in") private val connectionCommandOut = new SubSourceOutlet[Command[Cmd]](s"Connection.[$host].[$connectionId].out") private val contexts = mutable.Queue.empty[Promise[Event[Evt]]] @@ -215,11 +213,11 @@ class ClientStage[Cmd, Evt](connectionsPerHost: Int, maximumFailuresPerHost: Int def close(cause: Option[Throwable]) = { val exception = cause match { - case Some(cause) => ConnectionClosedWithReasonException(s"Failure to process request to $host at antenna $connectionId", cause) - case None => ConnectionClosedWithoutReasonException(s"Failure to process request to $host antenna $connectionId") + case Some(cause) ⇒ ConnectionClosedWithReasonException(s"Failure to process request to $host at antenna $connectionId", cause) + case None ⇒ ConnectionClosedWithoutReasonException(s"Failure to process request to $host antenna $connectionId") } - contexts.dequeueAll(_ => true).foreach(context => { + contexts.dequeueAll(_ ⇒ true).foreach(context ⇒ { failures.enqueue((Failure(exception), context)) }) @@ -236,8 +234,8 @@ class ClientStage[Cmd, Evt](connectionsPerHost: Int, maximumFailuresPerHost: Int override def onUpstreamFinish() = removeConnection(connection, None) override def onUpstreamFailure(reason: Throwable) = reason match { - case t: TimeoutException => removeConnection(connection, Some(t)) - case _ => connectionFailed(connection, reason) + case t: TimeoutException ⇒ removeConnection(connection, Some(t)) + case _ ⇒ connectionFailed(connection, reason) } }) @@ -247,14 +245,13 @@ class ClientStage[Cmd, Evt](connectionsPerHost: Int, maximumFailuresPerHost: Int override def onDownstreamFinish() = () }) - RunnableGraph.fromGraph(GraphDSL.create() { implicit b => + RunnableGraph.fromGraph(GraphDSL.create() { implicit b ⇒ import GraphDSL.Implicits._ - val pipeline = b.add(processor() + val pipeline = b.add(processor .flow - .atop(protocol().reversed) - .join(Tcp().outgoingConnection(host.host, host.port)) - ) + .atop(protocol.reversed) + .join(Tcp().outgoingConnection(host.host, host.port))) connectionCommandOut.source ~> pipeline.in pipeline.out ~> connectionEventIn.sink @@ -267,4 +264,4 @@ class ClientStage[Cmd, Evt](connectionsPerHost: Int, maximumFailuresPerHost: Int } } -} +} \ No newline at end of file diff --git a/src/test/scala/nl/gideondk/sentinel/ClientStageSpec.scala b/src/test/scala/nl/gideondk/sentinel/ClientStageSpec.scala index 5093d18..c480ba0 100644 --- a/src/test/scala/nl/gideondk/sentinel/ClientStageSpec.scala +++ b/src/test/scala/nl/gideondk/sentinel/ClientStageSpec.scala @@ -2,36 +2,26 @@ package nl.gideondk.sentinel import akka.actor.ActorSystem import akka.event.Logging -import akka.stream.{ActorMaterializer, Attributes, ClosedShape, OverflowStrategy} -import akka.stream.scaladsl.{Broadcast, Flow, GraphDSL, Merge, RunnableGraph, Sink, Source, Tcp} -import akka.stream.testkit.{TestPublisher, TestSubscriber} +import akka.stream.{ ActorMaterializer, Attributes, ClosedShape, OverflowStrategy } +import akka.stream.scaladsl.{ Broadcast, Flow, GraphDSL, Merge, RunnableGraph, Sink, Source, Tcp } +import akka.stream.testkit.{ TestPublisher, TestSubscriber } import akka.util.ByteString +import nl.gideondk.sentinel.client.{ ClientStage, Host } import nl.gideondk.sentinel.protocol._ import org.scalatest._ import protocol.SimpleMessage._ import 
scala.concurrent._ import duration._ -import scala.util.{Failure, Success, Try} +import scala.util.{ Failure, Success, Try } object ClientStageSpec { - - val eventFlow = Flow[Event[SimpleMessageFormat]].flatMapConcat { - case x: StreamEvent[SimpleMessageFormat] => x.chunks - case x: SingularEvent[SimpleMessageFormat] => Source.single(x.data) - } - - val headSink = Sink.head[Event[SimpleMessageFormat]] - val seqSink = Sink.seq[SimpleMessageFormat] - val ignoreSink = Sink.ignore - - def mockServer(system: ActorSystem, port: Int): Unit = { implicit val sys = system import system.dispatcher implicit val materializer = ActorMaterializer() - val handler = Sink.foreach[Tcp.IncomingConnection] { conn => + val handler = Sink.foreach[Tcp.IncomingConnection] { conn ⇒ conn handleWith Flow[ByteString] } @@ -39,9 +29,9 @@ object ClientStageSpec { val binding = connections.to(handler).run() binding.onComplete { - case Success(b) => + case Success(b) ⇒ println("Server started, listening on: " + b.localAddress) - case Failure(e) => + case Failure(e) ⇒ println(s"Server could not bind to localhost:$port: ${e.getMessage}") system.terminate() } @@ -59,14 +49,14 @@ class ClientStageSpec extends AkkaSpec { val numberOfMessages = 1024 - val messages = (for (i <- 0 to numberOfMessages) yield (SingularCommand[SimpleMessageFormat](SimpleReply(i.toString)), Promise[Event[SimpleMessageFormat]]())).toList - val sink = Sink.foreach[(Try[Event[SimpleMessageFormat]], Promise[Event[SimpleMessageFormat]])] { case (event, context) => context.complete(event) } + val messages = (for (i ← 0 to numberOfMessages) yield (SingularCommand[SimpleMessageFormat](SimpleReply(i.toString)), Promise[Event[SimpleMessageFormat]]())).toList + val sink = Sink.foreach[(Try[Event[SimpleMessageFormat]], Promise[Event[SimpleMessageFormat]])] { case (event, context) ⇒ context.complete(event) } val g = RunnableGraph.fromGraph(GraphDSL.create(Source.queue[(Command[SimpleMessageFormat], Promise[Event[SimpleMessageFormat]])](numberOfMessages, OverflowStrategy.backpressure)) { implicit b ⇒ - source => + source ⇒ import GraphDSL.Implicits._ - val s = b.add(new ClientStage[SimpleMessageFormat, SimpleMessageFormat](32, 8, 2 seconds, () => Processor(SimpleHandler, 1, false), () => SimpleMessage.protocol.reversed)) + val s = b.add(new ClientStage[SimpleMessageFormat, SimpleMessageFormat](32, 8, 2 seconds, Processor(SimpleHandler, 1, false), SimpleMessage.protocol.reversed)) Source.single(ClientStage.LinkUp(Host("localhost", 9000))) ~> s.in0 source.out ~> s.in1 @@ -80,7 +70,7 @@ class ClientStageSpec extends AkkaSpec { messages.foreach(sourceQueue.offer) val results = Future.sequence(messages.map(_._2.future)) - Await.result(results, 1 second) should be(messages.map(x => SingularEvent(x._1.payload))) + Await.result(results, 1 second) should be(messages.map(x ⇒ SingularEvent(x._1.payload))) sourceQueue.complete() } } diff --git a/src/test/scala/nl/gideondk/sentinel/ConsumerStageSpec.scala b/src/test/scala/nl/gideondk/sentinel/ConsumerStageSpec.scala index 9069f74..0a871ef 100644 --- a/src/test/scala/nl/gideondk/sentinel/ConsumerStageSpec.scala +++ b/src/test/scala/nl/gideondk/sentinel/ConsumerStageSpec.scala @@ -2,9 +2,9 @@ package nl.gideondk.sentinel import akka.actor.ActorSystem import akka.event.Logging -import akka.stream.{ActorMaterializer, Attributes, ClosedShape} -import akka.stream.scaladsl.{Broadcast, Flow, GraphDSL, Merge, RunnableGraph, Sink, Source} -import akka.stream.testkit.{TestPublisher, TestSubscriber} +import akka.stream.{ 
ActorMaterializer, Attributes, ClosedShape } +import akka.stream.scaladsl.{ Broadcast, Flow, GraphDSL, Merge, RunnableGraph, Sink, Source } +import akka.stream.testkit.{ TestPublisher, TestSubscriber } import nl.gideondk.sentinel.protocol._ import org.scalatest._ import protocol.SimpleMessage._ @@ -12,37 +12,28 @@ import protocol.SimpleMessage._ import scala.concurrent._ import duration._ -object ConsumerStageSpec { +class ConsumerStageSpec extends AkkaSpec { val eventFlow = Flow[Event[SimpleMessageFormat]].flatMapConcat { - case x: StreamEvent[SimpleMessageFormat] => x.chunks - case x: SingularEvent[SimpleMessageFormat] => Source.single(x.data) + case x: StreamEvent[SimpleMessageFormat] ⇒ x.chunks + case x: SingularEvent[SimpleMessageFormat] ⇒ Source.single(x.data) } - def stage() = new ConsumerStage[SimpleMessageFormat, SimpleMessageFormat](SimpleHandler) - - val headSink = Sink.head[Event[SimpleMessageFormat]] - val seqSink = Sink.seq[SimpleMessageFormat] - val ignoreSink = Sink.ignore -} - -class ConsumerStageSpec extends AkkaSpec { - - import ConsumerStageSpec._ + val stage = new ConsumerStage[SimpleMessageFormat, SimpleMessageFormat](SimpleHandler) "The ConsumerStage" should { "handle incoming events" in { implicit val materializer = ActorMaterializer() - val g = RunnableGraph.fromGraph(GraphDSL.create(headSink) { implicit b ⇒ + val g = RunnableGraph.fromGraph(GraphDSL.create(Sink.head[Event[SimpleMessageFormat]]) { implicit b ⇒ sink ⇒ import GraphDSL.Implicits._ - val s = b.add(stage()) + val s = b add stage Source.single(SimpleReply("")) ~> s.in s.out1 ~> sink.in - s.out0 ~> ignoreSink + s.out0 ~> Sink.ignore ClosedShape }) @@ -57,12 +48,11 @@ class ConsumerStageSpec extends AkkaSpec { sink ⇒ import GraphDSL.Implicits._ - val s = b.add(stage()) - + val s = b add stage Source(List(SimpleReply("A"), SimpleReply("B"), SimpleReply("C"))) ~> s.in s.out1 ~> sink.in - s.out0 ~> ignoreSink + s.out0 ~> Sink.ignore ClosedShape }) @@ -80,11 +70,11 @@ class ConsumerStageSpec extends AkkaSpec { sink ⇒ import GraphDSL.Implicits._ - val s = b.add(stage()) + val s = b add stage Source.fromPublisher(inProbe) ~> s.in s.out1 ~> sink.in - s.out0 ~> ignoreSink + s.out0 ~> Sink.ignore ClosedShape }) @@ -115,8 +105,8 @@ class ConsumerStageSpec extends AkkaSpec { // Request the initial element from the sub-source entitySub.request(1) -// // Pull is coming from merged stream for initial element -// inSub.expectRequest(1) + // // Pull is coming from merged stream for initial element + // inSub.expectRequest(1) // Expect initial element to be available entityProbe.expectNext() @@ -148,15 +138,15 @@ class ConsumerStageSpec extends AkkaSpec { val chunkSource = Source(List(SimpleStreamChunk("A"), SimpleStreamChunk("B"), SimpleStreamChunk("C"), SimpleStreamChunk(""))) - val g = RunnableGraph.fromGraph(GraphDSL.create(seqSink) { implicit b ⇒ + val g = RunnableGraph.fromGraph(GraphDSL.create(Sink.seq[SimpleMessageFormat]) { implicit b ⇒ sink ⇒ import GraphDSL.Implicits._ - val s = b.add(stage()) + val s = b add stage chunkSource ~> s.in s.out1 ~> eventFlow ~> sink.in - s.out0 ~> ignoreSink + s.out0 ~> Sink.ignore ClosedShape }) @@ -170,15 +160,15 @@ class ConsumerStageSpec extends AkkaSpec { val items = List.fill(10)(List(SimpleStreamChunk("A"), SimpleStreamChunk("B"), SimpleStreamChunk("C"), SimpleStreamChunk(""))).flatten val chunkSource = Source(items) - val g = RunnableGraph.fromGraph(GraphDSL.create(seqSink) { implicit b ⇒ + val g = RunnableGraph.fromGraph(GraphDSL.create(Sink.seq[SimpleMessageFormat]) { 
implicit b ⇒ sink ⇒ import GraphDSL.Implicits._ - val s = b.add(stage()) + val s = b add stage chunkSource ~> s.in s.out1 ~> eventFlow ~> sink.in - s.out0 ~> ignoreSink + s.out0 ~> Sink.ignore ClosedShape }) @@ -195,15 +185,15 @@ class ConsumerStageSpec extends AkkaSpec { val chunkSource = Source(a ++ b ++ c) - val g = RunnableGraph.fromGraph(GraphDSL.create(seqSink) { implicit b ⇒ + val g = RunnableGraph.fromGraph(GraphDSL.create(Sink.seq[SimpleMessageFormat]) { implicit b ⇒ sink ⇒ import GraphDSL.Implicits._ - val s = b.add(stage()) + val s = b add stage chunkSource ~> s.in - s.out1 ~> ConsumerStageSpec.eventFlow ~> sink.in - s.out0 ~> ignoreSink + s.out1 ~> eventFlow ~> sink.in + s.out0 ~> Sink.ignore ClosedShape }) @@ -220,10 +210,10 @@ class ConsumerStageSpec extends AkkaSpec { sink ⇒ import GraphDSL.Implicits._ - val s = b.add(stage()) + val s = b add stage Source(a) ~> s.in - s.out1 ~> ignoreSink + s.out1 ~> Sink.ignore s.out0 ~> sink.in ClosedShape diff --git a/src/test/scala/nl/gideondk/sentinel/ProcessorSpec.scala b/src/test/scala/nl/gideondk/sentinel/ProcessorSpec.scala index 3d447cc..434c299 100644 --- a/src/test/scala/nl/gideondk/sentinel/ProcessorSpec.scala +++ b/src/test/scala/nl/gideondk/sentinel/ProcessorSpec.scala @@ -1,26 +1,20 @@ package nl.gideondk.sentinel import akka.event.Logging -import akka.stream.{ActorMaterializer, Attributes, ClosedShape} -import akka.stream.scaladsl.{Flow, GraphDSL, RunnableGraph, Sink, Source} -import akka.stream.testkit.{TestPublisher, TestSubscriber} +import akka.stream.{ ActorMaterializer, Attributes, ClosedShape } +import akka.stream.scaladsl.{ Flow, GraphDSL, RunnableGraph, Sink, Source } +import akka.stream.testkit.{ TestPublisher, TestSubscriber } import nl.gideondk.sentinel.Command.Ask import nl.gideondk.sentinel.Registration.SingularResponseRegistration import nl.gideondk.sentinel.protocol._ -import scala.concurrent.{Await, Promise} +import scala.concurrent.{ Await, Promise } import scala.concurrent.duration._ -object ProcessorSpec { - -} - class ProcessorSpec extends AkkaSpec { val processor = Processor[SimpleMessageFormat, SimpleMessageFormat](SimpleHandler, 1) val serverProcessor = Processor[SimpleMessageFormat, SimpleMessageFormat](SimpleServerHandler, 1, true) - import ProcessorSpec._ - "The AntennaStage" should { "correctly flow in a client, server situation" in { import SimpleCommand._ @@ -33,8 +27,8 @@ class ProcessorSpec extends AkkaSpec { val source = Source[SingularCommand[SimpleMessageFormat]](List(pingCommand, zeroCommand, pingCommand, zeroCommand)) - val flow = RunnableGraph.fromGraph(GraphDSL.create(Sink.seq[Event[SimpleMessageFormat]]) { implicit b => - sink => + val flow = RunnableGraph.fromGraph(GraphDSL.create(Sink.seq[Event[SimpleMessageFormat]]) { implicit b ⇒ + sink ⇒ import GraphDSL.Implicits._ val client = b.add(processor.flow) diff --git a/src/test/scala/nl/gideondk/sentinel/ProducerStageSpec.scala b/src/test/scala/nl/gideondk/sentinel/ProducerStageSpec.scala index 0beddcd..754a3c0 100644 --- a/src/test/scala/nl/gideondk/sentinel/ProducerStageSpec.scala +++ b/src/test/scala/nl/gideondk/sentinel/ProducerStageSpec.scala @@ -5,7 +5,7 @@ import akka.stream._ import akka.stream.scaladsl._ import nl.gideondk.sentinel.Registration.SingularResponseRegistration -import nl.gideondk.sentinel.protocol.{SimpleMessageFormat, SimpleReply} +import nl.gideondk.sentinel.protocol.{ SimpleMessageFormat, SimpleReply } import scala.concurrent._ import scala.concurrent.duration._ diff --git 
a/src/test/scala/nl/gideondk/sentinel/protocol/SimpleMessage.scala b/src/test/scala/nl/gideondk/sentinel/protocol/SimpleMessage.scala index 553ae28..21a0747 100644 --- a/src/test/scala/nl/gideondk/sentinel/protocol/SimpleMessage.scala +++ b/src/test/scala/nl/gideondk/sentinel/protocol/SimpleMessage.scala @@ -1,7 +1,7 @@ package nl.gideondk.sentinel.protocol -import akka.stream.scaladsl.{BidiFlow, Framing} -import akka.util.{ByteString, ByteStringBuilder} +import akka.stream.scaladsl.{ BidiFlow, Framing } +import akka.util.{ ByteString, ByteStringBuilder } import nl.gideondk.sentinel._ import scala.concurrent.Future @@ -22,7 +22,6 @@ case class SimpleStreamChunk(payload: String) extends SimpleMessageFormat // 3 case class SimpleError(payload: String) extends SimpleMessageFormat - object SimpleMessage { val PING_PONG = 1 val TOTAL_CHUNK_SIZE = 2 @@ -76,20 +75,18 @@ import SimpleMessage._ object SimpleHandler extends Resolver[SimpleMessageFormat] { def process: PartialFunction[SimpleMessageFormat, Action] = { - case SimpleStreamChunk(x) ⇒ if (x.length > 0) ConsumerAction.ConsumeStreamChunk else ConsumerAction.EndStream - case x: SimpleError ⇒ ConsumerAction.AcceptError - case x: SimpleReply ⇒ ConsumerAction.AcceptSignal + case SimpleStreamChunk(x) ⇒ if (x.length > 0) ConsumerAction.ConsumeStreamChunk else ConsumerAction.EndStream + case x: SimpleError ⇒ ConsumerAction.AcceptError + case x: SimpleReply ⇒ ConsumerAction.AcceptSignal case SimpleCommand(PING_PONG, payload) ⇒ ProducerAction.Signal { x: SimpleCommand ⇒ Future(SimpleReply("PONG")) } - case x => println("Unhandled: " + x); ConsumerAction.Ignore + case x ⇒ println("Unhandled: " + x); ConsumerAction.Ignore } } - object SimpleServerHandler extends Resolver[SimpleMessageFormat] { def process: PartialFunction[SimpleMessageFormat, Action] = { case SimpleCommand(PING_PONG, payload) ⇒ ProducerAction.Signal { x: SimpleCommand ⇒ Future(SimpleReply("PONG")) } - case x => println("Unhandled: " + x); ConsumerAction.Ignore - + case x ⇒ println("Unhandled: " + x); ConsumerAction.Ignore // case SimpleCommand(TOTAL_CHUNK_SIZE, payload) ⇒ ProducerAction.ConsumeStream { x: SimpleCommand ⇒ // s: Enumerator[SimpleStreamChunk] ⇒ From 80e39b08c24365b906a8010c454973790f41040d Mon Sep 17 00:00:00 2001 From: Gideon de Kok Date: Sun, 25 Dec 2016 11:28:16 +0100 Subject: [PATCH 35/54] Client initialisation --- .../scala/nl/gideondk/sentinel/Config.scala | 2 +- .../scala/nl/gideondk/sentinel/Pipeline.scala | 15 --- .../nl/gideondk/sentinel/client/Client.scala | 58 ++++++-- .../sentinel/client/ClientStage.scala | 12 +- .../sentinel/pipeline/ConsumerStage.scala | 127 ++++++++++++++++++ .../sentinel/pipeline/Processor.scala | 47 +++++++ .../sentinel/pipeline/ProducerStage.scala | 68 ++++++++++ .../gideondk/sentinel/pipeline/Resolver.scala | 8 ++ .../gideondk/sentinel/protocol/Action.scala | 71 ++++++++++ .../gideondk/sentinel/protocol/Command.scala | 82 +++++++++++ .../gideondk/sentinel/protocol/Protocol.scala | 14 ++ .../gideondk/sentinel/ConsumerStageSpec.scala | 1 + .../nl/gideondk/sentinel/ProcessorSpec.scala | 5 +- .../gideondk/sentinel/ProducerStageSpec.scala | 6 +- .../sentinel/protocol/SimpleMessage.scala | 1 + 15 files changed, 482 insertions(+), 35 deletions(-) delete mode 100644 src/main/scala/nl/gideondk/sentinel/Pipeline.scala create mode 100644 src/main/scala/nl/gideondk/sentinel/pipeline/ConsumerStage.scala create mode 100644 src/main/scala/nl/gideondk/sentinel/pipeline/Processor.scala create mode 100644 
src/main/scala/nl/gideondk/sentinel/pipeline/ProducerStage.scala create mode 100644 src/main/scala/nl/gideondk/sentinel/pipeline/Resolver.scala create mode 100644 src/main/scala/nl/gideondk/sentinel/protocol/Action.scala create mode 100644 src/main/scala/nl/gideondk/sentinel/protocol/Command.scala create mode 100644 src/main/scala/nl/gideondk/sentinel/protocol/Protocol.scala diff --git a/src/main/scala/nl/gideondk/sentinel/Config.scala b/src/main/scala/nl/gideondk/sentinel/Config.scala index bcd4757..657de77 100644 --- a/src/main/scala/nl/gideondk/sentinel/Config.scala +++ b/src/main/scala/nl/gideondk/sentinel/Config.scala @@ -5,7 +5,7 @@ import com.typesafe.config.ConfigFactory object Config { private lazy val config = ConfigFactory.load().getConfig("sentinel") - val parallelism = config.getInt("pipeline.parallelism") + val producerParallelism = config.getInt("pipeline.parallelism") val framesize = config.getInt("pipeline.framesize") val buffersize = config.getInt("pipeline.buffersize") } diff --git a/src/main/scala/nl/gideondk/sentinel/Pipeline.scala b/src/main/scala/nl/gideondk/sentinel/Pipeline.scala deleted file mode 100644 index bf18990..0000000 --- a/src/main/scala/nl/gideondk/sentinel/Pipeline.scala +++ /dev/null @@ -1,15 +0,0 @@ -package nl.gideondk.sentinel - -import akka.actor.{ Actor, ActorSystem } -import akka.stream.OverflowStrategy -import akka.stream.scaladsl.{ BidiFlow, Flow, Tcp } -import akka.util.ByteString -import Protocol._ - -import scala.concurrent.{ ExecutionContext, Future } - -object Pipeline { - def create[Cmd, Evt](protocol: BidiFlow[ByteString, Evt, Cmd, ByteString, Any], resolver: Resolver[Evt], parallelism: Int, shouldReact: Boolean)(implicit ec: ExecutionContext) = { - protocol >> Processor(resolver, parallelism, shouldReact).flow.reversed - } -} diff --git a/src/main/scala/nl/gideondk/sentinel/client/Client.scala b/src/main/scala/nl/gideondk/sentinel/client/Client.scala index fa123d3..f157559 100644 --- a/src/main/scala/nl/gideondk/sentinel/client/Client.scala +++ b/src/main/scala/nl/gideondk/sentinel/client/Client.scala @@ -1,20 +1,37 @@ package nl.gideondk.sentinel.client +import java.util.concurrent.TimeUnit + import akka.actor.ActorSystem import akka.stream._ -import akka.stream.scaladsl.{ BidiFlow, GraphDSL, RunnableGraph, Sink, Source, Tcp } +import akka.stream.scaladsl.{BidiFlow, Flow, GraphDSL, RunnableGraph, Sink, Source, Tcp} import akka.stream.stage._ import akka.util.ByteString -import akka.{ Done, NotUsed, stream } +import akka.{Done, NotUsed, stream} import nl.gideondk.sentinel.client.ClientStage.ConnectionEvent -import nl.gideondk.sentinel.{ Command, Event, Processor } import scala.collection.mutable import scala.concurrent._ import scala.concurrent.duration._ -import scala.util.{ Failure, Success, Try } +import scala.util.{Failure, Success, Try} import ClientStage._ import Client._ +import nl.gideondk.sentinel.Config +import nl.gideondk.sentinel.pipeline.{Processor, Resolver} +import nl.gideondk.sentinel.protocol._ + +object ClientConfig { + + import com.typesafe.config.ConfigFactory + + private lazy val config = ConfigFactory.load().getConfig("sentinel") + + val connectionsPerHost = config.getInt("client.host.max-connections") + val maxFailuresPerHost = config.getInt("client.host.max-failures") + val failureRecoveryPeriod = Duration(config.getDuration("client.host.failure-recovery-duration").toNanos, TimeUnit.NANOSECONDS) + + val inputBufferSize = config.getInt("client.input-buffer-size") +} object Client { @@ -24,6 +41,29 @@ object Client 
{ case class InputQueueUnavailable() extends Exception with ClientException + def flow[Context, Cmd, Evt](hosts: Source[ConnectionEvent, NotUsed], resolver: Resolver[Evt], shouldReact: Boolean = false, protocol: BidiFlow[Cmd, ByteString, ByteString, Evt, Any])(implicit system: ActorSystem, mat: ActorMaterializer) = { + + val processor = Processor[Cmd, Evt](resolver, Config.producerParallelism) + + Flow.fromGraph(GraphDSL.create(hosts) { implicit b => + connections => + import GraphDSL.Implicits._ + + val s = b.add(new ClientStage[Context, Cmd, Evt](ClientConfig.connectionsPerHost, ClientConfig.maxFailuresPerHost, ClientConfig.failureRecoveryPeriod, processor, protocol.reversed)) + connections ~> s.in0 + FlowShape(s.in1, s.out) + }) + } + + def apply[Cmd, Evt](hosts: Source[ConnectionEvent, NotUsed], resolver: Resolver[Evt], inputOverflowStrategy: OverflowStrategy, shouldReact: Boolean = false, protocol: BidiFlow[Cmd, ByteString, ByteString, Evt, Any])(implicit system: ActorSystem, mat: ActorMaterializer) = { + val processor = Processor[Cmd, Evt](resolver, Config.producerParallelism) + new Client(hosts, ClientConfig.connectionsPerHost, ClientConfig.maxFailuresPerHost, ClientConfig.failureRecoveryPeriod, ClientConfig.inputBufferSize, inputOverflowStrategy, processor, protocol.reversed) + } + + def apply[Cmd, Evt](hosts: List[Host], resolver: Resolver[Evt], inputOverflowStrategy: OverflowStrategy, shouldReact: Boolean = false, protocol: BidiFlow[Cmd, ByteString, ByteString, Evt, Any])(implicit system: ActorSystem, mat: ActorMaterializer) = { + val processor = Processor[Cmd, Evt](resolver, Config.producerParallelism) + new Client(Source(hosts.map(LinkUp)), ClientConfig.connectionsPerHost, ClientConfig.maxFailuresPerHost, ClientConfig.failureRecoveryPeriod, ClientConfig.inputBufferSize, inputOverflowStrategy, processor, protocol.reversed) + } } class Client[Cmd, Evt](hosts: Source[ConnectionEvent, NotUsed], @@ -31,6 +71,8 @@ class Client[Cmd, Evt](hosts: Source[ConnectionEvent, NotUsed], inputBufferSize: Int, inputOverflowStrategy: OverflowStrategy, processor: Processor[Cmd, Evt], protocol: BidiFlow[ByteString, Evt, Cmd, ByteString, Any])(implicit system: ActorSystem, mat: ActorMaterializer) { + type Context = Promise[Event[Evt]] + val eventHandler = Sink.foreach[(Try[Event[Evt]], Promise[Event[Evt]])] { case (evt, context) ⇒ context.complete(evt) } @@ -39,7 +81,7 @@ class Client[Cmd, Evt](hosts: Source[ConnectionEvent, NotUsed], source ⇒ import GraphDSL.Implicits._ - val s = b.add(new ClientStage[Cmd, Evt](connectionsPerHost, maximumFailuresPerHost, recoveryPeriod, processor, protocol)) + val s = b.add(new ClientStage[Context, Cmd, Evt](connectionsPerHost, maximumFailuresPerHost, recoveryPeriod, processor, protocol)) b.add(hosts) ~> s.in0 source.out ~> s.in1 @@ -54,10 +96,10 @@ class Client[Cmd, Evt](hosts: Source[ConnectionEvent, NotUsed], def request(command: Command[Cmd])(implicit ec: ExecutionContext): Future[Event[Evt]] = { val context = Promise[Event[Evt]]() input.offer((command, context)).flatMap { - case QueueOfferResult.Dropped ⇒ Future.failed(InputQueueUnavailable()) - case QueueOfferResult.QueueClosed ⇒ Future.failed(InputQueueClosed()) + case QueueOfferResult.Dropped ⇒ Future.failed(InputQueueUnavailable()) + case QueueOfferResult.QueueClosed ⇒ Future.failed(InputQueueClosed()) case QueueOfferResult.Failure(reason) ⇒ Future.failed(reason) - case QueueOfferResult.Enqueued ⇒ context.future + case QueueOfferResult.Enqueued ⇒ context.future } } diff --git 
a/src/main/scala/nl/gideondk/sentinel/client/ClientStage.scala b/src/main/scala/nl/gideondk/sentinel/client/ClientStage.scala index b2f366a..89e5119 100644 --- a/src/main/scala/nl/gideondk/sentinel/client/ClientStage.scala +++ b/src/main/scala/nl/gideondk/sentinel/client/ClientStage.scala @@ -6,7 +6,9 @@ import akka.stream.scaladsl.{ BidiFlow, GraphDSL, RunnableGraph, Sink, Source, T import akka.stream.stage._ import akka.util.ByteString import akka.{ Done, NotUsed, stream } -import nl.gideondk.sentinel.{ Command, Event, Processor } +import nl.gideondk.sentinel.pipeline.Processor + +import nl.gideondk.sentinel.protocol.{ Command, Event } import scala.collection.mutable import scala.concurrent._ @@ -37,9 +39,7 @@ object ClientStage { import nl.gideondk.sentinel.client.ClientStage._ -class ClientStage[Cmd, Evt](connectionsPerHost: Int, maximumFailuresPerHost: Int, recoveryPeriod: FiniteDuration, processor: Processor[Cmd, Evt], protocol: BidiFlow[ByteString, Evt, Cmd, ByteString, Any])(implicit system: ActorSystem, mat: ActorMaterializer) extends GraphStage[FanInShape2[ConnectionEvent, (Command[Cmd], Promise[Event[Evt]]), (Try[Event[Evt]], Promise[Event[Evt]])]] { - - type Context = Promise[Event[Evt]] +class ClientStage[Context, Cmd, Evt](connectionsPerHost: Int, maximumFailuresPerHost: Int, recoveryPeriod: FiniteDuration, processor: Processor[Cmd, Evt], protocol: BidiFlow[ByteString, Evt, Cmd, ByteString, Any])(implicit system: ActorSystem, mat: ActorMaterializer) extends GraphStage[FanInShape2[ConnectionEvent, (Command[Cmd], Context), (Try[Event[Evt]], Context)]] { val connectionEventIn = Inlet[ConnectionEvent]("ClientStage.ConnectionEvent.In") val commandIn = Inlet[(Command[Cmd], Context)]("ClientStage.Command.In") @@ -193,13 +193,13 @@ class ClientStage[Cmd, Evt](connectionsPerHost: Int, maximumFailuresPerHost: Int connection ⇒ private val connectionEventIn = new SubSinkInlet[Event[Evt]](s"Connection.[$host].[$connectionId].in") private val connectionCommandOut = new SubSourceOutlet[Command[Cmd]](s"Connection.[$host].[$connectionId].out") - private val contexts = mutable.Queue.empty[Promise[Event[Evt]]] + private val contexts = mutable.Queue.empty[Context] def canBePushedForCommand = connectionCommandOut.isAvailable def canBePulledForEvent = connectionEventIn.isAvailable - def pushCommand(command: Command[Cmd], context: Promise[Event[Evt]]) = { + def pushCommand(command: Command[Cmd], context: Context) = { contexts.enqueue(context) connectionCommandOut.push(command) } diff --git a/src/main/scala/nl/gideondk/sentinel/pipeline/ConsumerStage.scala b/src/main/scala/nl/gideondk/sentinel/pipeline/ConsumerStage.scala new file mode 100644 index 0000000..f77febb --- /dev/null +++ b/src/main/scala/nl/gideondk/sentinel/pipeline/ConsumerStage.scala @@ -0,0 +1,127 @@ +package nl.gideondk.sentinel.pipeline + +import akka.stream._ +import akka.stream.scaladsl.Source +import akka.stream.stage.{ GraphStage, GraphStageLogic, InHandler, OutHandler } +import nl.gideondk.sentinel.protocol.ConsumerAction._ +import nl.gideondk.sentinel._ +import nl.gideondk.sentinel.protocol._ + +class ConsumerStage[Evt, Cmd](resolver: Resolver[Evt]) extends GraphStage[FanOutShape2[Evt, (Evt, ProducerAction[Evt, Cmd]), Event[Evt]]] { + + private val eventIn = Inlet[Evt]("ConsumerStage.Event.In") + private val actionOut = Outlet[(Evt, ProducerAction[Evt, Cmd])]("ConsumerStage.Action.Out") + private val signalOut = Outlet[Event[Evt]]("ConsumerStage.Signal.Out") + + val shape = new FanOutShape2(eventIn, actionOut, signalOut) + + 
override def createLogic(effectiveAttributes: Attributes) = new GraphStageLogic(shape) with InHandler with OutHandler { + private var chunkSource: SubSourceOutlet[Evt] = _ + + private def chunkSubStreamStarted = chunkSource != null + + private def idle = this + + def setInitialHandlers(): Unit = setHandlers(eventIn, signalOut, idle) + + /* + * + * Substream Logic + * + * */ + + val pullThroughHandler = new OutHandler { + override def onPull() = { + pull(eventIn) + } + } + + val substreamHandler = new InHandler with OutHandler { + def endStream(): Unit = { + chunkSource.complete() + chunkSource = null + + if (isAvailable(signalOut) && !hasBeenPulled(eventIn)) pull(eventIn) + setInitialHandlers() + } + + override def onPush(): Unit = { + val chunk = grab(eventIn) + resolver.process(chunk) match { + case ConsumeStreamChunk ⇒ + chunkSource.push(chunk) + + case EndStream ⇒ + endStream() + + case ConsumeChunkAndEndStream ⇒ + chunkSource.push(chunk) + endStream() + + case Ignore ⇒ () + } + } + + override def onPull(): Unit = { + // TODO: Recheck internal flow; checking should be obsolete + if (!hasBeenPulled(eventIn)) pull(eventIn) + } + + override def onUpstreamFinish(): Unit = { + chunkSource.complete() + completeStage() + } + + override def onUpstreamFailure(reason: Throwable): Unit = { + chunkSource.fail(reason) + failStage(reason) + } + } + + def startStream(initialChunk: Option[Evt]): Unit = { + chunkSource = new SubSourceOutlet[Evt]("ChunkSource") + chunkSource.setHandler(pullThroughHandler) + setHandler(eventIn, substreamHandler) + setHandler(signalOut, substreamHandler) + + initialChunk match { + case Some(x) ⇒ push(signalOut, StreamEvent(Source.single(x) ++ Source.fromGraph(chunkSource.source))) + case None ⇒ push(signalOut, StreamEvent(Source.fromGraph(chunkSource.source))) + } + } + + def consumeStream(initialChunk: Evt): Unit = { + // emit(actionOut, (initialChunk, ProducerAction.ConsumeStream(Source.fromGraph(chunkSource.source)))) + } + + def onPush(): Unit = { + val evt = grab(eventIn) + + resolver.process(evt) match { + case x: ProducerAction.Signal[Evt, Cmd] ⇒ emit(actionOut, (evt, x)) + + // case x: ProducerAction.ProduceStream[Evt, Cmd] ⇒ emit(actionOut, (evt, x)) + + case AcceptSignal ⇒ push(signalOut, SingularEvent(evt)) + + case AcceptError ⇒ push(signalOut, SingularErrorEvent(evt)) + + case StartStream ⇒ startStream(None) + + case ConsumeStreamChunk ⇒ startStream(Some(evt)) + + case ConsumeChunkAndEndStream ⇒ push(signalOut, StreamEvent(Source.single(evt))) + + case Ignore ⇒ () + } + } + + def onPull(): Unit = { + if (!chunkSubStreamStarted && !hasBeenPulled(eventIn)) pull(eventIn) + } + + setHandler(actionOut, this) + + setInitialHandlers() + } +} \ No newline at end of file diff --git a/src/main/scala/nl/gideondk/sentinel/pipeline/Processor.scala b/src/main/scala/nl/gideondk/sentinel/pipeline/Processor.scala new file mode 100644 index 0000000..949e9e1 --- /dev/null +++ b/src/main/scala/nl/gideondk/sentinel/pipeline/Processor.scala @@ -0,0 +1,47 @@ +package nl.gideondk.sentinel.pipeline + +import akka.stream.BidiShape +import akka.stream.scaladsl.{ BidiFlow, Flow, GraphDSL, Merge, Sink } +import nl.gideondk.sentinel._ +import nl.gideondk.sentinel.protocol._ + +import scala.concurrent.ExecutionContext + +case class Processor[Cmd, Evt](flow: BidiFlow[Command[Cmd], Cmd, Evt, Event[Evt], Any]) + +object Processor { + def apply[Cmd, Evt](resolver: Resolver[Evt], producerParallism: Int, shouldReact: Boolean = false)(implicit ec: ExecutionContext): Processor[Cmd, Evt] = { + + 
val consumerStage = new ConsumerStage[Evt, Cmd](resolver) + val producerStage = new ProducerStage[Evt, Cmd]() + + val functionApply = Flow[(Evt, ProducerAction[Evt, Cmd])].mapAsync[Command[Cmd]](producerParallism) { + case (evt, x: ProducerAction.Signal[Evt, Cmd]) ⇒ x.f(evt).map(SingularCommand[Cmd]) + case (evt, x: ProducerAction.ProduceStream[Evt, Cmd]) ⇒ x.f(evt).map(StreamingCommand[Cmd]) + } + + Processor(BidiFlow.fromGraph[Command[Cmd], Cmd, Evt, Event[Evt], Any] { + GraphDSL.create() { implicit b ⇒ + import GraphDSL.Implicits._ + + val producer = b add producerStage + val consumer = b add consumerStage + + val commandIn = b add Flow[Command[Cmd]] + + if (shouldReact) { + val fa = b add functionApply + val merge = b add Merge[Command[Cmd]](2) + commandIn ~> merge.in(0) + consumer.out0 ~> fa ~> merge.in(1) + merge.out ~> producer + } else { + consumer.out0 ~> Sink.ignore + commandIn ~> producer + } + + BidiShape(commandIn.in, producer.out, consumer.in, consumer.out1) + } + }) + } +} diff --git a/src/main/scala/nl/gideondk/sentinel/pipeline/ProducerStage.scala b/src/main/scala/nl/gideondk/sentinel/pipeline/ProducerStage.scala new file mode 100644 index 0000000..55217b3 --- /dev/null +++ b/src/main/scala/nl/gideondk/sentinel/pipeline/ProducerStage.scala @@ -0,0 +1,68 @@ +package nl.gideondk.sentinel.pipeline + +import akka.stream._ +import akka.stream.scaladsl.Source +import akka.stream.stage.{ GraphStage, GraphStageLogic, InHandler, OutHandler } +import nl.gideondk.sentinel.protocol.{ Command, SingularCommand, StreamingCommand } + +class ProducerStage[In, Out] extends GraphStage[FlowShape[Command[Out], Out]] { + private val in = Inlet[Command[Out]]("ProducerStage.Command.In") + private val out = Outlet[Out]("ProducerStage.Command.Out") + + val shape = new FlowShape(in, out) + + override def createLogic(effectiveAttributes: Attributes) = new GraphStageLogic(shape) { + var streaming = false + var closeAfterCompletion = false + + val defaultInHandler = new InHandler { + override def onPush(): Unit = grab(in) match { + case x: SingularCommand[Out] ⇒ push(out, x.payload) + case x: StreamingCommand[Out] ⇒ stream(x.stream) + } + + override def onUpstreamFinish(): Unit = { + if (streaming) closeAfterCompletion = true + else completeStage() + } + } + + val waitForDemandHandler = new OutHandler { + def onPull(): Unit = pull(in) + } + + setHandler(in, defaultInHandler) + setHandler(out, waitForDemandHandler) + + def stream(outStream: Source[Out, Any]): Unit = { + streaming = true + val sinkIn = new SubSinkInlet[Out]("RenderingSink") + sinkIn.setHandler(new InHandler { + override def onPush(): Unit = push(out, sinkIn.grab()) + + override def onUpstreamFinish(): Unit = { + if (closeAfterCompletion) { + completeStage() + } else { + streaming = false + setHandler(out, waitForDemandHandler) + if (isAvailable(out)) pull(in) + } + } + }) + + setHandler(out, new OutHandler { + override def onPull(): Unit = sinkIn.pull() + + override def onDownstreamFinish(): Unit = { + completeStage() + sinkIn.cancel() + } + }) + + sinkIn.pull() + outStream.runWith(sinkIn.sink)(subFusingMaterializer) + } + + } +} diff --git a/src/main/scala/nl/gideondk/sentinel/pipeline/Resolver.scala b/src/main/scala/nl/gideondk/sentinel/pipeline/Resolver.scala new file mode 100644 index 0000000..07bd85c --- /dev/null +++ b/src/main/scala/nl/gideondk/sentinel/pipeline/Resolver.scala @@ -0,0 +1,8 @@ +package nl.gideondk.sentinel.pipeline + +import nl.gideondk.sentinel.protocol.Action + +trait Resolver[In] { + def process: 
PartialFunction[In, Action] +} + diff --git a/src/main/scala/nl/gideondk/sentinel/protocol/Action.scala b/src/main/scala/nl/gideondk/sentinel/protocol/Action.scala new file mode 100644 index 0000000..f705894 --- /dev/null +++ b/src/main/scala/nl/gideondk/sentinel/protocol/Action.scala @@ -0,0 +1,71 @@ +package nl.gideondk.sentinel.protocol + +import akka.stream.scaladsl.Source + +import scala.concurrent.Future + +trait Action + +trait ProducerAction[E, C] extends Action + +trait ConsumerAction extends Action + +object ProducerAction { + + trait Reaction[E, C] extends ProducerAction[E, C] + + trait StreamReaction[E, C] extends Reaction[E, C] + + trait Signal[In, Out] extends Reaction[In, Out] { + def f: In ⇒ Future[Out] + } + + object Signal { + def apply[E, C](fun: E ⇒ Future[C]): Signal[E, C] = new Signal[E, C] { + val f = fun + } + } + + trait ConsumeStream[E, C] extends StreamReaction[E, C] { + def f: E ⇒ Source[E, Any] ⇒ Future[C] + } + + object ConsumeStream { + def apply[E, A <: E, B <: E, C](fun: A ⇒ Source[B, Any] ⇒ Future[C]): ConsumeStream[E, C] = new ConsumeStream[E, C] { + val f = fun.asInstanceOf[E ⇒ Source[E, Any] ⇒ Future[C]] + } + } + + trait ProduceStream[E, C] extends StreamReaction[E, C] { + def f: E ⇒ Future[Source[C, Any]] + } + + object ProduceStream { + def apply[E, C](fun: E ⇒ Future[Source[C, Any]]): ProduceStream[E, C] = new ProduceStream[E, C] { + val f = fun + } + } + +} + +case class ProducerActionAndData[Evt, Cmd](action: ProducerAction[Evt, Cmd], data: Evt) + +object ConsumerAction { + + case object AcceptSignal extends ConsumerAction + + case object AcceptError extends ConsumerAction + + case object StartStream extends ConsumerAction + + case object ConsumeStreamChunk extends ConsumerAction + + case object EndStream extends ConsumerAction + + case object ConsumeChunkAndEndStream extends ConsumerAction + + case object Ignore extends ConsumerAction + +} + +case class ConsumerActionAndData[Evt](action: ConsumerAction, data: Evt) \ No newline at end of file diff --git a/src/main/scala/nl/gideondk/sentinel/protocol/Command.scala b/src/main/scala/nl/gideondk/sentinel/protocol/Command.scala new file mode 100644 index 0000000..75ef8e9 --- /dev/null +++ b/src/main/scala/nl/gideondk/sentinel/protocol/Command.scala @@ -0,0 +1,82 @@ +package nl.gideondk.sentinel.protocol + +import akka.actor.ActorRef +import akka.stream.scaladsl.Source + +import scala.concurrent.Promise + +trait Event[A] + +case class SingularEvent[A](data: A) extends Event[A] + +case class SingularErrorEvent[A](data: A) extends Event[A] + +case class StreamEvent[A](chunks: Source[A, Any]) extends Event[A] + +trait Registration[A, E <: Event[A]] { + def promise: Promise[E] +} + +object Registration { + case class SingularResponseRegistration[A](promise: Promise[SingularEvent[A]]) extends Registration[A, SingularEvent[A]] + + case class StreamReplyRegistration[A](promise: Promise[StreamEvent[A]]) extends Registration[A, StreamEvent[A]] +} + +trait Command[Out] + +case class SingularCommand[Out](payload: Out) extends Command[Out] +case class StreamingCommand[Out](stream: Source[Out, Any]) extends Command[Out] + +trait ServerCommand[Out, In] + +trait ServerMetric + +object Command { + +// case class Ask[Out](payload: Out) extends Command[Out] + + object Ask + +// case class Tell[Out](payload: Out) extends Command[Out] +// +// case class AskStream[Out](payload: Out) extends Command[Out] +// +// case class SendStream[Out](stream: Source[Out, Any]) extends Command[Out] + +} + +object ServerCommand { + + case 
class AskAll[Cmd, Evt](payload: Cmd, promise: Promise[List[Evt]]) extends ServerCommand[Cmd, Evt] + + case class AskAllHosts[Cmd, Evt](payload: Cmd, promise: Promise[List[Evt]]) extends ServerCommand[Cmd, Evt] + + case class AskAny[Cmd, Evt](payload: Cmd, promise: Promise[Evt]) extends ServerCommand[Cmd, Evt] + +} + +object ServerMetric { + + case object ConnectedSockets extends ServerMetric + + case object ConnectedHosts extends ServerMetric + +} + +//object Reply { +// +// case class Response[Cmd](payload: Cmd) extends Reply[Cmd] +// +// case class StreamResponseChunk[Cmd](payload: Cmd) extends Reply[Cmd] +// +//} + +object Management { + + trait ManagementMessage + + case class RegisterTcpHandler(h: ActorRef) extends ManagementMessage + +} + diff --git a/src/main/scala/nl/gideondk/sentinel/protocol/Protocol.scala b/src/main/scala/nl/gideondk/sentinel/protocol/Protocol.scala new file mode 100644 index 0000000..5fad136 --- /dev/null +++ b/src/main/scala/nl/gideondk/sentinel/protocol/Protocol.scala @@ -0,0 +1,14 @@ +package nl.gideondk.sentinel.protocol + +import akka.stream.scaladsl.BidiFlow +import akka.stream.{ BidiShape, Graph } + +import scala.concurrent.Promise + +case class RequestContext[Cmd, Evt](request: Cmd, responsePromise: Promise[Evt]) + +object Protocol { + implicit class ProtocolChaining[IT, OT, IB, OB, Mat](bf: BidiFlow[IT, OT, IB, OB, Mat]) { + def >>[NextOT, NextIB, Mat2](bidi: Graph[BidiShape[OT, NextOT, NextIB, IB], Mat2]) = bf.atop(bidi) + } +} \ No newline at end of file diff --git a/src/test/scala/nl/gideondk/sentinel/ConsumerStageSpec.scala b/src/test/scala/nl/gideondk/sentinel/ConsumerStageSpec.scala index 0a871ef..64a3e5c 100644 --- a/src/test/scala/nl/gideondk/sentinel/ConsumerStageSpec.scala +++ b/src/test/scala/nl/gideondk/sentinel/ConsumerStageSpec.scala @@ -5,6 +5,7 @@ import akka.event.Logging import akka.stream.{ ActorMaterializer, Attributes, ClosedShape } import akka.stream.scaladsl.{ Broadcast, Flow, GraphDSL, Merge, RunnableGraph, Sink, Source } import akka.stream.testkit.{ TestPublisher, TestSubscriber } +import nl.gideondk.sentinel.pipeline.ConsumerStage import nl.gideondk.sentinel.protocol._ import org.scalatest._ import protocol.SimpleMessage._ diff --git a/src/test/scala/nl/gideondk/sentinel/ProcessorSpec.scala b/src/test/scala/nl/gideondk/sentinel/ProcessorSpec.scala index 434c299..fb78a69 100644 --- a/src/test/scala/nl/gideondk/sentinel/ProcessorSpec.scala +++ b/src/test/scala/nl/gideondk/sentinel/ProcessorSpec.scala @@ -4,8 +4,9 @@ import akka.event.Logging import akka.stream.{ ActorMaterializer, Attributes, ClosedShape } import akka.stream.scaladsl.{ Flow, GraphDSL, RunnableGraph, Sink, Source } import akka.stream.testkit.{ TestPublisher, TestSubscriber } -import nl.gideondk.sentinel.Command.Ask -import nl.gideondk.sentinel.Registration.SingularResponseRegistration +import nl.gideondk.sentinel.protocol.Command.Ask +import nl.gideondk.sentinel.protocol.Registration.SingularResponseRegistration +import nl.gideondk.sentinel.pipeline.Processor import nl.gideondk.sentinel.protocol._ import scala.concurrent.{ Await, Promise } diff --git a/src/test/scala/nl/gideondk/sentinel/ProducerStageSpec.scala b/src/test/scala/nl/gideondk/sentinel/ProducerStageSpec.scala index 754a3c0..8d5ef1f 100644 --- a/src/test/scala/nl/gideondk/sentinel/ProducerStageSpec.scala +++ b/src/test/scala/nl/gideondk/sentinel/ProducerStageSpec.scala @@ -3,9 +3,9 @@ package nl.gideondk.sentinel import akka.stream.ActorMaterializer import akka.stream._ import akka.stream.scaladsl._ - 
-import nl.gideondk.sentinel.Registration.SingularResponseRegistration -import nl.gideondk.sentinel.protocol.{ SimpleMessageFormat, SimpleReply } +import nl.gideondk.sentinel.protocol.Registration.SingularResponseRegistration +import nl.gideondk.sentinel.pipeline.ProducerStage +import nl.gideondk.sentinel.protocol.{ SimpleMessageFormat, SimpleReply, SingularCommand, StreamingCommand } import scala.concurrent._ import scala.concurrent.duration._ diff --git a/src/test/scala/nl/gideondk/sentinel/protocol/SimpleMessage.scala b/src/test/scala/nl/gideondk/sentinel/protocol/SimpleMessage.scala index 21a0747..8a97860 100644 --- a/src/test/scala/nl/gideondk/sentinel/protocol/SimpleMessage.scala +++ b/src/test/scala/nl/gideondk/sentinel/protocol/SimpleMessage.scala @@ -3,6 +3,7 @@ package nl.gideondk.sentinel.protocol import akka.stream.scaladsl.{ BidiFlow, Framing } import akka.util.{ ByteString, ByteStringBuilder } import nl.gideondk.sentinel._ +import nl.gideondk.sentinel.pipeline.Resolver import scala.concurrent.Future import scala.concurrent.ExecutionContext.Implicits.global From a727a104c3bef17b3996c715c3f0d20af0bce333 Mon Sep 17 00:00:00 2001 From: Gideon de Kok Date: Sun, 25 Dec 2016 11:52:32 +0100 Subject: [PATCH 36/54] Add handles for Client flows --- project/Build.scala | 6 +- .../scala/nl/gideondk/sentinel/Action.scala | 72 ---------- .../scala/nl/gideondk/sentinel/Command.scala | 84 ----------- .../nl/gideondk/sentinel/ConsumerStage.scala | 130 ------------------ .../nl/gideondk/sentinel/Processor.scala | 45 ------ .../nl/gideondk/sentinel/ProducerStage.scala | 69 ---------- .../scala/nl/gideondk/sentinel/Protocol.scala | 14 -- .../scala/nl/gideondk/sentinel/Resolver.scala | 14 -- .../nl/gideondk/sentinel/client/Client.scala | 48 ++++--- 9 files changed, 29 insertions(+), 453 deletions(-) delete mode 100644 src/main/scala/nl/gideondk/sentinel/Action.scala delete mode 100644 src/main/scala/nl/gideondk/sentinel/Command.scala delete mode 100644 src/main/scala/nl/gideondk/sentinel/ConsumerStage.scala delete mode 100644 src/main/scala/nl/gideondk/sentinel/Processor.scala delete mode 100644 src/main/scala/nl/gideondk/sentinel/ProducerStage.scala delete mode 100644 src/main/scala/nl/gideondk/sentinel/Protocol.scala delete mode 100644 src/main/scala/nl/gideondk/sentinel/Resolver.scala diff --git a/project/Build.scala b/project/Build.scala index 4f72ab6..05e83ae 100755 --- a/project/Build.scala +++ b/project/Build.scala @@ -8,7 +8,7 @@ object ApplicationBuild extends Build { name := "sentinel", version := "0.8-SNAPSHOT", organization := "nl.gideondk", - scalaVersion := "2.11.8", + scalaVersion := "2.12.0", parallelExecution in Test := false, resolvers ++= Seq(Resolver.mavenLocal, "gideondk-repo" at "https://raw.github.com/gideondk/gideondk-mvn-repo/master", @@ -22,13 +22,11 @@ object ApplicationBuild extends Build { publishTo := Some(Resolver.file("file", new File("/Users/gideondk/Development/gideondk-mvn-repo"))) ) - val akkaVersion = "2.4.11" + val akkaVersion = "2.4.14" val appDependencies = Seq( "org.scalatest" %% "scalatest" % "3.0.0" % "test", - "com.typesafe.play" %% "play-iteratees" % "2.3.1", - "com.typesafe.akka" %% "akka-stream" % akkaVersion, "com.typesafe.akka" %% "akka-stream-testkit" % akkaVersion, diff --git a/src/main/scala/nl/gideondk/sentinel/Action.scala b/src/main/scala/nl/gideondk/sentinel/Action.scala deleted file mode 100644 index 4399376..0000000 --- a/src/main/scala/nl/gideondk/sentinel/Action.scala +++ /dev/null @@ -1,72 +0,0 @@ -package nl.gideondk.sentinel - -import 
akka.stream.scaladsl.Source - -import scala.concurrent.Future -import play.api.libs.iteratee._ - -trait Action - -trait ProducerAction[E, C] extends Action - -trait ConsumerAction extends Action - -object ProducerAction { - - trait Reaction[E, C] extends ProducerAction[E, C] - - trait StreamReaction[E, C] extends Reaction[E, C] - - trait Signal[In, Out] extends Reaction[In, Out] { - def f: In ⇒ Future[Out] - } - - object Signal { - def apply[E, C](fun: E ⇒ Future[C]): Signal[E, C] = new Signal[E, C] { - val f = fun - } - } - - trait ConsumeStream[E, C] extends StreamReaction[E, C] { - def f: E ⇒ Source[E, Any] ⇒ Future[C] - } - - object ConsumeStream { - def apply[E, A <: E, B <: E, C](fun: A ⇒ Source[B, Any] ⇒ Future[C]): ConsumeStream[E, C] = new ConsumeStream[E, C] { - val f = fun.asInstanceOf[E ⇒ Source[E, Any] ⇒ Future[C]] - } - } - - trait ProduceStream[E, C] extends StreamReaction[E, C] { - def f: E ⇒ Future[Source[C, Any]] - } - - object ProduceStream { - def apply[E, C](fun: E ⇒ Future[Source[C, Any]]): ProduceStream[E, C] = new ProduceStream[E, C] { - val f = fun - } - } - -} - -case class ProducerActionAndData[Evt, Cmd](action: ProducerAction[Evt, Cmd], data: Evt) - -object ConsumerAction { - - case object AcceptSignal extends ConsumerAction - - case object AcceptError extends ConsumerAction - - case object StartStream extends ConsumerAction - - case object ConsumeStreamChunk extends ConsumerAction - - case object EndStream extends ConsumerAction - - case object ConsumeChunkAndEndStream extends ConsumerAction - - case object Ignore extends ConsumerAction - -} - -case class ConsumerActionAndData[Evt](action: ConsumerAction, data: Evt) \ No newline at end of file diff --git a/src/main/scala/nl/gideondk/sentinel/Command.scala b/src/main/scala/nl/gideondk/sentinel/Command.scala deleted file mode 100644 index 90bff7e..0000000 --- a/src/main/scala/nl/gideondk/sentinel/Command.scala +++ /dev/null @@ -1,84 +0,0 @@ -package nl.gideondk.sentinel - -import akka.actor.ActorRef -import akka.stream.scaladsl.Source - -import scala.concurrent.Promise - -trait Event[A] - -case class SingularEvent[A](data: A) extends Event[A] - -case class SingularErrorEvent[A](data: A) extends Event[A] - -case class StreamEvent[A](chunks: Source[A, Any]) extends Event[A] - -trait Registration[A, E <: Event[A]] { - def promise: Promise[E] -} - -object Registration { - case class SingularResponseRegistration[A](promise: Promise[SingularEvent[A]]) extends Registration[A, SingularEvent[A]] - - case class StreamReplyRegistration[A](promise: Promise[StreamEvent[A]]) extends Registration[A, StreamEvent[A]] -} - -trait Command[Out] - -case class SingularCommand[Out](payload: Out) extends Command[Out] -case class StreamingCommand[Out](stream: Source[Out, Any]) extends Command[Out] - -trait ServerCommand[Out, In] - -trait ServerMetric - -object Command { - - import Registration._ - -// case class Ask[Out](payload: Out) extends Command[Out] - - object Ask - -// case class Tell[Out](payload: Out) extends Command[Out] -// -// case class AskStream[Out](payload: Out) extends Command[Out] -// -// case class SendStream[Out](stream: Source[Out, Any]) extends Command[Out] - -} - -object ServerCommand { - - case class AskAll[Cmd, Evt](payload: Cmd, promise: Promise[List[Evt]]) extends ServerCommand[Cmd, Evt] - - case class AskAllHosts[Cmd, Evt](payload: Cmd, promise: Promise[List[Evt]]) extends ServerCommand[Cmd, Evt] - - case class AskAny[Cmd, Evt](payload: Cmd, promise: Promise[Evt]) extends ServerCommand[Cmd, Evt] - -} - -object 
ServerMetric { - - case object ConnectedSockets extends ServerMetric - - case object ConnectedHosts extends ServerMetric - -} - -//object Reply { -// -// case class Response[Cmd](payload: Cmd) extends Reply[Cmd] -// -// case class StreamResponseChunk[Cmd](payload: Cmd) extends Reply[Cmd] -// -//} - -object Management { - - trait ManagementMessage - - case class RegisterTcpHandler(h: ActorRef) extends ManagementMessage - -} - diff --git a/src/main/scala/nl/gideondk/sentinel/ConsumerStage.scala b/src/main/scala/nl/gideondk/sentinel/ConsumerStage.scala deleted file mode 100644 index e3e3ab0..0000000 --- a/src/main/scala/nl/gideondk/sentinel/ConsumerStage.scala +++ /dev/null @@ -1,130 +0,0 @@ -package nl.gideondk.sentinel - -import akka.stream._ -import akka.stream.scaladsl.{ BidiFlow, Broadcast, Flow, GraphDSL, Merge, Source } -import akka.stream.stage.GraphStageLogic.EagerTerminateOutput -import akka.stream.stage.{ GraphStage, GraphStageLogic, InHandler, OutHandler } - -import nl.gideondk.sentinel.ConsumerAction._ -import nl.gideondk.sentinel.Registration.SingularResponseRegistration - -import scala.concurrent.{ ExecutionContext, Future, Promise } - -class ConsumerStage[Evt, Cmd](resolver: Resolver[Evt]) extends GraphStage[FanOutShape2[Evt, (Evt, ProducerAction[Evt, Cmd]), Event[Evt]]] { - - private val eventIn = Inlet[Evt]("ConsumerStage.Event.In") - private val actionOut = Outlet[(Evt, ProducerAction[Evt, Cmd])]("ConsumerStage.Action.Out") - private val signalOut = Outlet[Event[Evt]]("ConsumerStage.Signal.Out") - - val shape = new FanOutShape2(eventIn, actionOut, signalOut) - - override def createLogic(effectiveAttributes: Attributes) = new GraphStageLogic(shape) with InHandler with OutHandler { - private var chunkSource: SubSourceOutlet[Evt] = _ - - private def chunkSubStreamStarted = chunkSource != null - - private def idle = this - - def setInitialHandlers(): Unit = setHandlers(eventIn, signalOut, idle) - - /* - * - * Substream Logic - * - * */ - - val pullThroughHandler = new OutHandler { - override def onPull() = { - pull(eventIn) - } - } - - val substreamHandler = new InHandler with OutHandler { - def endStream(): Unit = { - chunkSource.complete() - chunkSource = null - - if (isAvailable(signalOut) && !hasBeenPulled(eventIn)) pull(eventIn) - setInitialHandlers() - } - - override def onPush(): Unit = { - val chunk = grab(eventIn) - resolver.process(chunk) match { - case ConsumeStreamChunk ⇒ - chunkSource.push(chunk) - - case EndStream ⇒ - endStream() - - case ConsumeChunkAndEndStream ⇒ - chunkSource.push(chunk) - endStream() - - case Ignore ⇒ () - } - } - - override def onPull(): Unit = { - // TODO: Recheck internal flow; checking should be obsolete - if (!hasBeenPulled(eventIn)) pull(eventIn) - } - - override def onUpstreamFinish(): Unit = { - chunkSource.complete() - completeStage() - } - - override def onUpstreamFailure(reason: Throwable): Unit = { - chunkSource.fail(reason) - failStage(reason) - } - } - - def startStream(initialChunk: Option[Evt]): Unit = { - chunkSource = new SubSourceOutlet[Evt]("ChunkSource") - chunkSource.setHandler(pullThroughHandler) - setHandler(eventIn, substreamHandler) - setHandler(signalOut, substreamHandler) - - initialChunk match { - case Some(x) ⇒ push(signalOut, StreamEvent(Source.single(x) ++ Source.fromGraph(chunkSource.source))) - case None ⇒ push(signalOut, StreamEvent(Source.fromGraph(chunkSource.source))) - } - } - - def consumeStream(initialChunk: Evt): Unit = { - // emit(actionOut, (initialChunk, 
ProducerAction.ConsumeStream(Source.fromGraph(chunkSource.source)))) - } - - def onPush(): Unit = { - val evt = grab(eventIn) - - resolver.process(evt) match { - case x: ProducerAction.Signal[Evt, Cmd] ⇒ emit(actionOut, (evt, x)) - - // case x: ProducerAction.ProduceStream[Evt, Cmd] ⇒ emit(actionOut, (evt, x)) - - case AcceptSignal ⇒ push(signalOut, SingularEvent(evt)) - - case AcceptError ⇒ push(signalOut, SingularErrorEvent(evt)) - - case StartStream ⇒ startStream(None) - - case ConsumeStreamChunk ⇒ startStream(Some(evt)) - - case ConsumeChunkAndEndStream ⇒ push(signalOut, StreamEvent(Source.single(evt))) - - case Ignore ⇒ () - } - } - - def onPull(): Unit = { - if (!chunkSubStreamStarted && !hasBeenPulled(eventIn)) pull(eventIn) - } - - setHandler(actionOut, this) - - setInitialHandlers() - } -} \ No newline at end of file diff --git a/src/main/scala/nl/gideondk/sentinel/Processor.scala b/src/main/scala/nl/gideondk/sentinel/Processor.scala deleted file mode 100644 index a6edc03..0000000 --- a/src/main/scala/nl/gideondk/sentinel/Processor.scala +++ /dev/null @@ -1,45 +0,0 @@ -package nl.gideondk.sentinel - -import akka.stream.BidiShape -import akka.stream.scaladsl.{ BidiFlow, Broadcast, Flow, GraphDSL, Merge, Sink, Source } - -import scala.concurrent.{ ExecutionContext, Promise } - -case class Processor[Cmd, Evt](flow: BidiFlow[Command[Cmd], Cmd, Evt, Event[Evt], Any]) - -object Processor { - def apply[Cmd, Evt](resolver: Resolver[Evt], producerParallism: Int, shouldReact: Boolean = false)(implicit ec: ExecutionContext): Processor[Cmd, Evt] = { - - val consumerStage = new ConsumerStage[Evt, Cmd](resolver) - val producerStage = new ProducerStage[Evt, Cmd]() - - val functionApply = Flow[(Evt, ProducerAction[Evt, Cmd])].mapAsync[Command[Cmd]](producerParallism) { - case (evt, x: ProducerAction.Signal[Evt, Cmd]) ⇒ x.f(evt).map(SingularCommand[Cmd]) - case (evt, x: ProducerAction.ProduceStream[Evt, Cmd]) ⇒ x.f(evt).map(StreamingCommand[Cmd]) - } - - Processor(BidiFlow.fromGraph[Command[Cmd], Cmd, Evt, Event[Evt], Any] { - GraphDSL.create() { implicit b ⇒ - import GraphDSL.Implicits._ - - val producer = b add producerStage - val consumer = b add consumerStage - - val commandIn = b add Flow[Command[Cmd]] - - if (shouldReact) { - val fa = b add functionApply - val merge = b add Merge[Command[Cmd]](2) - commandIn ~> merge.in(0) - consumer.out0 ~> fa ~> merge.in(1) - merge.out ~> producer - } else { - consumer.out0 ~> Sink.ignore - commandIn ~> producer - } - - BidiShape(commandIn.in, producer.out, consumer.in, consumer.out1) - } - }) - } -} diff --git a/src/main/scala/nl/gideondk/sentinel/ProducerStage.scala b/src/main/scala/nl/gideondk/sentinel/ProducerStage.scala deleted file mode 100644 index 8169bf4..0000000 --- a/src/main/scala/nl/gideondk/sentinel/ProducerStage.scala +++ /dev/null @@ -1,69 +0,0 @@ -package nl.gideondk.sentinel - -import akka.stream._ -import akka.stream.scaladsl.{ BidiFlow, Flow, GraphDSL, Source } -import akka.stream.stage.GraphStageLogic._ -import akka.stream.stage.{ GraphStage, GraphStageLogic, InHandler, OutHandler } -import nl.gideondk.sentinel.ConsumerAction._ - -class ProducerStage[In, Out] extends GraphStage[FlowShape[Command[Out], Out]] { - private val in = Inlet[Command[Out]]("ProducerStage.Command.In") - private val out = Outlet[Out]("ProducerStage.Command.Out") - - val shape = new FlowShape(in, out) - - override def createLogic(effectiveAttributes: Attributes) = new GraphStageLogic(shape) { - var streaming = false - var closeAfterCompletion = false - - val 
defaultInHandler = new InHandler { - override def onPush(): Unit = grab(in) match { - case x: SingularCommand[Out] ⇒ push(out, x.payload) - case x: StreamingCommand[Out] ⇒ stream(x.stream) - } - - override def onUpstreamFinish(): Unit = { - if (streaming) closeAfterCompletion = true - else completeStage() - } - } - - val waitForDemandHandler = new OutHandler { - def onPull(): Unit = pull(in) - } - - setHandler(in, defaultInHandler) - setHandler(out, waitForDemandHandler) - - def stream(outStream: Source[Out, Any]): Unit = { - streaming = true - val sinkIn = new SubSinkInlet[Out]("RenderingSink") - sinkIn.setHandler(new InHandler { - override def onPush(): Unit = push(out, sinkIn.grab()) - - override def onUpstreamFinish(): Unit = { - if (closeAfterCompletion) { - completeStage() - } else { - streaming = false - setHandler(out, waitForDemandHandler) - if (isAvailable(out)) pull(in) - } - } - }) - - setHandler(out, new OutHandler { - override def onPull(): Unit = sinkIn.pull() - - override def onDownstreamFinish(): Unit = { - completeStage() - sinkIn.cancel() - } - }) - - sinkIn.pull() - outStream.runWith(sinkIn.sink)(subFusingMaterializer) - } - - } -} diff --git a/src/main/scala/nl/gideondk/sentinel/Protocol.scala b/src/main/scala/nl/gideondk/sentinel/Protocol.scala deleted file mode 100644 index 0e672c7..0000000 --- a/src/main/scala/nl/gideondk/sentinel/Protocol.scala +++ /dev/null @@ -1,14 +0,0 @@ -package nl.gideondk.sentinel - -import akka.stream.{ BidiShape, Graph } -import akka.stream.scaladsl.{ BidiFlow, Framing } - -import scala.concurrent.Promise - -case class RequestContext[Cmd, Evt](request: Cmd, responsePromise: Promise[Evt]) - -object Protocol { - implicit class ProtocolChaining[IT, OT, IB, OB, Mat](bf: BidiFlow[IT, OT, IB, OB, Mat]) { - def >>[NextOT, NextIB, Mat2](bidi: Graph[BidiShape[OT, NextOT, NextIB, IB], Mat2]) = bf.atop(bidi) - } -} \ No newline at end of file diff --git a/src/main/scala/nl/gideondk/sentinel/Resolver.scala b/src/main/scala/nl/gideondk/sentinel/Resolver.scala deleted file mode 100644 index 447e2e5..0000000 --- a/src/main/scala/nl/gideondk/sentinel/Resolver.scala +++ /dev/null @@ -1,14 +0,0 @@ -package nl.gideondk.sentinel - -import akka.stream.scaladsl.{ BidiFlow, Concat, Flow, GraphDSL, Source } -import akka.stream._ -import akka.stream.actor.{ ActorPublisher, ActorSubscriber } -import akka.stream.stage.GraphStageLogic.EagerTerminateOutput -import akka.stream.stage.{ OutHandler, _ } -import akka.util.ByteString -import nl.gideondk.sentinel.ConsumerAction._ - -trait Resolver[In] { - def process: PartialFunction[In, Action] -} - diff --git a/src/main/scala/nl/gideondk/sentinel/client/Client.scala b/src/main/scala/nl/gideondk/sentinel/client/Client.scala index f157559..c1237e5 100644 --- a/src/main/scala/nl/gideondk/sentinel/client/Client.scala +++ b/src/main/scala/nl/gideondk/sentinel/client/Client.scala @@ -4,20 +4,20 @@ import java.util.concurrent.TimeUnit import akka.actor.ActorSystem import akka.stream._ -import akka.stream.scaladsl.{BidiFlow, Flow, GraphDSL, RunnableGraph, Sink, Source, Tcp} +import akka.stream.scaladsl.{ BidiFlow, Flow, GraphDSL, RunnableGraph, Sink, Source, Tcp } import akka.stream.stage._ import akka.util.ByteString -import akka.{Done, NotUsed, stream} +import akka.{ Done, NotUsed, stream } import nl.gideondk.sentinel.client.ClientStage.ConnectionEvent import scala.collection.mutable import scala.concurrent._ import scala.concurrent.duration._ -import scala.util.{Failure, Success, Try} +import scala.util.{ Failure, Success, Try 
} import ClientStage._ import Client._ import nl.gideondk.sentinel.Config -import nl.gideondk.sentinel.pipeline.{Processor, Resolver} +import nl.gideondk.sentinel.pipeline.{ Processor, Resolver } import nl.gideondk.sentinel.protocol._ object ClientConfig { @@ -41,12 +41,28 @@ object Client { case class InputQueueUnavailable() extends Exception with ClientException - def flow[Context, Cmd, Evt](hosts: Source[ConnectionEvent, NotUsed], resolver: Resolver[Evt], shouldReact: Boolean = false, protocol: BidiFlow[Cmd, ByteString, ByteString, Evt, Any])(implicit system: ActorSystem, mat: ActorMaterializer) = { + def apply[Cmd, Evt](hosts: Source[ConnectionEvent, NotUsed], resolver: Resolver[Evt], inputOverflowStrategy: OverflowStrategy, shouldReact: Boolean = false, protocol: BidiFlow[Cmd, ByteString, ByteString, Evt, Any])(implicit system: ActorSystem, mat: ActorMaterializer, ec: ExecutionContext) = { + val processor = Processor[Cmd, Evt](resolver, Config.producerParallelism) + new Client(hosts, ClientConfig.connectionsPerHost, ClientConfig.maxFailuresPerHost, ClientConfig.failureRecoveryPeriod, ClientConfig.inputBufferSize, inputOverflowStrategy, processor, protocol.reversed) + } + + def apply[Cmd, Evt](hosts: List[Host], resolver: Resolver[Evt], inputOverflowStrategy: OverflowStrategy, shouldReact: Boolean = false, protocol: BidiFlow[Cmd, ByteString, ByteString, Evt, Any])(implicit system: ActorSystem, mat: ActorMaterializer, ec: ExecutionContext) = { + val processor = Processor[Cmd, Evt](resolver, Config.producerParallelism) + new Client(Source(hosts.map(LinkUp)), ClientConfig.connectionsPerHost, ClientConfig.maxFailuresPerHost, ClientConfig.failureRecoveryPeriod, ClientConfig.inputBufferSize, inputOverflowStrategy, processor, protocol.reversed) + } + def flow[Cmd, Evt](hosts: Source[ConnectionEvent, NotUsed], resolver: Resolver[Evt], inputOverflowStrategy: OverflowStrategy, shouldReact: Boolean = false, protocol: BidiFlow[Cmd, ByteString, ByteString, Evt, Any])(implicit system: ActorSystem, mat: ActorMaterializer, ec: ExecutionContext) = { val processor = Processor[Cmd, Evt](resolver, Config.producerParallelism) + val client = new Client(hosts, ClientConfig.connectionsPerHost, ClientConfig.maxFailuresPerHost, ClientConfig.failureRecoveryPeriod, ClientConfig.inputBufferSize, inputOverflowStrategy, processor, protocol.reversed) + Flow[Command[Cmd]].mapAsync(1)(cmd => client.send(cmd)) + } - Flow.fromGraph(GraphDSL.create(hosts) { implicit b => - connections => + def rawFlow[Context, Cmd, Evt](hosts: Source[ConnectionEvent, NotUsed], resolver: Resolver[Evt], shouldReact: Boolean = false, protocol: BidiFlow[Cmd, ByteString, ByteString, Evt, Any])(implicit system: ActorSystem, mat: ActorMaterializer) = { + + val processor = Processor[Cmd, Evt](resolver, Config.producerParallelism) + + Flow.fromGraph(GraphDSL.create(hosts) { implicit b ⇒ + connections ⇒ import GraphDSL.Implicits._ val s = b.add(new ClientStage[Context, Cmd, Evt](ClientConfig.connectionsPerHost, ClientConfig.maxFailuresPerHost, ClientConfig.failureRecoveryPeriod, processor, protocol.reversed)) @@ -54,16 +70,6 @@ object Client { FlowShape(s.in1, s.out) }) } - - def apply[Cmd, Evt](hosts: Source[ConnectionEvent, NotUsed], resolver: Resolver[Evt], inputOverflowStrategy: OverflowStrategy, shouldReact: Boolean = false, protocol: BidiFlow[Cmd, ByteString, ByteString, Evt, Any])(implicit system: ActorSystem, mat: ActorMaterializer) = { - val processor = Processor[Cmd, Evt](resolver, Config.producerParallelism) - new Client(hosts, 
ClientConfig.connectionsPerHost, ClientConfig.maxFailuresPerHost, ClientConfig.failureRecoveryPeriod, ClientConfig.inputBufferSize, inputOverflowStrategy, processor, protocol.reversed) - } - - def apply[Cmd, Evt](hosts: List[Host], resolver: Resolver[Evt], inputOverflowStrategy: OverflowStrategy, shouldReact: Boolean = false, protocol: BidiFlow[Cmd, ByteString, ByteString, Evt, Any])(implicit system: ActorSystem, mat: ActorMaterializer) = { - val processor = Processor[Cmd, Evt](resolver, Config.producerParallelism) - new Client(Source(hosts.map(LinkUp)), ClientConfig.connectionsPerHost, ClientConfig.maxFailuresPerHost, ClientConfig.failureRecoveryPeriod, ClientConfig.inputBufferSize, inputOverflowStrategy, processor, protocol.reversed) - } } class Client[Cmd, Evt](hosts: Source[ConnectionEvent, NotUsed], @@ -93,13 +99,13 @@ class Client[Cmd, Evt](hosts: Source[ConnectionEvent, NotUsed], val input = g.run() - def request(command: Command[Cmd])(implicit ec: ExecutionContext): Future[Event[Evt]] = { + def send(command: Command[Cmd])(implicit ec: ExecutionContext): Future[Event[Evt]] = { val context = Promise[Event[Evt]]() input.offer((command, context)).flatMap { - case QueueOfferResult.Dropped ⇒ Future.failed(InputQueueUnavailable()) - case QueueOfferResult.QueueClosed ⇒ Future.failed(InputQueueClosed()) + case QueueOfferResult.Dropped ⇒ Future.failed(InputQueueUnavailable()) + case QueueOfferResult.QueueClosed ⇒ Future.failed(InputQueueClosed()) case QueueOfferResult.Failure(reason) ⇒ Future.failed(reason) - case QueueOfferResult.Enqueued ⇒ context.future + case QueueOfferResult.Enqueued ⇒ context.future } } From 408aa72083d0341d70a23b307ee296abe21c18fd Mon Sep 17 00:00:00 2001 From: Gideon de Kok Date: Sun, 25 Dec 2016 12:15:39 +0100 Subject: [PATCH 37/54] Revert to Scala 2.11 for now, add cross-compilation later (and check what issues are with Akka 2.4.14) --- project/Build.scala | 4 ++-- .../nl/gideondk/sentinel/client/Client.scala | 19 +++++++++++++------ .../gideondk/sentinel/ClientStageSpec.scala | 7 +++++-- 3 files changed, 20 insertions(+), 10 deletions(-) diff --git a/project/Build.scala b/project/Build.scala index 05e83ae..d7b5d59 100755 --- a/project/Build.scala +++ b/project/Build.scala @@ -8,7 +8,7 @@ object ApplicationBuild extends Build { name := "sentinel", version := "0.8-SNAPSHOT", organization := "nl.gideondk", - scalaVersion := "2.12.0", + scalaVersion := "2.11.8", parallelExecution in Test := false, resolvers ++= Seq(Resolver.mavenLocal, "gideondk-repo" at "https://raw.github.com/gideondk/gideondk-mvn-repo/master", @@ -22,7 +22,7 @@ object ApplicationBuild extends Build { publishTo := Some(Resolver.file("file", new File("/Users/gideondk/Development/gideondk-mvn-repo"))) ) - val akkaVersion = "2.4.14" + val akkaVersion = "2.4.11" val appDependencies = Seq( "org.scalatest" %% "scalatest" % "3.0.0" % "test", diff --git a/src/main/scala/nl/gideondk/sentinel/client/Client.scala b/src/main/scala/nl/gideondk/sentinel/client/Client.scala index c1237e5..aae51ca 100644 --- a/src/main/scala/nl/gideondk/sentinel/client/Client.scala +++ b/src/main/scala/nl/gideondk/sentinel/client/Client.scala @@ -41,24 +41,31 @@ object Client { case class InputQueueUnavailable() extends Exception with ClientException - def apply[Cmd, Evt](hosts: Source[ConnectionEvent, NotUsed], resolver: Resolver[Evt], inputOverflowStrategy: OverflowStrategy, shouldReact: Boolean = false, protocol: BidiFlow[Cmd, ByteString, ByteString, Evt, Any])(implicit system: ActorSystem, mat: ActorMaterializer, ec: 
ExecutionContext) = { + def apply[Cmd, Evt](hosts: Source[ConnectionEvent, NotUsed], resolver: Resolver[Evt], + shouldReact: Boolean, inputOverflowStrategy: OverflowStrategy, + protocol: BidiFlow[Cmd, ByteString, ByteString, Evt, Any])(implicit system: ActorSystem, mat: ActorMaterializer, ec: ExecutionContext): Client[Cmd, Evt] = { val processor = Processor[Cmd, Evt](resolver, Config.producerParallelism) new Client(hosts, ClientConfig.connectionsPerHost, ClientConfig.maxFailuresPerHost, ClientConfig.failureRecoveryPeriod, ClientConfig.inputBufferSize, inputOverflowStrategy, processor, protocol.reversed) } - def apply[Cmd, Evt](hosts: List[Host], resolver: Resolver[Evt], inputOverflowStrategy: OverflowStrategy, shouldReact: Boolean = false, protocol: BidiFlow[Cmd, ByteString, ByteString, Evt, Any])(implicit system: ActorSystem, mat: ActorMaterializer, ec: ExecutionContext) = { + def apply[Cmd, Evt](hosts: List[Host], resolver: Resolver[Evt], + shouldReact: Boolean, inputOverflowStrategy: OverflowStrategy, + protocol: BidiFlow[Cmd, ByteString, ByteString, Evt, Any])(implicit system: ActorSystem, mat: ActorMaterializer, ec: ExecutionContext): Client[Cmd, Evt] = { val processor = Processor[Cmd, Evt](resolver, Config.producerParallelism) new Client(Source(hosts.map(LinkUp)), ClientConfig.connectionsPerHost, ClientConfig.maxFailuresPerHost, ClientConfig.failureRecoveryPeriod, ClientConfig.inputBufferSize, inputOverflowStrategy, processor, protocol.reversed) } - def flow[Cmd, Evt](hosts: Source[ConnectionEvent, NotUsed], resolver: Resolver[Evt], inputOverflowStrategy: OverflowStrategy, shouldReact: Boolean = false, protocol: BidiFlow[Cmd, ByteString, ByteString, Evt, Any])(implicit system: ActorSystem, mat: ActorMaterializer, ec: ExecutionContext) = { + def flow[Cmd, Evt](hosts: Source[ConnectionEvent, NotUsed], resolver: Resolver[Evt], + shouldReact: Boolean = false, inputOverflowStrategy: OverflowStrategy, + protocol: BidiFlow[Cmd, ByteString, ByteString, Evt, Any])(implicit system: ActorSystem, mat: ActorMaterializer, ec: ExecutionContext) = { val processor = Processor[Cmd, Evt](resolver, Config.producerParallelism) val client = new Client(hosts, ClientConfig.connectionsPerHost, ClientConfig.maxFailuresPerHost, ClientConfig.failureRecoveryPeriod, ClientConfig.inputBufferSize, inputOverflowStrategy, processor, protocol.reversed) - Flow[Command[Cmd]].mapAsync(1)(cmd => client.send(cmd)) + Flow[Command[Cmd]].mapAsync(1)(cmd ⇒ client.send(cmd)) } - def rawFlow[Context, Cmd, Evt](hosts: Source[ConnectionEvent, NotUsed], resolver: Resolver[Evt], shouldReact: Boolean = false, protocol: BidiFlow[Cmd, ByteString, ByteString, Evt, Any])(implicit system: ActorSystem, mat: ActorMaterializer) = { - + def rawFlow[Context, Cmd, Evt](hosts: Source[ConnectionEvent, NotUsed], resolver: Resolver[Evt], + shouldReact: Boolean = false, + protocol: BidiFlow[Cmd, ByteString, ByteString, Evt, Any])(implicit system: ActorSystem, mat: ActorMaterializer, ec: ExecutionContext) = { val processor = Processor[Cmd, Evt](resolver, Config.producerParallelism) Flow.fromGraph(GraphDSL.create(hosts) { implicit b ⇒ diff --git a/src/test/scala/nl/gideondk/sentinel/ClientStageSpec.scala b/src/test/scala/nl/gideondk/sentinel/ClientStageSpec.scala index c480ba0..2680756 100644 --- a/src/test/scala/nl/gideondk/sentinel/ClientStageSpec.scala +++ b/src/test/scala/nl/gideondk/sentinel/ClientStageSpec.scala @@ -7,6 +7,7 @@ import akka.stream.scaladsl.{ Broadcast, Flow, GraphDSL, Merge, RunnableGraph, S import akka.stream.testkit.{ 
TestPublisher, TestSubscriber } import akka.util.ByteString import nl.gideondk.sentinel.client.{ ClientStage, Host } +import nl.gideondk.sentinel.pipeline.Processor import nl.gideondk.sentinel.protocol._ import org.scalatest._ import protocol.SimpleMessage._ @@ -43,10 +44,12 @@ class ClientStageSpec extends AkkaSpec { import ClientStageSpec._ "The ClientStage" should { - "Keep message order intact" in { + "keep message order intact" in { val server = mockServer(system, 9000) implicit val materializer = ActorMaterializer() + type Context = Promise[Event[SimpleMessageFormat]] + val numberOfMessages = 1024 val messages = (for (i ← 0 to numberOfMessages) yield (SingularCommand[SimpleMessageFormat](SimpleReply(i.toString)), Promise[Event[SimpleMessageFormat]]())).toList @@ -56,7 +59,7 @@ class ClientStageSpec extends AkkaSpec { source ⇒ import GraphDSL.Implicits._ - val s = b.add(new ClientStage[SimpleMessageFormat, SimpleMessageFormat](32, 8, 2 seconds, Processor(SimpleHandler, 1, false), SimpleMessage.protocol.reversed)) + val s = b.add(new ClientStage[Context, SimpleMessageFormat, SimpleMessageFormat](32, 8, 2 seconds, Processor(SimpleHandler, 1, false), SimpleMessage.protocol.reversed)) Source.single(ClientStage.LinkUp(Host("localhost", 9000))) ~> s.in0 source.out ~> s.in1 From 8ca8d22af1ebf91028424aa762b707e0ad790acf Mon Sep 17 00:00:00 2001 From: Gideon de Kok Date: Sun, 25 Dec 2016 12:23:09 +0100 Subject: [PATCH 38/54] Clean-up imports --- .../nl/gideondk/sentinel/client/Client.scala | 31 ++++----- .../sentinel/client/ClientStage.scala | 17 +++-- .../sentinel/pipeline/ConsumerStage.scala | 4 +- .../sentinel/pipeline/Processor.scala | 1 - .../sentinel/pipeline/ProducerStage.scala | 3 +- .../gideondk/sentinel/protocol/Action.scala | 16 ++--- .../gideondk/sentinel/protocol/Command.scala | 15 +++-- .../gideondk/sentinel/protocol/Protocol.scala | 2 + .../gideondk/sentinel/ClientStageSpec.scala | 14 ++-- .../gideondk/sentinel/ConsumerStageSpec.scala | 15 ++--- .../nl/gideondk/sentinel/ProcessorSpec.scala | 11 +--- .../gideondk/sentinel/ProducerStageSpec.scala | 5 +- .../nl/gideondk/sentinel/TestHelpers.scala | 64 ++++++++----------- 13 files changed, 86 insertions(+), 112 deletions(-) diff --git a/src/main/scala/nl/gideondk/sentinel/client/Client.scala b/src/main/scala/nl/gideondk/sentinel/client/Client.scala index aae51ca..0620560 100644 --- a/src/main/scala/nl/gideondk/sentinel/client/Client.scala +++ b/src/main/scala/nl/gideondk/sentinel/client/Client.scala @@ -2,24 +2,21 @@ package nl.gideondk.sentinel.client import java.util.concurrent.TimeUnit +import akka.NotUsed import akka.actor.ActorSystem import akka.stream._ -import akka.stream.scaladsl.{ BidiFlow, Flow, GraphDSL, RunnableGraph, Sink, Source, Tcp } -import akka.stream.stage._ +import akka.stream.scaladsl.{ BidiFlow, Flow, GraphDSL, RunnableGraph, Sink, Source } import akka.util.ByteString -import akka.{ Done, NotUsed, stream } -import nl.gideondk.sentinel.client.ClientStage.ConnectionEvent - -import scala.collection.mutable -import scala.concurrent._ -import scala.concurrent.duration._ -import scala.util.{ Failure, Success, Try } -import ClientStage._ -import Client._ import nl.gideondk.sentinel.Config +import nl.gideondk.sentinel.client.Client._ +import nl.gideondk.sentinel.client.ClientStage.{ ConnectionEvent, _ } import nl.gideondk.sentinel.pipeline.{ Processor, Resolver } import nl.gideondk.sentinel.protocol._ +import scala.concurrent._ +import scala.concurrent.duration._ +import scala.util.Try + object ClientConfig { import 
com.typesafe.config.ConfigFactory @@ -35,12 +32,6 @@ object ClientConfig { object Client { - trait ClientException - - case class InputQueueClosed() extends Exception with ClientException - - case class InputQueueUnavailable() extends Exception with ClientException - def apply[Cmd, Evt](hosts: Source[ConnectionEvent, NotUsed], resolver: Resolver[Evt], shouldReact: Boolean, inputOverflowStrategy: OverflowStrategy, protocol: BidiFlow[Cmd, ByteString, ByteString, Evt, Any])(implicit system: ActorSystem, mat: ActorMaterializer, ec: ExecutionContext): Client[Cmd, Evt] = { @@ -77,6 +68,12 @@ object Client { FlowShape(s.in1, s.out) }) } + + trait ClientException + + case class InputQueueClosed() extends Exception with ClientException + + case class InputQueueUnavailable() extends Exception with ClientException } class Client[Cmd, Evt](hosts: Source[ConnectionEvent, NotUsed], diff --git a/src/main/scala/nl/gideondk/sentinel/client/ClientStage.scala b/src/main/scala/nl/gideondk/sentinel/client/ClientStage.scala index 89e5119..7e5a066 100644 --- a/src/main/scala/nl/gideondk/sentinel/client/ClientStage.scala +++ b/src/main/scala/nl/gideondk/sentinel/client/ClientStage.scala @@ -2,12 +2,11 @@ package nl.gideondk.sentinel.client import akka.actor.ActorSystem import akka.stream._ -import akka.stream.scaladsl.{ BidiFlow, GraphDSL, RunnableGraph, Sink, Source, Tcp } +import akka.stream.scaladsl.{ BidiFlow, GraphDSL, RunnableGraph, Tcp } import akka.stream.stage._ import akka.util.ByteString -import akka.{ Done, NotUsed, stream } +import akka.{ Done, stream } import nl.gideondk.sentinel.pipeline.Processor - import nl.gideondk.sentinel.protocol.{ Command, Event } import scala.collection.mutable @@ -21,14 +20,14 @@ object ClientStage { trait ConnectionClosedException - case class ConnectionClosedWithReasonException(message: String, cause: Throwable) extends Exception(message, cause) with ConnectionClosedException - - case class ConnectionClosedWithoutReasonException(message: String) extends Exception(message) with ConnectionClosedException - trait ConnectionEvent { def host: Host } + case class ConnectionClosedWithReasonException(message: String, cause: Throwable) extends Exception(message, cause) with ConnectionClosedException + + case class ConnectionClosedWithoutReasonException(message: String) extends Exception(message) with ConnectionClosedException + case class LinkUp(host: Host) extends ConnectionEvent case class LinkDown(host: Host) extends ConnectionEvent @@ -45,8 +44,6 @@ class ClientStage[Context, Cmd, Evt](connectionsPerHost: Int, maximumFailuresPer val commandIn = Inlet[(Command[Cmd], Context)]("ClientStage.Command.In") val eventOut = Outlet[(Try[Event[Evt]], Context)]("ClientStage.Event.Out") - override def shape = new FanInShape2(connectionEventIn, commandIn, eventOut) - override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new TimerGraphStageLogic(shape) { private val hosts = mutable.Map.empty[Host, Int] private val hostFailures = mutable.Map.empty[Host, Int] @@ -264,4 +261,6 @@ class ClientStage[Context, Cmd, Evt](connectionsPerHost: Int, maximumFailuresPer } } + + override def shape = new FanInShape2(connectionEventIn, commandIn, eventOut) } \ No newline at end of file diff --git a/src/main/scala/nl/gideondk/sentinel/pipeline/ConsumerStage.scala b/src/main/scala/nl/gideondk/sentinel/pipeline/ConsumerStage.scala index f77febb..215ba68 100644 --- a/src/main/scala/nl/gideondk/sentinel/pipeline/ConsumerStage.scala +++ 
b/src/main/scala/nl/gideondk/sentinel/pipeline/ConsumerStage.scala @@ -4,17 +4,15 @@ import akka.stream._ import akka.stream.scaladsl.Source import akka.stream.stage.{ GraphStage, GraphStageLogic, InHandler, OutHandler } import nl.gideondk.sentinel.protocol.ConsumerAction._ -import nl.gideondk.sentinel._ import nl.gideondk.sentinel.protocol._ class ConsumerStage[Evt, Cmd](resolver: Resolver[Evt]) extends GraphStage[FanOutShape2[Evt, (Evt, ProducerAction[Evt, Cmd]), Event[Evt]]] { + val shape = new FanOutShape2(eventIn, actionOut, signalOut) private val eventIn = Inlet[Evt]("ConsumerStage.Event.In") private val actionOut = Outlet[(Evt, ProducerAction[Evt, Cmd])]("ConsumerStage.Action.Out") private val signalOut = Outlet[Event[Evt]]("ConsumerStage.Signal.Out") - val shape = new FanOutShape2(eventIn, actionOut, signalOut) - override def createLogic(effectiveAttributes: Attributes) = new GraphStageLogic(shape) with InHandler with OutHandler { private var chunkSource: SubSourceOutlet[Evt] = _ diff --git a/src/main/scala/nl/gideondk/sentinel/pipeline/Processor.scala b/src/main/scala/nl/gideondk/sentinel/pipeline/Processor.scala index 949e9e1..e681693 100644 --- a/src/main/scala/nl/gideondk/sentinel/pipeline/Processor.scala +++ b/src/main/scala/nl/gideondk/sentinel/pipeline/Processor.scala @@ -2,7 +2,6 @@ package nl.gideondk.sentinel.pipeline import akka.stream.BidiShape import akka.stream.scaladsl.{ BidiFlow, Flow, GraphDSL, Merge, Sink } -import nl.gideondk.sentinel._ import nl.gideondk.sentinel.protocol._ import scala.concurrent.ExecutionContext diff --git a/src/main/scala/nl/gideondk/sentinel/pipeline/ProducerStage.scala b/src/main/scala/nl/gideondk/sentinel/pipeline/ProducerStage.scala index 55217b3..cd8550b 100644 --- a/src/main/scala/nl/gideondk/sentinel/pipeline/ProducerStage.scala +++ b/src/main/scala/nl/gideondk/sentinel/pipeline/ProducerStage.scala @@ -6,11 +6,10 @@ import akka.stream.stage.{ GraphStage, GraphStageLogic, InHandler, OutHandler } import nl.gideondk.sentinel.protocol.{ Command, SingularCommand, StreamingCommand } class ProducerStage[In, Out] extends GraphStage[FlowShape[Command[Out], Out]] { + val shape = new FlowShape(in, out) private val in = Inlet[Command[Out]]("ProducerStage.Command.In") private val out = Outlet[Out]("ProducerStage.Command.Out") - val shape = new FlowShape(in, out) - override def createLogic(effectiveAttributes: Attributes) = new GraphStageLogic(shape) { var streaming = false var closeAfterCompletion = false diff --git a/src/main/scala/nl/gideondk/sentinel/protocol/Action.scala b/src/main/scala/nl/gideondk/sentinel/protocol/Action.scala index f705894..2e2edf8 100644 --- a/src/main/scala/nl/gideondk/sentinel/protocol/Action.scala +++ b/src/main/scala/nl/gideondk/sentinel/protocol/Action.scala @@ -20,26 +20,26 @@ object ProducerAction { def f: In ⇒ Future[Out] } + trait ConsumeStream[E, C] extends StreamReaction[E, C] { + def f: E ⇒ Source[E, Any] ⇒ Future[C] + } + + trait ProduceStream[E, C] extends StreamReaction[E, C] { + def f: E ⇒ Future[Source[C, Any]] + } + object Signal { def apply[E, C](fun: E ⇒ Future[C]): Signal[E, C] = new Signal[E, C] { val f = fun } } - trait ConsumeStream[E, C] extends StreamReaction[E, C] { - def f: E ⇒ Source[E, Any] ⇒ Future[C] - } - object ConsumeStream { def apply[E, A <: E, B <: E, C](fun: A ⇒ Source[B, Any] ⇒ Future[C]): ConsumeStream[E, C] = new ConsumeStream[E, C] { val f = fun.asInstanceOf[E ⇒ Source[E, Any] ⇒ Future[C]] } } - trait ProduceStream[E, C] extends StreamReaction[E, C] { - def f: E ⇒ Future[Source[C, 
Any]] - } - object ProduceStream { def apply[E, C](fun: E ⇒ Future[Source[C, Any]]): ProduceStream[E, C] = new ProduceStream[E, C] { val f = fun diff --git a/src/main/scala/nl/gideondk/sentinel/protocol/Command.scala b/src/main/scala/nl/gideondk/sentinel/protocol/Command.scala index 75ef8e9..37028d3 100644 --- a/src/main/scala/nl/gideondk/sentinel/protocol/Command.scala +++ b/src/main/scala/nl/gideondk/sentinel/protocol/Command.scala @@ -18,14 +18,17 @@ trait Registration[A, E <: Event[A]] { } object Registration { + case class SingularResponseRegistration[A](promise: Promise[SingularEvent[A]]) extends Registration[A, SingularEvent[A]] case class StreamReplyRegistration[A](promise: Promise[StreamEvent[A]]) extends Registration[A, StreamEvent[A]] + } trait Command[Out] case class SingularCommand[Out](payload: Out) extends Command[Out] + case class StreamingCommand[Out](stream: Source[Out, Any]) extends Command[Out] trait ServerCommand[Out, In] @@ -34,15 +37,15 @@ trait ServerMetric object Command { -// case class Ask[Out](payload: Out) extends Command[Out] + // case class Ask[Out](payload: Out) extends Command[Out] object Ask -// case class Tell[Out](payload: Out) extends Command[Out] -// -// case class AskStream[Out](payload: Out) extends Command[Out] -// -// case class SendStream[Out](stream: Source[Out, Any]) extends Command[Out] + // case class Tell[Out](payload: Out) extends Command[Out] + // + // case class AskStream[Out](payload: Out) extends Command[Out] + // + // case class SendStream[Out](stream: Source[Out, Any]) extends Command[Out] } diff --git a/src/main/scala/nl/gideondk/sentinel/protocol/Protocol.scala b/src/main/scala/nl/gideondk/sentinel/protocol/Protocol.scala index 5fad136..eb416db 100644 --- a/src/main/scala/nl/gideondk/sentinel/protocol/Protocol.scala +++ b/src/main/scala/nl/gideondk/sentinel/protocol/Protocol.scala @@ -8,7 +8,9 @@ import scala.concurrent.Promise case class RequestContext[Cmd, Evt](request: Cmd, responsePromise: Promise[Evt]) object Protocol { + implicit class ProtocolChaining[IT, OT, IB, OB, Mat](bf: BidiFlow[IT, OT, IB, OB, Mat]) { def >>[NextOT, NextIB, Mat2](bidi: Graph[BidiShape[OT, NextOT, NextIB, IB], Mat2]) = bf.atop(bidi) } + } \ No newline at end of file diff --git a/src/test/scala/nl/gideondk/sentinel/ClientStageSpec.scala b/src/test/scala/nl/gideondk/sentinel/ClientStageSpec.scala index 2680756..914a41b 100644 --- a/src/test/scala/nl/gideondk/sentinel/ClientStageSpec.scala +++ b/src/test/scala/nl/gideondk/sentinel/ClientStageSpec.scala @@ -1,20 +1,16 @@ package nl.gideondk.sentinel import akka.actor.ActorSystem -import akka.event.Logging -import akka.stream.{ ActorMaterializer, Attributes, ClosedShape, OverflowStrategy } -import akka.stream.scaladsl.{ Broadcast, Flow, GraphDSL, Merge, RunnableGraph, Sink, Source, Tcp } -import akka.stream.testkit.{ TestPublisher, TestSubscriber } +import akka.stream.scaladsl.{Flow, GraphDSL, RunnableGraph, Sink, Source, Tcp} +import akka.stream.{ActorMaterializer, ClosedShape, OverflowStrategy} import akka.util.ByteString -import nl.gideondk.sentinel.client.{ ClientStage, Host } +import nl.gideondk.sentinel.client.{ClientStage, Host} import nl.gideondk.sentinel.pipeline.Processor import nl.gideondk.sentinel.protocol._ -import org.scalatest._ -import protocol.SimpleMessage._ import scala.concurrent._ -import duration._ -import scala.util.{ Failure, Success, Try } +import scala.concurrent.duration._ +import scala.util.{Failure, Success, Try} object ClientStageSpec { def mockServer(system: ActorSystem, port: 
Int): Unit = { diff --git a/src/test/scala/nl/gideondk/sentinel/ConsumerStageSpec.scala b/src/test/scala/nl/gideondk/sentinel/ConsumerStageSpec.scala index 64a3e5c..7c5cffc 100644 --- a/src/test/scala/nl/gideondk/sentinel/ConsumerStageSpec.scala +++ b/src/test/scala/nl/gideondk/sentinel/ConsumerStageSpec.scala @@ -1,22 +1,19 @@ package nl.gideondk.sentinel -import akka.actor.ActorSystem -import akka.event.Logging -import akka.stream.{ ActorMaterializer, Attributes, ClosedShape } -import akka.stream.scaladsl.{ Broadcast, Flow, GraphDSL, Merge, RunnableGraph, Sink, Source } -import akka.stream.testkit.{ TestPublisher, TestSubscriber } +import akka.stream.scaladsl.{Flow, GraphDSL, RunnableGraph, Sink, Source} +import akka.stream.testkit.{TestPublisher, TestSubscriber} +import akka.stream.{ActorMaterializer, Attributes, ClosedShape} import nl.gideondk.sentinel.pipeline.ConsumerStage +import nl.gideondk.sentinel.protocol.SimpleMessage._ import nl.gideondk.sentinel.protocol._ -import org.scalatest._ -import protocol.SimpleMessage._ import scala.concurrent._ -import duration._ +import scala.concurrent.duration._ class ConsumerStageSpec extends AkkaSpec { val eventFlow = Flow[Event[SimpleMessageFormat]].flatMapConcat { - case x: StreamEvent[SimpleMessageFormat] ⇒ x.chunks + case x: StreamEvent[SimpleMessageFormat] ⇒ x.chunks case x: SingularEvent[SimpleMessageFormat] ⇒ Source.single(x.data) } diff --git a/src/test/scala/nl/gideondk/sentinel/ProcessorSpec.scala b/src/test/scala/nl/gideondk/sentinel/ProcessorSpec.scala index fb78a69..25c0b99 100644 --- a/src/test/scala/nl/gideondk/sentinel/ProcessorSpec.scala +++ b/src/test/scala/nl/gideondk/sentinel/ProcessorSpec.scala @@ -1,15 +1,11 @@ package nl.gideondk.sentinel -import akka.event.Logging -import akka.stream.{ ActorMaterializer, Attributes, ClosedShape } -import akka.stream.scaladsl.{ Flow, GraphDSL, RunnableGraph, Sink, Source } -import akka.stream.testkit.{ TestPublisher, TestSubscriber } -import nl.gideondk.sentinel.protocol.Command.Ask -import nl.gideondk.sentinel.protocol.Registration.SingularResponseRegistration +import akka.stream.scaladsl.{GraphDSL, RunnableGraph, Sink, Source} +import akka.stream.{ActorMaterializer, ClosedShape} import nl.gideondk.sentinel.pipeline.Processor import nl.gideondk.sentinel.protocol._ -import scala.concurrent.{ Await, Promise } +import scala.concurrent.Await import scala.concurrent.duration._ class ProcessorSpec extends AkkaSpec { @@ -18,7 +14,6 @@ class ProcessorSpec extends AkkaSpec { "The AntennaStage" should { "correctly flow in a client, server situation" in { - import SimpleCommand._ import nl.gideondk.sentinel.protocol.SimpleMessage._ implicit val materializer = ActorMaterializer() diff --git a/src/test/scala/nl/gideondk/sentinel/ProducerStageSpec.scala b/src/test/scala/nl/gideondk/sentinel/ProducerStageSpec.scala index 8d5ef1f..a82d5f3 100644 --- a/src/test/scala/nl/gideondk/sentinel/ProducerStageSpec.scala +++ b/src/test/scala/nl/gideondk/sentinel/ProducerStageSpec.scala @@ -1,15 +1,12 @@ package nl.gideondk.sentinel import akka.stream.ActorMaterializer -import akka.stream._ import akka.stream.scaladsl._ -import nl.gideondk.sentinel.protocol.Registration.SingularResponseRegistration import nl.gideondk.sentinel.pipeline.ProducerStage -import nl.gideondk.sentinel.protocol.{ SimpleMessageFormat, SimpleReply, SingularCommand, StreamingCommand } +import nl.gideondk.sentinel.protocol.{SimpleMessageFormat, SimpleReply, SingularCommand, StreamingCommand} import scala.concurrent._ import 
scala.concurrent.duration._ -import scala.concurrent.Promise object ProducerStageSpec { def stage() = new ProducerStage[SimpleMessageFormat, SimpleMessageFormat]() diff --git a/src/test/scala/nl/gideondk/sentinel/TestHelpers.scala b/src/test/scala/nl/gideondk/sentinel/TestHelpers.scala index 1d69c97..97a6c4a 100644 --- a/src/test/scala/nl/gideondk/sentinel/TestHelpers.scala +++ b/src/test/scala/nl/gideondk/sentinel/TestHelpers.scala @@ -1,33 +1,25 @@ package nl.gideondk.sentinel -import org.scalatest.{ Suite, BeforeAndAfterAll, WordSpec } - -import akka.util.ByteString - -import akka.actor._ -import akka.testkit._ - import java.util.concurrent.atomic.AtomicInteger -import org.scalactic.Constraint - -import language.postfixOps -import org.scalatest.{ BeforeAndAfterAll, WordSpecLike } -import org.scalatest.Matchers import akka.actor.ActorSystem -import akka.event.{ Logging, LoggingAdapter } - -import scala.concurrent.duration._ -import scala.concurrent.Future -import com.typesafe.config.{ Config, ConfigFactory } import akka.dispatch.Dispatchers +import akka.event.{Logging, LoggingAdapter} import akka.testkit.TestEvent._ -import org.scalactic.ConversionCheckedTripleEquals +import akka.testkit._ +import com.typesafe.config.{Config, ConfigFactory} +import org.scalactic.{Constraint, ConversionCheckedTripleEquals} import org.scalatest.concurrent.ScalaFutures import org.scalatest.time.Span +import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpecLike} + +import scala.concurrent.Future +import scala.concurrent.duration._ +import scala.language.postfixOps object AkkaSpec { - val testConf: Config = ConfigFactory.parseString(""" + val testConf: Config = ConfigFactory.parseString( + """ akka { loggers = ["akka.testkit.TestEventListener"] loglevel = "WARNING" @@ -55,7 +47,7 @@ object AkkaSpec { .dropWhile(_ matches "(java.lang.Thread|.*AkkaSpec.?$|.*StreamSpec.?$)") val reduced = s.lastIndexWhere(_ == clazz.getName) match { case -1 ⇒ s - case z ⇒ s drop (z + 1) + case z ⇒ s drop (z + 1) } reduced.head.replaceFirst(""".*\.""", "").replaceAll("[^a-zA-Z_0-9]", "_") } @@ -64,43 +56,42 @@ object AkkaSpec { abstract class AkkaSpec(_system: ActorSystem) - extends TestKit(_system) with WordSpecLike with Matchers with BeforeAndAfterAll - with ConversionCheckedTripleEquals with ScalaFutures { + extends TestKit(_system) with WordSpecLike with Matchers with BeforeAndAfterAll + with ConversionCheckedTripleEquals with ScalaFutures { implicit val patience = PatienceConfig(testKitSettings.DefaultTimeout.duration, Span(100, org.scalatest.time.Millis)) implicit val ec = _system.dispatcher - def this(config: Config) = this(ActorSystem( - AkkaSpec.getCallerName(getClass), - ConfigFactory.load(config.withFallback(AkkaSpec.testConf)))) - - def this(s: String) = this(ConfigFactory.parseString(s)) - - def this(configMap: Map[String, _]) = this(AkkaSpec.mapToConfig(configMap)) - - def this() = this(ActorSystem(AkkaSpec.getCallerName(getClass), AkkaSpec.testConf)) - - val log: LoggingAdapter = Logging(system, this.getClass) - override val invokeBeforeAllAndAfterAllEvenIfNoTestsAreExpected = true + val log: LoggingAdapter = Logging(system, this.getClass) final override def beforeAll { atStartup() } + protected def atStartup() {} + final override def afterAll { beforeTermination() shutdown() afterTermination() } - protected def atStartup() {} - protected def beforeTermination() {} protected def afterTermination() {} + def this(config: Config) = this(ActorSystem( + AkkaSpec.getCallerName(getClass), + 
ConfigFactory.load(config.withFallback(AkkaSpec.testConf)))) + + def this(s: String) = this(ConfigFactory.parseString(s)) + + def this(configMap: Map[String, _]) = this(AkkaSpec.mapToConfig(configMap)) + + def this() = this(ActorSystem(AkkaSpec.getCallerName(getClass), AkkaSpec.testConf)) + def spawn(dispatcherId: String = Dispatchers.DefaultDispatcherId)(body: ⇒ Unit): Unit = Future(body)(system.dispatchers.lookup(dispatcherId)) @@ -110,6 +101,7 @@ abstract class AkkaSpec(_system: ActorSystem) if (!sys.log.isDebugEnabled) { def mute(clazz: Class[_]): Unit = sys.eventStream.publish(Mute(DeadLettersFilter(clazz)(occurrences = Int.MaxValue))) + if (messageClasses.isEmpty) mute(classOf[AnyRef]) else messageClasses foreach mute } From e15ba592130a8ac0aebb19215b719b66451e6c0b Mon Sep 17 00:00:00 2001 From: Gideon de Kok Date: Sun, 25 Dec 2016 13:06:35 +0100 Subject: [PATCH 39/54] Improve test formatting --- .../sentinel/pipeline/ConsumerStage.scala | 4 +- .../sentinel/pipeline/ProducerStage.scala | 3 +- .../gideondk/sentinel/ClientStageSpec.scala | 16 +-- .../gideondk/sentinel/ConsumerStageSpec.scala | 39 ++++--- .../nl/gideondk/sentinel/ProcessorSpec.scala | 11 +- .../gideondk/sentinel/ProducerStageSpec.scala | 45 ++++--- .../nl/gideondk/sentinel/TestHelpers.scala | 110 ++---------------- 7 files changed, 81 insertions(+), 147 deletions(-) diff --git a/src/main/scala/nl/gideondk/sentinel/pipeline/ConsumerStage.scala b/src/main/scala/nl/gideondk/sentinel/pipeline/ConsumerStage.scala index 215ba68..6ea7a54 100644 --- a/src/main/scala/nl/gideondk/sentinel/pipeline/ConsumerStage.scala +++ b/src/main/scala/nl/gideondk/sentinel/pipeline/ConsumerStage.scala @@ -7,12 +7,12 @@ import nl.gideondk.sentinel.protocol.ConsumerAction._ import nl.gideondk.sentinel.protocol._ class ConsumerStage[Evt, Cmd](resolver: Resolver[Evt]) extends GraphStage[FanOutShape2[Evt, (Evt, ProducerAction[Evt, Cmd]), Event[Evt]]] { - - val shape = new FanOutShape2(eventIn, actionOut, signalOut) private val eventIn = Inlet[Evt]("ConsumerStage.Event.In") private val actionOut = Outlet[(Evt, ProducerAction[Evt, Cmd])]("ConsumerStage.Action.Out") private val signalOut = Outlet[Event[Evt]]("ConsumerStage.Signal.Out") + val shape = new FanOutShape2(eventIn, actionOut, signalOut) + override def createLogic(effectiveAttributes: Attributes) = new GraphStageLogic(shape) with InHandler with OutHandler { private var chunkSource: SubSourceOutlet[Evt] = _ diff --git a/src/main/scala/nl/gideondk/sentinel/pipeline/ProducerStage.scala b/src/main/scala/nl/gideondk/sentinel/pipeline/ProducerStage.scala index cd8550b..55217b3 100644 --- a/src/main/scala/nl/gideondk/sentinel/pipeline/ProducerStage.scala +++ b/src/main/scala/nl/gideondk/sentinel/pipeline/ProducerStage.scala @@ -6,10 +6,11 @@ import akka.stream.stage.{ GraphStage, GraphStageLogic, InHandler, OutHandler } import nl.gideondk.sentinel.protocol.{ Command, SingularCommand, StreamingCommand } class ProducerStage[In, Out] extends GraphStage[FlowShape[Command[Out], Out]] { - val shape = new FlowShape(in, out) private val in = Inlet[Command[Out]]("ProducerStage.Command.In") private val out = Outlet[Out]("ProducerStage.Command.Out") + val shape = new FlowShape(in, out) + override def createLogic(effectiveAttributes: Attributes) = new GraphStageLogic(shape) { var streaming = false var closeAfterCompletion = false diff --git a/src/test/scala/nl/gideondk/sentinel/ClientStageSpec.scala b/src/test/scala/nl/gideondk/sentinel/ClientStageSpec.scala index 914a41b..554cc0f 100644 --- 
a/src/test/scala/nl/gideondk/sentinel/ClientStageSpec.scala +++ b/src/test/scala/nl/gideondk/sentinel/ClientStageSpec.scala @@ -1,16 +1,16 @@ package nl.gideondk.sentinel import akka.actor.ActorSystem -import akka.stream.scaladsl.{Flow, GraphDSL, RunnableGraph, Sink, Source, Tcp} -import akka.stream.{ActorMaterializer, ClosedShape, OverflowStrategy} +import akka.stream.scaladsl.{ Flow, GraphDSL, RunnableGraph, Sink, Source, Tcp } +import akka.stream.{ ActorMaterializer, ClosedShape, OverflowStrategy } import akka.util.ByteString -import nl.gideondk.sentinel.client.{ClientStage, Host} +import nl.gideondk.sentinel.client.{ ClientStage, Host } import nl.gideondk.sentinel.pipeline.Processor import nl.gideondk.sentinel.protocol._ import scala.concurrent._ import scala.concurrent.duration._ -import scala.util.{Failure, Success, Try} +import scala.util.{ Failure, Success, Try } object ClientStageSpec { def mockServer(system: ActorSystem, port: Int): Unit = { @@ -35,7 +35,7 @@ object ClientStageSpec { } } -class ClientStageSpec extends AkkaSpec { +class ClientStageSpec extends SentinelSpec(ActorSystem()) { import ClientStageSpec._ @@ -69,8 +69,10 @@ class ClientStageSpec extends AkkaSpec { messages.foreach(sourceQueue.offer) val results = Future.sequence(messages.map(_._2.future)) - Await.result(results, 1 second) should be(messages.map(x ⇒ SingularEvent(x._1.payload))) - sourceQueue.complete() + whenReady(results) { result ⇒ + sourceQueue.complete() + result should equal(messages.map(x ⇒ SingularEvent(x._1.payload))) + } } } } diff --git a/src/test/scala/nl/gideondk/sentinel/ConsumerStageSpec.scala b/src/test/scala/nl/gideondk/sentinel/ConsumerStageSpec.scala index 7c5cffc..4918241 100644 --- a/src/test/scala/nl/gideondk/sentinel/ConsumerStageSpec.scala +++ b/src/test/scala/nl/gideondk/sentinel/ConsumerStageSpec.scala @@ -1,8 +1,9 @@ package nl.gideondk.sentinel -import akka.stream.scaladsl.{Flow, GraphDSL, RunnableGraph, Sink, Source} -import akka.stream.testkit.{TestPublisher, TestSubscriber} -import akka.stream.{ActorMaterializer, Attributes, ClosedShape} +import akka.actor.ActorSystem +import akka.stream.scaladsl.{ Flow, GraphDSL, RunnableGraph, Sink, Source } +import akka.stream.testkit.{ TestPublisher, TestSubscriber } +import akka.stream.{ ActorMaterializer, Attributes, ClosedShape } import nl.gideondk.sentinel.pipeline.ConsumerStage import nl.gideondk.sentinel.protocol.SimpleMessage._ import nl.gideondk.sentinel.protocol._ @@ -10,10 +11,10 @@ import nl.gideondk.sentinel.protocol._ import scala.concurrent._ import scala.concurrent.duration._ -class ConsumerStageSpec extends AkkaSpec { +class ConsumerStageSpec extends SentinelSpec(ActorSystem()) { val eventFlow = Flow[Event[SimpleMessageFormat]].flatMapConcat { - case x: StreamEvent[SimpleMessageFormat] ⇒ x.chunks + case x: StreamEvent[SimpleMessageFormat] ⇒ x.chunks case x: SingularEvent[SimpleMessageFormat] ⇒ Source.single(x.data) } @@ -36,7 +37,9 @@ class ConsumerStageSpec extends AkkaSpec { ClosedShape }) - Await.result(g.run(), 300.millis) should be(SingularEvent(SimpleReply(""))) + whenReady(g.run()) { result ⇒ + result should equal(SingularEvent(SimpleReply(""))) + } } "handle multiple incoming events" in { @@ -55,7 +58,9 @@ class ConsumerStageSpec extends AkkaSpec { ClosedShape }) - Await.result(g.run(), 300.millis) should be(Vector(SingularEvent(SimpleReply("A")), SingularEvent(SimpleReply("B")), SingularEvent(SimpleReply("C")))) + whenReady(g.run()) { result ⇒ + result should equal(Seq(SingularEvent(SimpleReply("A")), 
SingularEvent(SimpleReply("B")), SingularEvent(SimpleReply("C")))) + } } "not lose demand that comes in while handling incoming streams" in { @@ -149,7 +154,9 @@ class ConsumerStageSpec extends AkkaSpec { ClosedShape }) - Await.result(g.run(), 300.millis) should be(Seq(SimpleStreamChunk("A"), SimpleStreamChunk("B"), SimpleStreamChunk("C"))) + whenReady(g.run()) { result ⇒ + result should equal(Seq(SimpleStreamChunk("A"), SimpleStreamChunk("B"), SimpleStreamChunk("C"))) + } } "correctly output multiple stream responses" in { @@ -171,7 +178,9 @@ class ConsumerStageSpec extends AkkaSpec { ClosedShape }) - Await.result(g.run(), 300.millis) should be(items.filter(_.payload.length > 0)) + whenReady(g.run()) { result ⇒ + result should equal(items.filter(_.payload.length > 0)) + } } "correctly handle asymmetrical message types" in { @@ -196,13 +205,15 @@ class ConsumerStageSpec extends AkkaSpec { ClosedShape }) - Await.result(g.run(), 300.millis) should be(a ++ b.filter(_.payload.length > 0) ++ c) + whenReady(g.run()) { result ⇒ + result should equal(a ++ b.filter(_.payload.length > 0) ++ c) + } } "correctly output signals on event-out pipe" in { implicit val materializer = ActorMaterializer() - val a = List(SimpleCommand(PING_PONG, ""), SimpleCommand(PING_PONG, ""), SimpleCommand(PING_PONG, "")) + val items = List(SimpleCommand(PING_PONG, ""), SimpleCommand(PING_PONG, ""), SimpleCommand(PING_PONG, "")) val g = RunnableGraph.fromGraph(GraphDSL.create(Sink.seq[(SimpleMessageFormat, ProducerAction[SimpleMessageFormat, SimpleMessageFormat])]) { implicit b ⇒ sink ⇒ @@ -210,14 +221,16 @@ class ConsumerStageSpec extends AkkaSpec { val s = b add stage - Source(a) ~> s.in + Source(items) ~> s.in s.out1 ~> Sink.ignore s.out0 ~> sink.in ClosedShape }) - Await.result(g.run(), 300.millis).map(_._1) should be(a) + whenReady(g.run()) { result ⇒ + result.map(_._1) should equal(items) + } } } } \ No newline at end of file diff --git a/src/test/scala/nl/gideondk/sentinel/ProcessorSpec.scala b/src/test/scala/nl/gideondk/sentinel/ProcessorSpec.scala index 25c0b99..a16dd33 100644 --- a/src/test/scala/nl/gideondk/sentinel/ProcessorSpec.scala +++ b/src/test/scala/nl/gideondk/sentinel/ProcessorSpec.scala @@ -1,14 +1,15 @@ package nl.gideondk.sentinel -import akka.stream.scaladsl.{GraphDSL, RunnableGraph, Sink, Source} -import akka.stream.{ActorMaterializer, ClosedShape} +import akka.actor.ActorSystem +import akka.stream.scaladsl.{ GraphDSL, RunnableGraph, Sink, Source } +import akka.stream.{ ActorMaterializer, ClosedShape } import nl.gideondk.sentinel.pipeline.Processor import nl.gideondk.sentinel.protocol._ import scala.concurrent.Await import scala.concurrent.duration._ -class ProcessorSpec extends AkkaSpec { +class ProcessorSpec extends SentinelSpec(ActorSystem()) { val processor = Processor[SimpleMessageFormat, SimpleMessageFormat](SimpleHandler, 1) val serverProcessor = Processor[SimpleMessageFormat, SimpleMessageFormat](SimpleServerHandler, 1, true) @@ -43,7 +44,9 @@ class ProcessorSpec extends AkkaSpec { ClosedShape }) - Await.result(flow.run(), 5 seconds) shouldBe Vector(SingularEvent(SimpleReply("PONG")), SingularEvent(SimpleReply("PONG"))) + whenReady(flow.run()) { result ⇒ + result should equal(Seq(SingularEvent(SimpleReply("PONG")), SingularEvent(SimpleReply("PONG")))) + } } } } \ No newline at end of file diff --git a/src/test/scala/nl/gideondk/sentinel/ProducerStageSpec.scala b/src/test/scala/nl/gideondk/sentinel/ProducerStageSpec.scala index a82d5f3..6028227 100644 --- 
a/src/test/scala/nl/gideondk/sentinel/ProducerStageSpec.scala +++ b/src/test/scala/nl/gideondk/sentinel/ProducerStageSpec.scala @@ -1,18 +1,19 @@ package nl.gideondk.sentinel +import akka.actor.ActorSystem import akka.stream.ActorMaterializer import akka.stream.scaladsl._ import nl.gideondk.sentinel.pipeline.ProducerStage -import nl.gideondk.sentinel.protocol.{SimpleMessageFormat, SimpleReply, SingularCommand, StreamingCommand} +import nl.gideondk.sentinel.protocol.{ SimpleMessageFormat, SimpleReply, SingularCommand, StreamingCommand } import scala.concurrent._ import scala.concurrent.duration._ object ProducerStageSpec { - def stage() = new ProducerStage[SimpleMessageFormat, SimpleMessageFormat]() + val stage = new ProducerStage[SimpleMessageFormat, SimpleMessageFormat]() } -class ProducerStageSpec extends AkkaSpec { +class ProducerStageSpec extends SentinelSpec(ActorSystem()) { import ProducerStageSpec._ @@ -21,25 +22,31 @@ class ProducerStageSpec extends AkkaSpec { implicit val materializer = ActorMaterializer() val command = SingularCommand[SimpleMessageFormat](SimpleReply("A")) - val result = Await.result(Source(List(command)).via(stage()).runWith(Sink.seq), 5 seconds) + val singularResult = Source(List(command)).via(stage).runWith(Sink.seq) - result shouldBe Vector(SimpleReply("A")) + Await.result(singularResult, 5 seconds) should equal(Seq(SimpleReply("A"))) - val multiResult = Await.result(Source(List(command, command, command)).via(stage()).runWith(Sink.seq), 5 seconds) - multiResult shouldBe Vector(SimpleReply("A"), SimpleReply("A"), SimpleReply("A")) + // val multiResult = Source(List(command, command, command)).via(stage).runWith(Sink.seq) + // whenReady(multiResult) { result ⇒ + // result should equal(Seq(SimpleReply("A"), SimpleReply("A"), SimpleReply("A"))) + // } } - "handle outgoing streams" in { - implicit val materializer = ActorMaterializer() - - val items = List(SimpleReply("A"), SimpleReply("B"), SimpleReply("C"), SimpleReply("D")) - val command = StreamingCommand[SimpleMessageFormat](Source(items)) - - val result = Await.result(Source(List(command)).via(stage()).runWith(Sink.seq), 5 seconds) - result shouldBe items - - val multiResult = Await.result(Source(List(command, command, command)).via(stage()).runWith(Sink.seq), 5 seconds) - multiResult shouldBe (items ++ items ++ items) - } + // "handle outgoing streams" in { + // implicit val materializer = ActorMaterializer() + // + // val items = List(SimpleReply("A"), SimpleReply("B"), SimpleReply("C"), SimpleReply("D")) + // val command = StreamingCommand[SimpleMessageFormat](Source(items)) + // + // val singularResult = Source(List(command)).via(stage).runWith(Sink.seq) + // whenReady(singularResult) { result ⇒ + // result should equal(items) + // } + // + // val multiResult = Source(List(command)).via(stage).runWith(Sink.seq) + // whenReady(multiResult) { result ⇒ + // result should equal(items ++ items ++ items) + // } + // } } } \ No newline at end of file diff --git a/src/test/scala/nl/gideondk/sentinel/TestHelpers.scala b/src/test/scala/nl/gideondk/sentinel/TestHelpers.scala index 97a6c4a..ab3360e 100644 --- a/src/test/scala/nl/gideondk/sentinel/TestHelpers.scala +++ b/src/test/scala/nl/gideondk/sentinel/TestHelpers.scala @@ -3,119 +3,27 @@ package nl.gideondk.sentinel import java.util.concurrent.atomic.AtomicInteger import akka.actor.ActorSystem -import akka.dispatch.Dispatchers -import akka.event.{Logging, LoggingAdapter} -import akka.testkit.TestEvent._ +import akka.event.{ Logging, LoggingAdapter } import 
akka.testkit._ -import com.typesafe.config.{Config, ConfigFactory} -import org.scalactic.{Constraint, ConversionCheckedTripleEquals} import org.scalatest.concurrent.ScalaFutures import org.scalatest.time.Span -import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpecLike} +import org.scalatest.{ BeforeAndAfterAll, Matchers, WordSpecLike } -import scala.concurrent.Future -import scala.concurrent.duration._ import scala.language.postfixOps -object AkkaSpec { - val testConf: Config = ConfigFactory.parseString( - """ - akka { - loggers = ["akka.testkit.TestEventListener"] - loglevel = "WARNING" - stdout-loglevel = "WARNING" - actor { - default-dispatcher { - executor = "fork-join-executor" - fork-join-executor { - parallelism-min = 8 - parallelism-factor = 2.0 - parallelism-max = 8 - } - } - } - } - """) - - def mapToConfig(map: Map[String, Any]): Config = { - import scala.collection.JavaConverters._ - ConfigFactory.parseMap(map.asJava) - } - - def getCallerName(clazz: Class[_]): String = { - val s = (Thread.currentThread.getStackTrace map (_.getClassName) drop 1) - .dropWhile(_ matches "(java.lang.Thread|.*AkkaSpec.?$|.*StreamSpec.?$)") - val reduced = s.lastIndexWhere(_ == clazz.getName) match { - case -1 ⇒ s - case z ⇒ s drop (z + 1) - } - reduced.head.replaceFirst(""".*\.""", "").replaceAll("[^a-zA-Z_0-9]", "_") - } - -} - - -abstract class AkkaSpec(_system: ActorSystem) - extends TestKit(_system) with WordSpecLike with Matchers with BeforeAndAfterAll - with ConversionCheckedTripleEquals with ScalaFutures { - - implicit val patience = PatienceConfig(testKitSettings.DefaultTimeout.duration, Span(100, org.scalatest.time.Millis)) +abstract class SentinelSpec(_system: ActorSystem) + extends TestKit(_system) with WordSpecLike with Matchers with BeforeAndAfterAll with ScalaFutures { + implicit val patience = PatienceConfig(testKitSettings.DefaultTimeout.duration, Span(500, org.scalatest.time.Millis)) + override val invokeBeforeAllAndAfterAllEvenIfNoTestsAreExpected = true implicit val ec = _system.dispatcher - override val invokeBeforeAllAndAfterAllEvenIfNoTestsAreExpected = true val log: LoggingAdapter = Logging(system, this.getClass) - final override def beforeAll { - atStartup() - } - - protected def atStartup() {} - - final override def afterAll { - beforeTermination() - shutdown() - afterTermination() + override protected def afterAll(): Unit = { + super.afterAll() + TestKit.shutdownActorSystem(system) } - - protected def beforeTermination() {} - - protected def afterTermination() {} - - def this(config: Config) = this(ActorSystem( - AkkaSpec.getCallerName(getClass), - ConfigFactory.load(config.withFallback(AkkaSpec.testConf)))) - - def this(s: String) = this(ConfigFactory.parseString(s)) - - def this(configMap: Map[String, _]) = this(AkkaSpec.mapToConfig(configMap)) - - def this() = this(ActorSystem(AkkaSpec.getCallerName(getClass), AkkaSpec.testConf)) - - def spawn(dispatcherId: String = Dispatchers.DefaultDispatcherId)(body: ⇒ Unit): Unit = - Future(body)(system.dispatchers.lookup(dispatcherId)) - - def expectedTestDuration: FiniteDuration = 60 seconds - - def muteDeadLetters(messageClasses: Class[_]*)(sys: ActorSystem = system): Unit = - if (!sys.log.isDebugEnabled) { - def mute(clazz: Class[_]): Unit = - sys.eventStream.publish(Mute(DeadLettersFilter(clazz)(occurrences = Int.MaxValue))) - - if (messageClasses.isEmpty) mute(classOf[AnyRef]) - else messageClasses foreach mute - } - - // for ScalaTest === compare of Class objects - implicit def classEqualityConstraint[A, B]: 
Constraint[Class[A], Class[B]] = - new Constraint[Class[A], Class[B]] { - def areEqual(a: Class[A], b: Class[B]) = a == b - } - - implicit def setEqualityConstraint[A, T <: Set[_ <: A]]: Constraint[Set[A], T] = - new Constraint[Set[A], T] { - def areEqual(a: Set[A], b: T) = a == b - } } object TestHelpers { From 675c3c09b7d6d5bae8ed1c261d22c957aed48a0e Mon Sep 17 00:00:00 2001 From: Gideon de Kok Date: Sun, 25 Dec 2016 15:35:26 +0100 Subject: [PATCH 40/54] Increase timeout in `PatienceConfig` --- .../gideondk/sentinel/ProducerStageSpec.scala | 45 ++++++++++--------- .../nl/gideondk/sentinel/TestHelpers.scala | 2 +- 2 files changed, 24 insertions(+), 23 deletions(-) diff --git a/src/test/scala/nl/gideondk/sentinel/ProducerStageSpec.scala b/src/test/scala/nl/gideondk/sentinel/ProducerStageSpec.scala index 6028227..e49aaa0 100644 --- a/src/test/scala/nl/gideondk/sentinel/ProducerStageSpec.scala +++ b/src/test/scala/nl/gideondk/sentinel/ProducerStageSpec.scala @@ -23,30 +23,31 @@ class ProducerStageSpec extends SentinelSpec(ActorSystem()) { val command = SingularCommand[SimpleMessageFormat](SimpleReply("A")) val singularResult = Source(List(command)).via(stage).runWith(Sink.seq) + whenReady(singularResult) { result ⇒ + result should equal(Seq(SimpleReply("A"))) + } + + val multiResult = Source(List(command, command, command)).via(stage).runWith(Sink.seq) + whenReady(multiResult) { result ⇒ + result should equal(Seq(SimpleReply("A"), SimpleReply("A"), SimpleReply("A"))) + } + } - Await.result(singularResult, 5 seconds) should equal(Seq(SimpleReply("A"))) + "handle outgoing streams" in { + implicit val materializer = ActorMaterializer() - // val multiResult = Source(List(command, command, command)).via(stage).runWith(Sink.seq) - // whenReady(multiResult) { result ⇒ - // result should equal(Seq(SimpleReply("A"), SimpleReply("A"), SimpleReply("A"))) - // } - } + val items = List(SimpleReply("A"), SimpleReply("B"), SimpleReply("C"), SimpleReply("D")) + val command = StreamingCommand[SimpleMessageFormat](Source(items)) - // "handle outgoing streams" in { - // implicit val materializer = ActorMaterializer() - // - // val items = List(SimpleReply("A"), SimpleReply("B"), SimpleReply("C"), SimpleReply("D")) - // val command = StreamingCommand[SimpleMessageFormat](Source(items)) - // - // val singularResult = Source(List(command)).via(stage).runWith(Sink.seq) - // whenReady(singularResult) { result ⇒ - // result should equal(items) - // } - // - // val multiResult = Source(List(command)).via(stage).runWith(Sink.seq) - // whenReady(multiResult) { result ⇒ - // result should equal(items ++ items ++ items) - // } - // } + val singularResult = Source(List(command)).via(stage).runWith(Sink.seq) + whenReady(singularResult) { result ⇒ + result should equal(items) + } + + val multiResult = Source(List(command, command, command)).via(stage).runWith(Sink.seq) + whenReady(multiResult) { result ⇒ + result should equal((items ++ items ++ items)) + } + } } } \ No newline at end of file diff --git a/src/test/scala/nl/gideondk/sentinel/TestHelpers.scala b/src/test/scala/nl/gideondk/sentinel/TestHelpers.scala index ab3360e..40b014a 100644 --- a/src/test/scala/nl/gideondk/sentinel/TestHelpers.scala +++ b/src/test/scala/nl/gideondk/sentinel/TestHelpers.scala @@ -14,7 +14,7 @@ import scala.language.postfixOps abstract class SentinelSpec(_system: ActorSystem) extends TestKit(_system) with WordSpecLike with Matchers with BeforeAndAfterAll with ScalaFutures { - implicit val patience = 
PatienceConfig(testKitSettings.DefaultTimeout.duration, Span(500, org.scalatest.time.Millis)) + implicit val patience = PatienceConfig(testKitSettings.DefaultTimeout.duration, Span(1500, org.scalatest.time.Millis)) override val invokeBeforeAllAndAfterAllEvenIfNoTestsAreExpected = true implicit val ec = _system.dispatcher From 598654c679165d7dbbd6087736925c2e3508cf49 Mon Sep 17 00:00:00 2001 From: Gideon de Kok Date: Wed, 4 Jan 2017 13:52:10 +0100 Subject: [PATCH 41/54] Improve tests --- src/main/resources/reference.conf | 28 +++--- .../scala/nl/gideondk/sentinel/Config.scala | 4 +- .../nl/gideondk/sentinel/client/Client.scala | 88 +++++++++++++++---- .../sentinel/client/ClientStage.scala | 65 ++++++++++---- .../sentinel/pipeline/ConsumerStage.scala | 2 +- .../sentinel/pipeline/ProducerStage.scala | 2 +- .../gideondk/sentinel/protocol/Action.scala | 1 - .../gideondk/sentinel/protocol/Command.scala | 73 ++++++--------- .../gideondk/sentinel/protocol/Protocol.scala | 16 ---- .../nl/gideondk/sentinel/ClientSpec.scala | 32 +++++++ .../gideondk/sentinel/ClientStageSpec.scala | 62 +++++++++++-- .../nl/gideondk/sentinel/TestHelpers.scala | 41 ++------- 12 files changed, 258 insertions(+), 156 deletions(-) delete mode 100644 src/main/scala/nl/gideondk/sentinel/protocol/Protocol.scala create mode 100644 src/test/scala/nl/gideondk/sentinel/ClientSpec.scala diff --git a/src/main/resources/reference.conf b/src/main/resources/reference.conf index 2794e48..087b1fe 100644 --- a/src/main/resources/reference.conf +++ b/src/main/resources/reference.conf @@ -1,15 +1,15 @@ -nl { - gideondk { - sentinel { - sentinel-dispatcher { - mailbox-type = "akka.dispatch.UnboundedDequeBasedMailbox" - } - sentinel-antenna-dispatcher { - mailbox-type = "nl.gideondk.sentinel.AntennaMailbox" - } - sentinel-consumer-dispatcher { - mailbox-type = "nl.gideondk.sentinel.rx.ConsumerMailbox" - } - } - } +nl.gideondk.sentinel { + client { + host { + max-connections = 32 + max-failures = 16 + failure-recovery-duration = 4 seconds + } + input-buffer-size = 1024 + parallelism = 32 + } + + pipeline { + parallelism = 32 + } } diff --git a/src/main/scala/nl/gideondk/sentinel/Config.scala b/src/main/scala/nl/gideondk/sentinel/Config.scala index 657de77..dd9f763 100644 --- a/src/main/scala/nl/gideondk/sentinel/Config.scala +++ b/src/main/scala/nl/gideondk/sentinel/Config.scala @@ -3,9 +3,7 @@ package nl.gideondk.sentinel import com.typesafe.config.ConfigFactory object Config { - private lazy val config = ConfigFactory.load().getConfig("sentinel") + private lazy val config = ConfigFactory.load().getConfig("nl.gideondk.sentinel") val producerParallelism = config.getInt("pipeline.parallelism") - val framesize = config.getInt("pipeline.framesize") - val buffersize = config.getInt("pipeline.buffersize") } diff --git a/src/main/scala/nl/gideondk/sentinel/client/Client.scala b/src/main/scala/nl/gideondk/sentinel/client/Client.scala index 0620560..3519348 100644 --- a/src/main/scala/nl/gideondk/sentinel/client/Client.scala +++ b/src/main/scala/nl/gideondk/sentinel/client/Client.scala @@ -4,12 +4,13 @@ import java.util.concurrent.TimeUnit import akka.NotUsed import akka.actor.ActorSystem +import akka.event.Logging import akka.stream._ -import akka.stream.scaladsl.{ BidiFlow, Flow, GraphDSL, RunnableGraph, Sink, Source } +import akka.stream.scaladsl.{ BidiFlow, Broadcast, Flow, GraphDSL, RunnableGraph, Sink, Source } import akka.util.ByteString import nl.gideondk.sentinel.Config import nl.gideondk.sentinel.client.Client._ -import 
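For reference, the restructured `reference.conf` keys above are read through Typesafe Config, with duration strings such as `4 seconds` parsed by `getDuration`. A small sketch of that mapping follows; the object name is illustrative, the key names are the ones defined in the file above.

```scala
import java.util.concurrent.TimeUnit

import com.typesafe.config.ConfigFactory

import scala.concurrent.duration.{ Duration, FiniteDuration }

object SentinelConfigReadingSketch {
  // Root of the keys defined in reference.conf above.
  private val config = ConfigFactory.load().getConfig("nl.gideondk.sentinel")

  val connectionsPerHost: Int = config.getInt("client.host.max-connections")
  val maxFailuresPerHost: Int = config.getInt("client.host.max-failures")

  // "4 seconds" style values are parsed by getDuration and converted to a Scala FiniteDuration.
  val failureRecoveryPeriod: FiniteDuration =
    Duration(config.getDuration("client.host.failure-recovery-duration").toNanos, TimeUnit.NANOSECONDS)

  val inputBufferSize: Int = config.getInt("client.input-buffer-size")
}
```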
nl.gideondk.sentinel.client.ClientStage.{ ConnectionEvent, _ } +import nl.gideondk.sentinel.client.ClientStage.{ HostEvent, _ } import nl.gideondk.sentinel.pipeline.{ Processor, Resolver } import nl.gideondk.sentinel.protocol._ @@ -21,18 +22,19 @@ object ClientConfig { import com.typesafe.config.ConfigFactory - private lazy val config = ConfigFactory.load().getConfig("sentinel") + private lazy val config = ConfigFactory.load().getConfig("nl.gideondk.sentinel") val connectionsPerHost = config.getInt("client.host.max-connections") val maxFailuresPerHost = config.getInt("client.host.max-failures") val failureRecoveryPeriod = Duration(config.getDuration("client.host.failure-recovery-duration").toNanos, TimeUnit.NANOSECONDS) + val clientParallelism = config.getInt("client.parallelism") val inputBufferSize = config.getInt("client.input-buffer-size") } object Client { - def apply[Cmd, Evt](hosts: Source[ConnectionEvent, NotUsed], resolver: Resolver[Evt], + def apply[Cmd, Evt](hosts: Source[HostEvent, NotUsed], resolver: Resolver[Evt], shouldReact: Boolean, inputOverflowStrategy: OverflowStrategy, protocol: BidiFlow[Cmd, ByteString, ByteString, Evt, Any])(implicit system: ActorSystem, mat: ActorMaterializer, ec: ExecutionContext): Client[Cmd, Evt] = { val processor = Processor[Cmd, Evt](resolver, Config.producerParallelism) @@ -43,18 +45,40 @@ object Client { shouldReact: Boolean, inputOverflowStrategy: OverflowStrategy, protocol: BidiFlow[Cmd, ByteString, ByteString, Evt, Any])(implicit system: ActorSystem, mat: ActorMaterializer, ec: ExecutionContext): Client[Cmd, Evt] = { val processor = Processor[Cmd, Evt](resolver, Config.producerParallelism) - new Client(Source(hosts.map(LinkUp)), ClientConfig.connectionsPerHost, ClientConfig.maxFailuresPerHost, ClientConfig.failureRecoveryPeriod, ClientConfig.inputBufferSize, inputOverflowStrategy, processor, protocol.reversed) + new Client(Source(hosts.map(HostUp)), ClientConfig.connectionsPerHost, ClientConfig.maxFailuresPerHost, ClientConfig.failureRecoveryPeriod, ClientConfig.inputBufferSize, inputOverflowStrategy, processor, protocol.reversed) } - def flow[Cmd, Evt](hosts: Source[ConnectionEvent, NotUsed], resolver: Resolver[Evt], - shouldReact: Boolean = false, inputOverflowStrategy: OverflowStrategy, - protocol: BidiFlow[Cmd, ByteString, ByteString, Evt, Any])(implicit system: ActorSystem, mat: ActorMaterializer, ec: ExecutionContext) = { + def flow[Cmd, Evt](hosts: Source[HostEvent, NotUsed], resolver: Resolver[Evt], + shouldReact: Boolean = false, protocol: BidiFlow[Cmd, ByteString, ByteString, Evt, Any])(implicit system: ActorSystem, mat: ActorMaterializer, ec: ExecutionContext) = { val processor = Processor[Cmd, Evt](resolver, Config.producerParallelism) - val client = new Client(hosts, ClientConfig.connectionsPerHost, ClientConfig.maxFailuresPerHost, ClientConfig.failureRecoveryPeriod, ClientConfig.inputBufferSize, inputOverflowStrategy, processor, protocol.reversed) - Flow[Command[Cmd]].mapAsync(1)(cmd ⇒ client.send(cmd)) + type Context = Promise[Event[Evt]] + + val eventHandler = Sink.foreach[(Try[Event[Evt]], Promise[Event[Evt]])] { + case (evt, context) ⇒ context.complete(evt) + } + + Flow.fromGraph(GraphDSL.create(hosts) { implicit b ⇒ + connections ⇒ + import GraphDSL.Implicits._ + + val s = b.add(new ClientStage[Context, Cmd, Evt](ClientConfig.connectionsPerHost, ClientConfig.maxFailuresPerHost, ClientConfig.failureRecoveryPeriod, true, processor, protocol.reversed)) + connections ~> s.in0 + + val input = b add Flow[Command[Cmd]].map(x ⇒ 
(x, Promise[Event[Evt]]())) + val broadcast = b add Broadcast[(Command[Cmd], Promise[Event[Evt]])](2) + + val output = b add Flow[(Command[Cmd], Promise[Event[Evt]])].mapAsync(ClientConfig.clientParallelism)(_._2.future).withAttributes(Attributes.logLevels(onElement = Logging.WarningLevel)) + + s.out ~> eventHandler + input ~> broadcast + broadcast ~> output + broadcast ~> s.in1 + + FlowShape(input.in, output.out) + }) } - def rawFlow[Context, Cmd, Evt](hosts: Source[ConnectionEvent, NotUsed], resolver: Resolver[Evt], + def rawFlow[Context, Cmd, Evt](hosts: Source[HostEvent, NotUsed], resolver: Resolver[Evt], shouldReact: Boolean = false, protocol: BidiFlow[Cmd, ByteString, ByteString, Evt, Any])(implicit system: ActorSystem, mat: ActorMaterializer, ec: ExecutionContext) = { val processor = Processor[Cmd, Evt](resolver, Config.producerParallelism) @@ -63,7 +87,7 @@ object Client { connections ⇒ import GraphDSL.Implicits._ - val s = b.add(new ClientStage[Context, Cmd, Evt](ClientConfig.connectionsPerHost, ClientConfig.maxFailuresPerHost, ClientConfig.failureRecoveryPeriod, processor, protocol.reversed)) + val s = b.add(new ClientStage[Context, Cmd, Evt](ClientConfig.connectionsPerHost, ClientConfig.maxFailuresPerHost, ClientConfig.failureRecoveryPeriod, true, processor, protocol.reversed)) connections ~> s.in0 FlowShape(s.in1, s.out) }) @@ -74,9 +98,14 @@ object Client { case class InputQueueClosed() extends Exception with ClientException case class InputQueueUnavailable() extends Exception with ClientException + + case class IncorrectEventType[A](event: A) extends Exception with ClientException + + case class EventException[A](cause: A) extends Throwable + } -class Client[Cmd, Evt](hosts: Source[ConnectionEvent, NotUsed], +class Client[Cmd, Evt](hosts: Source[HostEvent, NotUsed], connectionsPerHost: Int, maximumFailuresPerHost: Int, recoveryPeriod: FiniteDuration, inputBufferSize: Int, inputOverflowStrategy: OverflowStrategy, processor: Processor[Cmd, Evt], protocol: BidiFlow[ByteString, Evt, Cmd, ByteString, Any])(implicit system: ActorSystem, mat: ActorMaterializer) { @@ -91,7 +120,7 @@ class Client[Cmd, Evt](hosts: Source[ConnectionEvent, NotUsed], source ⇒ import GraphDSL.Implicits._ - val s = b.add(new ClientStage[Context, Cmd, Evt](connectionsPerHost, maximumFailuresPerHost, recoveryPeriod, processor, protocol)) + val s = b.add(new ClientStage[Context, Cmd, Evt](connectionsPerHost, maximumFailuresPerHost, recoveryPeriod, true, processor, protocol)) b.add(hosts) ~> s.in0 source.out ~> s.in1 @@ -103,7 +132,7 @@ class Client[Cmd, Evt](hosts: Source[ConnectionEvent, NotUsed], val input = g.run() - def send(command: Command[Cmd])(implicit ec: ExecutionContext): Future[Event[Evt]] = { + private def send(command: Command[Cmd])(implicit ec: ExecutionContext): Future[Event[Evt]] = { val context = Promise[Event[Evt]]() input.offer((command, context)).flatMap { case QueueOfferResult.Dropped ⇒ Future.failed(InputQueueUnavailable()) @@ -113,4 +142,33 @@ class Client[Cmd, Evt](hosts: Source[ConnectionEvent, NotUsed], } } + def ask(command: Cmd)(implicit ec: ExecutionContext): Future[Evt] = send(SingularCommand(command)) flatMap { + case SingularEvent(x) ⇒ Future(x) + case SingularErrorEvent(x) ⇒ Future.failed(EventException(x)) + case x ⇒ Future.failed(IncorrectEventType(x)) + } + + def askStream(command: Cmd)(implicit ec: ExecutionContext): Future[Source[Evt, Any]] = send(SingularCommand(command)) flatMap { + case StreamEvent(x) ⇒ Future(x) + case SingularErrorEvent(x) ⇒ 
Future.failed(EventException(x)) + case x ⇒ Future.failed(IncorrectEventType(x)) + } + + def sendStream(stream: Source[Cmd, Any])(implicit ec: ExecutionContext): Future[Evt] = send(StreamingCommand(stream)) flatMap { + case SingularEvent(x) ⇒ Future(x) + case SingularErrorEvent(x) ⇒ Future.failed(EventException(x)) + case x ⇒ Future.failed(IncorrectEventType(x)) + } + + def sendStream(command: Cmd, stream: Source[Cmd, Any])(implicit ec: ExecutionContext): Future[Evt] = send(StreamingCommand(Source.single(command) ++ stream)) flatMap { + case SingularEvent(x) ⇒ Future(x) + case SingularErrorEvent(x) ⇒ Future.failed(EventException(x)) + case x ⇒ Future.failed(IncorrectEventType(x)) + } + + def react(stream: Source[Cmd, Any])(implicit ec: ExecutionContext): Future[Source[Evt, Any]] = send(StreamingCommand(stream)) flatMap { + case StreamEvent(x) ⇒ Future(x) + case SingularErrorEvent(x) ⇒ Future.failed(EventException(x)) + case x ⇒ Future.failed(IncorrectEventType(x)) + } } diff --git a/src/main/scala/nl/gideondk/sentinel/client/ClientStage.scala b/src/main/scala/nl/gideondk/sentinel/client/ClientStage.scala index 7e5a066..251e186 100644 --- a/src/main/scala/nl/gideondk/sentinel/client/ClientStage.scala +++ b/src/main/scala/nl/gideondk/sentinel/client/ClientStage.scala @@ -20,7 +20,7 @@ object ClientStage { trait ConnectionClosedException - trait ConnectionEvent { + trait HostEvent { def host: Host } @@ -28,9 +28,9 @@ object ClientStage { case class ConnectionClosedWithoutReasonException(message: String) extends Exception(message) with ConnectionClosedException - case class LinkUp(host: Host) extends ConnectionEvent + case class HostUp(host: Host) extends HostEvent - case class LinkDown(host: Host) extends ConnectionEvent + case class HostDown(host: Host) extends HostEvent case object NoConnectionsAvailableException extends Exception @@ -38,9 +38,9 @@ object ClientStage { import nl.gideondk.sentinel.client.ClientStage._ -class ClientStage[Context, Cmd, Evt](connectionsPerHost: Int, maximumFailuresPerHost: Int, recoveryPeriod: FiniteDuration, processor: Processor[Cmd, Evt], protocol: BidiFlow[ByteString, Evt, Cmd, ByteString, Any])(implicit system: ActorSystem, mat: ActorMaterializer) extends GraphStage[FanInShape2[ConnectionEvent, (Command[Cmd], Context), (Try[Event[Evt]], Context)]] { +class ClientStage[Context, Cmd, Evt](connectionsPerHost: Int, maximumFailuresPerHost: Int, recoveryPeriod: FiniteDuration, finishGracefully: Boolean, processor: Processor[Cmd, Evt], protocol: BidiFlow[ByteString, Evt, Cmd, ByteString, Any])(implicit system: ActorSystem, mat: ActorMaterializer) extends GraphStage[FanInShape2[HostEvent, (Command[Cmd], Context), (Try[Event[Evt]], Context)]] { - val connectionEventIn = Inlet[ConnectionEvent]("ClientStage.ConnectionEvent.In") + val connectionEventIn = Inlet[HostEvent]("ClientStage.ConnectionEvent.In") val commandIn = Inlet[(Command[Cmd], Context)]("ClientStage.Command.In") val eventOut = Outlet[(Try[Event[Evt]], Context)]("ClientStage.Event.Out") @@ -50,6 +50,7 @@ class ClientStage[Context, Cmd, Evt](connectionsPerHost: Int, maximumFailuresPer private val connectionPool = mutable.Queue.empty[Connection] private val failures = mutable.Queue.empty[(Try[Event[Evt]], Context)] private var antennaId = 0 + private var closingOnCommandIn = false override def preStart() = { pull(connectionEventIn) @@ -58,7 +59,7 @@ class ClientStage[Context, Cmd, Evt](connectionsPerHost: Int, maximumFailuresPer } def nextId() = { - antennaId += 1; + antennaId += 1 antennaId } @@ -109,7 
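The `ask`, `askStream` and `sendStream` combinators above wrap the private `send` call and unwrap the resulting `Event`, failing the future on `SingularErrorEvent` or an unexpected event type. A hedged usage sketch, assuming the `SimpleMessage` protocol fixtures from the test sources; the object name, port and payload strings are placeholders.

```scala
import akka.actor.ActorSystem
import akka.stream.scaladsl.Source
import akka.stream.{ ActorMaterializer, OverflowStrategy }
import nl.gideondk.sentinel.client.{ Client, ClientStage, Host }
import nl.gideondk.sentinel.protocol._

import scala.concurrent.Future

object ClientUsageSketch extends App {
  implicit val system = ActorSystem()
  implicit val materializer = ActorMaterializer()
  import system.dispatcher

  // Offer a single host to the client; additional HostUp events could be streamed in later.
  val client = Client(
    Source.single(ClientStage.HostUp(Host("localhost", 9999))),
    SimpleHandler,
    false, // shouldReact
    OverflowStrategy.backpressure,
    SimpleMessage.protocol)

  // Single command, single response: the SingularEvent payload is unwrapped to a plain value.
  val answer: Future[SimpleMessageFormat] = client.ask(SimpleReply("PING"))

  // Single command, streamed response: the StreamEvent source is handed back directly.
  val chunks: Future[Source[SimpleMessageFormat, Any]] = client.askStream(SimpleReply("LIST"))

  answer.foreach(reply ⇒ println(s"Received: $reply"))
}
```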
+110,6 @@ class ClientStage[Context, Cmd, Evt](connectionsPerHost: Int, maximumFailuresPer val host = connection.host val totalFailure = hostFailures.getOrElse(host, 0) + 1 hostFailures(host) = totalFailure - system.log.warning(s"Connection ${connection.connectionId} to $host failed due to ${cause.getMessage}") if (hostFailures(host) >= maximumFailuresPerHost) { @@ -146,8 +146,8 @@ class ClientStage[Context, Cmd, Evt](connectionsPerHost: Int, maximumFailuresPer setHandler(connectionEventIn, new InHandler { override def onPush() = { grab(connectionEventIn) match { - case LinkUp(connection) ⇒ addHost(connection) - case LinkDown(connection) ⇒ removeHost(connection) + case HostUp(connection) ⇒ addHost(connection) + case HostDown(connection) ⇒ removeHost(connection) } pull(connectionEventIn) } @@ -161,7 +161,15 @@ class ClientStage[Context, Cmd, Evt](connectionsPerHost: Int, maximumFailuresPer setHandler(commandIn, new InHandler { override def onPush() = pullCommand(shouldInitializeConnection = true) - override def onUpstreamFinish() = completeStage() + override def onUpstreamFinish() = { + if (finishGracefully) { + closingOnCommandIn = true + connectionPool.foreach(_.requestClose()) + } else { + connectionPool.foreach(_.close(None)) + completeStage() + } + } override def onUpstreamFailure(ex: Throwable) = failStage(throw new IllegalStateException(s"Requests stream failed", ex)) @@ -175,13 +183,16 @@ class ClientStage[Context, Cmd, Evt](connectionsPerHost: Int, maximumFailuresPer .dequeueFirst(_.canBePulledForEvent) .foreach(connection ⇒ { if (isAvailable(eventOut)) { - push(eventOut, connection.pullEvent) + val event = connection.pullEvent + push(eventOut, event) } connectionPool.enqueue(connection) }) } - override def onDownstreamFinish() = completeStage() + override def onDownstreamFinish() = { + completeStage() + } }) override def onTimer(timerKey: Any) = hostFailures.clear() @@ -191,6 +202,7 @@ class ClientStage[Context, Cmd, Evt](connectionsPerHost: Int, maximumFailuresPer private val connectionEventIn = new SubSinkInlet[Event[Evt]](s"Connection.[$host].[$connectionId].in") private val connectionCommandOut = new SubSourceOutlet[Command[Cmd]](s"Connection.[$host].[$connectionId].out") private val contexts = mutable.Queue.empty[Context] + private var closing = false def canBePushedForCommand = connectionCommandOut.isAvailable @@ -204,14 +216,27 @@ class ClientStage[Context, Cmd, Evt](connectionsPerHost: Int, maximumFailuresPer def pullEvent() = { val event = connectionEventIn.grab() val context = contexts.dequeue() - connectionEventIn.pull() - (Success(event), context) + + if (closing) { + close(None) + (Success(event), context) + } else { + connectionEventIn.pull() + (Success(event), context) + } + } + + def requestClose() = { + closing = true + if (contexts.length == 0) { + close(None) + } } def close(cause: Option[Throwable]) = { val exception = cause match { - case Some(cause) ⇒ ConnectionClosedWithReasonException(s"Failure to process request to $host at antenna $connectionId", cause) - case None ⇒ ConnectionClosedWithoutReasonException(s"Failure to process request to $host antenna $connectionId") + case Some(cause) ⇒ ConnectionClosedWithReasonException(s"Failure to process request to $host at connection $connectionId", cause) + case None ⇒ ConnectionClosedWithoutReasonException(s"Failure to process request to $host connection $connectionId") } contexts.dequeueAll(_ ⇒ true).foreach(context ⇒ { @@ -228,7 +253,9 @@ class ClientStage[Context, Cmd, Evt](connectionsPerHost: Int, 
maximumFailuresPer push(eventOut, connection.pullEvent) } - override def onUpstreamFinish() = removeConnection(connection, None) + override def onUpstreamFinish() = { + removeConnection(connection, None) + } override def onUpstreamFailure(reason: Throwable) = reason match { case t: TimeoutException ⇒ removeConnection(connection, Some(t)) @@ -239,7 +266,9 @@ class ClientStage[Context, Cmd, Evt](connectionsPerHost: Int, maximumFailuresPer connectionCommandOut.setHandler(new OutHandler { override def onPull() = pullCommand(shouldInitializeConnection = true) - override def onDownstreamFinish() = () + override def onDownstreamFinish() = { + () + } }) RunnableGraph.fromGraph(GraphDSL.create() { implicit b ⇒ diff --git a/src/main/scala/nl/gideondk/sentinel/pipeline/ConsumerStage.scala b/src/main/scala/nl/gideondk/sentinel/pipeline/ConsumerStage.scala index 6ea7a54..7a204b8 100644 --- a/src/main/scala/nl/gideondk/sentinel/pipeline/ConsumerStage.scala +++ b/src/main/scala/nl/gideondk/sentinel/pipeline/ConsumerStage.scala @@ -77,7 +77,7 @@ class ConsumerStage[Evt, Cmd](resolver: Resolver[Evt]) extends GraphStage[FanOut } def startStream(initialChunk: Option[Evt]): Unit = { - chunkSource = new SubSourceOutlet[Evt]("ChunkSource") + chunkSource = new SubSourceOutlet[Evt]("ConsumerStage.Event.In.ChunkSubStream") chunkSource.setHandler(pullThroughHandler) setHandler(eventIn, substreamHandler) setHandler(signalOut, substreamHandler) diff --git a/src/main/scala/nl/gideondk/sentinel/pipeline/ProducerStage.scala b/src/main/scala/nl/gideondk/sentinel/pipeline/ProducerStage.scala index 55217b3..c2a3555 100644 --- a/src/main/scala/nl/gideondk/sentinel/pipeline/ProducerStage.scala +++ b/src/main/scala/nl/gideondk/sentinel/pipeline/ProducerStage.scala @@ -36,7 +36,7 @@ class ProducerStage[In, Out] extends GraphStage[FlowShape[Command[Out], Out]] { def stream(outStream: Source[Out, Any]): Unit = { streaming = true - val sinkIn = new SubSinkInlet[Out]("RenderingSink") + val sinkIn = new SubSinkInlet[Out]("ProducerStage.Command.Out.ChunkSubStream") sinkIn.setHandler(new InHandler { override def onPush(): Unit = push(out, sinkIn.grab()) diff --git a/src/main/scala/nl/gideondk/sentinel/protocol/Action.scala b/src/main/scala/nl/gideondk/sentinel/protocol/Action.scala index 2e2edf8..fa27d76 100644 --- a/src/main/scala/nl/gideondk/sentinel/protocol/Action.scala +++ b/src/main/scala/nl/gideondk/sentinel/protocol/Action.scala @@ -65,7 +65,6 @@ object ConsumerAction { case object ConsumeChunkAndEndStream extends ConsumerAction case object Ignore extends ConsumerAction - } case class ConsumerActionAndData[Evt](action: ConsumerAction, data: Evt) \ No newline at end of file diff --git a/src/main/scala/nl/gideondk/sentinel/protocol/Command.scala b/src/main/scala/nl/gideondk/sentinel/protocol/Command.scala index 37028d3..1654ee0 100644 --- a/src/main/scala/nl/gideondk/sentinel/protocol/Command.scala +++ b/src/main/scala/nl/gideondk/sentinel/protocol/Command.scala @@ -31,55 +31,32 @@ case class SingularCommand[Out](payload: Out) extends Command[Out] case class StreamingCommand[Out](stream: Source[Out, Any]) extends Command[Out] -trait ServerCommand[Out, In] - -trait ServerMetric - -object Command { - - // case class Ask[Out](payload: Out) extends Command[Out] - - object Ask - - // case class Tell[Out](payload: Out) extends Command[Out] - // - // case class AskStream[Out](payload: Out) extends Command[Out] - // - // case class SendStream[Out](stream: Source[Out, Any]) extends Command[Out] - -} - -object ServerCommand { - - case 
class AskAll[Cmd, Evt](payload: Cmd, promise: Promise[List[Evt]]) extends ServerCommand[Cmd, Evt] - - case class AskAllHosts[Cmd, Evt](payload: Cmd, promise: Promise[List[Evt]]) extends ServerCommand[Cmd, Evt] - - case class AskAny[Cmd, Evt](payload: Cmd, promise: Promise[Evt]) extends ServerCommand[Cmd, Evt] - -} - -object ServerMetric { - - case object ConnectedSockets extends ServerMetric - - case object ConnectedHosts extends ServerMetric - -} - -//object Reply { +//trait ServerCommand[Out, In] +// +//trait ServerMetric +// +//object ServerCommand { +// +// case class AskAll[Cmd, Evt](payload: Cmd, promise: Promise[List[Evt]]) extends ServerCommand[Cmd, Evt] // -// case class Response[Cmd](payload: Cmd) extends Reply[Cmd] +// case class AskAllHosts[Cmd, Evt](payload: Cmd, promise: Promise[List[Evt]]) extends ServerCommand[Cmd, Evt] // -// case class StreamResponseChunk[Cmd](payload: Cmd) extends Reply[Cmd] +// case class AskAny[Cmd, Evt](payload: Cmd, promise: Promise[Evt]) extends ServerCommand[Cmd, Evt] // //} - -object Management { - - trait ManagementMessage - - case class RegisterTcpHandler(h: ActorRef) extends ManagementMessage - -} - +// +//object ServerMetric { +// +// case object ConnectedSockets extends ServerMetric +// +// case object ConnectedHosts extends ServerMetric +// +//} +// +////object Reply { +//// +//// case class Response[Cmd](payload: Cmd) extends Reply[Cmd] +//// +//// case class StreamResponseChunk[Cmd](payload: Cmd) extends Reply[Cmd] +//// +////} diff --git a/src/main/scala/nl/gideondk/sentinel/protocol/Protocol.scala b/src/main/scala/nl/gideondk/sentinel/protocol/Protocol.scala deleted file mode 100644 index eb416db..0000000 --- a/src/main/scala/nl/gideondk/sentinel/protocol/Protocol.scala +++ /dev/null @@ -1,16 +0,0 @@ -package nl.gideondk.sentinel.protocol - -import akka.stream.scaladsl.BidiFlow -import akka.stream.{ BidiShape, Graph } - -import scala.concurrent.Promise - -case class RequestContext[Cmd, Evt](request: Cmd, responsePromise: Promise[Evt]) - -object Protocol { - - implicit class ProtocolChaining[IT, OT, IB, OB, Mat](bf: BidiFlow[IT, OT, IB, OB, Mat]) { - def >>[NextOT, NextIB, Mat2](bidi: Graph[BidiShape[OT, NextOT, NextIB, IB], Mat2]) = bf.atop(bidi) - } - -} \ No newline at end of file diff --git a/src/test/scala/nl/gideondk/sentinel/ClientSpec.scala b/src/test/scala/nl/gideondk/sentinel/ClientSpec.scala new file mode 100644 index 0000000..b1e1eaa --- /dev/null +++ b/src/test/scala/nl/gideondk/sentinel/ClientSpec.scala @@ -0,0 +1,32 @@ +package nl.gideondk.sentinel + +import akka.actor.ActorSystem +import akka.stream.ActorMaterializer +import akka.stream.scaladsl.{ Sink, Source } +import nl.gideondk.sentinel.client.{ Client, ClientStage, Host } +import nl.gideondk.sentinel.protocol._ + +import scala.concurrent.Promise +import scala.util.Try + +class ClientSpec extends SentinelSpec(ActorSystem()) { + "a Client" should { + "keep message order intact" in { + val port = TestHelpers.portNumber.incrementAndGet() + val server = ClientStageSpec.mockServer(system, port) + implicit val materializer = ActorMaterializer() + + val numberOfMessages = 100 + + val messages = (for (i ← 0 to numberOfMessages) yield (SingularCommand[SimpleMessageFormat](SimpleReply(i.toString)))).toList + val sink = Sink.foreach[(Try[Event[SimpleMessageFormat]], Promise[Event[SimpleMessageFormat]])] { case (event, context) ⇒ context.complete(event) } + + val client = Client.flow(Source.single(ClientStage.HostUp(Host("localhost", port))), SimpleHandler, false, 
SimpleMessage.protocol) + val results = Source(messages).via(client).runWith(Sink.seq) + + whenReady(results) { result ⇒ + result should equal(messages.map(x ⇒ SingularEvent(x.payload))) + } + } + } +} \ No newline at end of file diff --git a/src/test/scala/nl/gideondk/sentinel/ClientStageSpec.scala b/src/test/scala/nl/gideondk/sentinel/ClientStageSpec.scala index 554cc0f..8ed5074 100644 --- a/src/test/scala/nl/gideondk/sentinel/ClientStageSpec.scala +++ b/src/test/scala/nl/gideondk/sentinel/ClientStageSpec.scala @@ -4,6 +4,7 @@ import akka.actor.ActorSystem import akka.stream.scaladsl.{ Flow, GraphDSL, RunnableGraph, Sink, Source, Tcp } import akka.stream.{ ActorMaterializer, ClosedShape, OverflowStrategy } import akka.util.ByteString +import nl.gideondk.sentinel.client.ClientStage.{ HostEvent, NoConnectionsAvailableException } import nl.gideondk.sentinel.client.{ ClientStage, Host } import nl.gideondk.sentinel.pipeline.Processor import nl.gideondk.sentinel.protocol._ @@ -27,12 +28,14 @@ object ClientStageSpec { binding.onComplete { case Success(b) ⇒ - println("Server started, listening on: " + b.localAddress) case Failure(e) ⇒ - println(s"Server could not bind to localhost:$port: ${e.getMessage}") system.terminate() } } + + def createCommand(s: String) = { + (SingularCommand[SimpleMessageFormat](SimpleReply(s)), Promise[Event[SimpleMessageFormat]]()) + } } class ClientStageSpec extends SentinelSpec(ActorSystem()) { @@ -41,23 +44,25 @@ class ClientStageSpec extends SentinelSpec(ActorSystem()) { "The ClientStage" should { "keep message order intact" in { - val server = mockServer(system, 9000) + val port = TestHelpers.portNumber.incrementAndGet() + val server = mockServer(system, port) + implicit val materializer = ActorMaterializer() type Context = Promise[Event[SimpleMessageFormat]] val numberOfMessages = 1024 - val messages = (for (i ← 0 to numberOfMessages) yield (SingularCommand[SimpleMessageFormat](SimpleReply(i.toString)), Promise[Event[SimpleMessageFormat]]())).toList - val sink = Sink.foreach[(Try[Event[SimpleMessageFormat]], Promise[Event[SimpleMessageFormat]])] { case (event, context) ⇒ context.complete(event) } + val messages = (for (i ← 0 to numberOfMessages) yield (createCommand(i.toString))).toList + val sink = Sink.foreach[(Try[Event[SimpleMessageFormat]], Context)] { case (event, context) ⇒ context.complete(event) } val g = RunnableGraph.fromGraph(GraphDSL.create(Source.queue[(Command[SimpleMessageFormat], Promise[Event[SimpleMessageFormat]])](numberOfMessages, OverflowStrategy.backpressure)) { implicit b ⇒ source ⇒ import GraphDSL.Implicits._ - val s = b.add(new ClientStage[Context, SimpleMessageFormat, SimpleMessageFormat](32, 8, 2 seconds, Processor(SimpleHandler, 1, false), SimpleMessage.protocol.reversed)) + val s = b.add(new ClientStage[Context, SimpleMessageFormat, SimpleMessageFormat](32, 8, 2 seconds, true, Processor(SimpleHandler, 1, false), SimpleMessage.protocol.reversed)) - Source.single(ClientStage.LinkUp(Host("localhost", 9000))) ~> s.in0 + Source.single(ClientStage.HostUp(Host("localhost", port))) ~> s.in0 source.out ~> s.in1 s.out ~> b.add(sink) @@ -74,5 +79,48 @@ class ClientStageSpec extends SentinelSpec(ActorSystem()) { result should equal(messages.map(x ⇒ SingularEvent(x._1.payload))) } } + + "handle host up and down events" in { + val port = TestHelpers.portNumber.incrementAndGet() + val server = mockServer(system, port) + + implicit val materializer = ActorMaterializer() + + type Context = Promise[Event[SimpleMessageFormat]] + + val hostEvents = 
Source.queue[HostEvent](10, OverflowStrategy.backpressure) + val commands = Source.queue[(Command[SimpleMessageFormat], Context)](10, OverflowStrategy.backpressure) + val events = Sink.queue[(Try[Event[SimpleMessageFormat]], Context)] + + val (hostQueue, commandQueue, eventQueue) = RunnableGraph.fromGraph(GraphDSL.create(hostEvents, commands, events)((_, _, _)) { implicit b ⇒ + (hostEvents, commands, events) ⇒ + + import GraphDSL.Implicits._ + + val s = b.add(new ClientStage[Context, SimpleMessageFormat, SimpleMessageFormat](1, 8, 2 seconds, true, Processor(SimpleHandler, 1, false), SimpleMessage.protocol.reversed)) + + hostEvents ~> s.in0 + commands ~> s.in1 + + s.out ~> events + + ClosedShape + }).run() + + commandQueue.offer(createCommand("")) + Await.result(eventQueue.pull(), 5 seconds).get._1 shouldEqual Failure(NoConnectionsAvailableException) + + hostQueue.offer(ClientStage.HostUp(Host("localhost", port))) + Thread.sleep(200) + + commandQueue.offer(createCommand("")) + Await.result(eventQueue.pull(), 5 seconds).get._1 shouldEqual Success(SingularEvent(SimpleReply(""))) + + hostQueue.offer(ClientStage.HostDown(Host("localhost", port))) + Thread.sleep(200) + + commandQueue.offer(createCommand("")) + Await.result(eventQueue.pull(), 5 seconds).get._1 shouldEqual Failure(NoConnectionsAvailableException) + } } } diff --git a/src/test/scala/nl/gideondk/sentinel/TestHelpers.scala b/src/test/scala/nl/gideondk/sentinel/TestHelpers.scala index 40b014a..9f5dcf1 100644 --- a/src/test/scala/nl/gideondk/sentinel/TestHelpers.scala +++ b/src/test/scala/nl/gideondk/sentinel/TestHelpers.scala @@ -1,5 +1,6 @@ package nl.gideondk.sentinel +import java.util.concurrent.TimeUnit import java.util.concurrent.atomic.AtomicInteger import akka.actor.ActorSystem @@ -9,6 +10,8 @@ import org.scalatest.concurrent.ScalaFutures import org.scalatest.time.Span import org.scalatest.{ BeforeAndAfterAll, Matchers, WordSpecLike } +import scala.concurrent.{ Await, Future } +import scala.concurrent.duration.Duration import scala.language.postfixOps abstract class SentinelSpec(_system: ActorSystem) @@ -24,41 +27,15 @@ abstract class SentinelSpec(_system: ActorSystem) super.afterAll() TestKit.shutdownActorSystem(system) } -} - -object TestHelpers { - val portNumber = new AtomicInteger(10500) -} -object BenchmarkHelpers { - def timed(desc: String, n: Int)(benchmark: ⇒ Unit) = { - println("* " + desc) + def benchmark[A](f: Future[A], numberOfItems: Int, waitFor: Duration = Duration(10, TimeUnit.SECONDS)): Unit = { val t = System.currentTimeMillis - benchmark + Await.result(f, waitFor) val d = System.currentTimeMillis - t - - println("* - number of ops/s: " + n / (d / 1000.0) + "\n") - } - - def throughput(desc: String, size: Double, n: Int)(benchmark: ⇒ Unit) = { - println("* " + desc) - val t = System.currentTimeMillis - benchmark - val d = System.currentTimeMillis - t - - val totalSize = n * size - println("* - number of mb/s: " + totalSize / (d / 1000.0) + "\n") + println("Number of ops/s: " + numberOfItems / (d / 1000.0) + "\n") } } -object LargerPayloadTestHelper { - def randomBSForSize(size: Int) = { - implicit val be = java.nio.ByteOrder.BIG_ENDIAN - val stringB = new StringBuilder(size) - val paddingString = "abcdefghijklmnopqrs" - - while ((stringB.length + paddingString.length) < size) stringB.append(paddingString) - - stringB.toString() - } -} +object TestHelpers { + val portNumber = new AtomicInteger(10500) +} \ No newline at end of file From ef369547718893dd9b9fffcd86b6ae69b41cf199 Mon Sep 17 00:00:00 2001 From: 
Gideon de Kok Date: Wed, 4 Jan 2017 20:24:37 +0100 Subject: [PATCH 42/54] Add naive implementation for reconnection handling --- src/main/resources/reference.conf | 2 + .../nl/gideondk/sentinel/client/Client.scala | 41 +++++++++++++++---- .../sentinel/client/ClientStage.scala | 18 ++++++-- .../nl/gideondk/sentinel/ClientSpec.scala | 35 ++++++++++++++-- .../gideondk/sentinel/ClientStageSpec.scala | 14 ++++--- 5 files changed, 88 insertions(+), 22 deletions(-) diff --git a/src/main/resources/reference.conf b/src/main/resources/reference.conf index 087b1fe..6609eb4 100644 --- a/src/main/resources/reference.conf +++ b/src/main/resources/reference.conf @@ -4,6 +4,8 @@ nl.gideondk.sentinel { max-connections = 32 max-failures = 16 failure-recovery-duration = 4 seconds + auto-reconnect = true + reconnect-duration = 2 seconds } input-buffer-size = 1024 parallelism = 32 diff --git a/src/main/scala/nl/gideondk/sentinel/client/Client.scala b/src/main/scala/nl/gideondk/sentinel/client/Client.scala index 3519348..5a8dbbc 100644 --- a/src/main/scala/nl/gideondk/sentinel/client/Client.scala +++ b/src/main/scala/nl/gideondk/sentinel/client/Client.scala @@ -6,7 +6,7 @@ import akka.NotUsed import akka.actor.ActorSystem import akka.event.Logging import akka.stream._ -import akka.stream.scaladsl.{ BidiFlow, Broadcast, Flow, GraphDSL, RunnableGraph, Sink, Source } +import akka.stream.scaladsl.{ BidiFlow, Broadcast, Flow, GraphDSL, Merge, RunnableGraph, Sink, Source } import akka.util.ByteString import nl.gideondk.sentinel.Config import nl.gideondk.sentinel.client.Client._ @@ -28,12 +28,33 @@ object ClientConfig { val maxFailuresPerHost = config.getInt("client.host.max-failures") val failureRecoveryPeriod = Duration(config.getDuration("client.host.failure-recovery-duration").toNanos, TimeUnit.NANOSECONDS) + val reconnectDuration = Duration(config.getDuration("client.host.reconnect-duration").toNanos, TimeUnit.NANOSECONDS) + val shouldReconnect = config.getBoolean("client.host.auto-reconnect") + val clientParallelism = config.getInt("client.parallelism") val inputBufferSize = config.getInt("client.input-buffer-size") } object Client { + private def reconnectLogic[M](builder: GraphDSL.Builder[M], hostEventSource: Source[HostEvent, NotUsed]#Shape, hostEventIn: Inlet[HostEvent], hostEventOut: Outlet[HostEvent])(implicit system: ActorSystem) = { + import GraphDSL.Implicits._ + implicit val b = builder + + val delay = ClientConfig.reconnectDuration + val groupDelay = Flow[HostEvent].groupBy[Host](1024, { x: HostEvent ⇒ x.host }).delay(delay).map { x ⇒ system.log.warning(s"Reconnecting after ${delay.toSeconds}s for ${x.host}"); HostUp(x.host) }.mergeSubstreams + + if (ClientConfig.shouldReconnect) { + val connectionMerge = builder.add(Merge[HostEvent](2)) + hostEventSource ~> connectionMerge ~> hostEventIn + hostEventOut ~> b.add(groupDelay) ~> connectionMerge + } else { + println("No reconnect handler") + hostEventSource ~> hostEventIn + hostEventOut ~> Sink.ignore + } + } + def apply[Cmd, Evt](hosts: Source[HostEvent, NotUsed], resolver: Resolver[Evt], shouldReact: Boolean, inputOverflowStrategy: OverflowStrategy, protocol: BidiFlow[Cmd, ByteString, ByteString, Evt, Any])(implicit system: ActorSystem, mat: ActorMaterializer, ec: ExecutionContext): Client[Cmd, Evt] = { @@ -62,14 +83,15 @@ object Client { import GraphDSL.Implicits._ val s = b.add(new ClientStage[Context, Cmd, Evt](ClientConfig.connectionsPerHost, ClientConfig.maxFailuresPerHost, ClientConfig.failureRecoveryPeriod, true, processor, protocol.reversed)) - 
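The reconnect wiring in this patch feeds `HostDown` events emitted by the stage back into its host input after a delay, re-offering them as fresh `HostUp` events. A small standalone sketch of that delay-and-re-up flow on its own; the object name, port and the way the flow is driven here are illustrative, not the exact graph built by `reconnectLogic`.

```scala
import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ Flow, Sink, Source }
import nl.gideondk.sentinel.client.ClientStage.{ HostDown, HostEvent, HostUp }
import nl.gideondk.sentinel.client.Host

import scala.concurrent.duration._

object ReconnectFlowSketch extends App {
  implicit val system = ActorSystem()
  implicit val materializer = ActorMaterializer()

  val reconnectDelay = 2.seconds

  // Every HostDown is parked in a per-host substream, delayed, and re-emitted as HostUp,
  // which is what drives the naive reconnection behaviour of the client graph.
  val reUp: Flow[HostEvent, HostEvent, NotUsed] =
    Flow[HostEvent]
      .groupBy[Host](1024, { event: HostEvent ⇒ event.host })
      .delay(reconnectDelay)
      .map(event ⇒ HostUp(event.host))
      .mergeSubstreams

  Source.single[HostEvent](HostDown(Host("localhost", 9999)))
    .via(reUp)
    .runWith(Sink.foreach(event ⇒ println(s"Re-offering $event")))
}
```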
connections ~> s.in0 + + reconnectLogic(b, connections, s.in2, s.out2) val input = b add Flow[Command[Cmd]].map(x ⇒ (x, Promise[Event[Evt]]())) val broadcast = b add Broadcast[(Command[Cmd], Promise[Event[Evt]])](2) - val output = b add Flow[(Command[Cmd], Promise[Event[Evt]])].mapAsync(ClientConfig.clientParallelism)(_._2.future).withAttributes(Attributes.logLevels(onElement = Logging.WarningLevel)) + val output = b add Flow[(Command[Cmd], Promise[Event[Evt]])].mapAsync(ClientConfig.clientParallelism)(_._2.future) - s.out ~> eventHandler + s.out1 ~> eventHandler input ~> broadcast broadcast ~> output broadcast ~> s.in1 @@ -88,8 +110,10 @@ object Client { import GraphDSL.Implicits._ val s = b.add(new ClientStage[Context, Cmd, Evt](ClientConfig.connectionsPerHost, ClientConfig.maxFailuresPerHost, ClientConfig.failureRecoveryPeriod, true, processor, protocol.reversed)) - connections ~> s.in0 - FlowShape(s.in1, s.out) + + reconnectLogic(b, connections, s.in2, s.out2) + + FlowShape(s.in1, s.out2) }) } @@ -122,10 +146,11 @@ class Client[Cmd, Evt](hosts: Source[HostEvent, NotUsed], val s = b.add(new ClientStage[Context, Cmd, Evt](connectionsPerHost, maximumFailuresPerHost, recoveryPeriod, true, processor, protocol)) - b.add(hosts) ~> s.in0 + reconnectLogic(b, b.add(hosts), s.in2, s.out2) + source.out ~> s.in1 - s.out ~> b.add(eventHandler) + s.out1 ~> b.add(eventHandler) ClosedShape }) diff --git a/src/main/scala/nl/gideondk/sentinel/client/ClientStage.scala b/src/main/scala/nl/gideondk/sentinel/client/ClientStage.scala index 251e186..a9499e8 100644 --- a/src/main/scala/nl/gideondk/sentinel/client/ClientStage.scala +++ b/src/main/scala/nl/gideondk/sentinel/client/ClientStage.scala @@ -3,6 +3,7 @@ package nl.gideondk.sentinel.client import akka.actor.ActorSystem import akka.stream._ import akka.stream.scaladsl.{ BidiFlow, GraphDSL, RunnableGraph, Tcp } +import akka.stream.stage.GraphStageLogic.EagerTerminateOutput import akka.stream.stage._ import akka.util.ByteString import akka.{ Done, stream } @@ -38,9 +39,14 @@ object ClientStage { import nl.gideondk.sentinel.client.ClientStage._ -class ClientStage[Context, Cmd, Evt](connectionsPerHost: Int, maximumFailuresPerHost: Int, recoveryPeriod: FiniteDuration, finishGracefully: Boolean, processor: Processor[Cmd, Evt], protocol: BidiFlow[ByteString, Evt, Cmd, ByteString, Any])(implicit system: ActorSystem, mat: ActorMaterializer) extends GraphStage[FanInShape2[HostEvent, (Command[Cmd], Context), (Try[Event[Evt]], Context)]] { +class ClientStage[Context, Cmd, Evt](connectionsPerHost: Int, maximumFailuresPerHost: Int, + recoveryPeriod: FiniteDuration, finishGracefully: Boolean, processor: Processor[Cmd, Evt], + protocol: BidiFlow[ByteString, Evt, Cmd, ByteString, Any])(implicit system: ActorSystem, mat: ActorMaterializer) + + extends GraphStage[BidiShape[(Command[Cmd], Context), (Try[Event[Evt]], Context), HostEvent, HostEvent]] { val connectionEventIn = Inlet[HostEvent]("ClientStage.ConnectionEvent.In") + val connectionEventOut = Outlet[HostEvent]("ClientStage.ConnectionEvent.Out") val commandIn = Inlet[(Command[Cmd], Context)]("ClientStage.Command.In") val eventOut = Outlet[(Try[Event[Evt]], Context)]("ClientStage.Event.Out") @@ -114,6 +120,7 @@ class ClientStage[Context, Cmd, Evt](connectionsPerHost: Int, maximumFailuresPer if (hostFailures(host) >= maximumFailuresPerHost) { system.log.error(cause, s"Dropping $host, failed $totalFailure times") + emit(connectionEventOut, HostDown(host)) removeHost(host, Some(cause)) } else { 
removeConnection(connection, Some(cause)) @@ -143,6 +150,8 @@ class ClientStage[Context, Cmd, Evt](connectionsPerHost: Int, maximumFailuresPer pullCommand(true) } + setHandler(connectionEventOut, EagerTerminateOutput) + setHandler(connectionEventIn, new InHandler { override def onPush() = { grab(connectionEventIn) match { @@ -195,7 +204,9 @@ class ClientStage[Context, Cmd, Evt](connectionsPerHost: Int, maximumFailuresPer } }) - override def onTimer(timerKey: Any) = hostFailures.clear() + override def onTimer(timerKey: Any) = { + hostFailures.clear() + } case class Connection(host: Host, connectionId: Int) { connection ⇒ @@ -288,8 +299,7 @@ class ClientStage[Context, Cmd, Evt](connectionsPerHost: Int, maximumFailuresPer connectionEventIn.pull() } } - } - override def shape = new FanInShape2(connectionEventIn, commandIn, eventOut) + override def shape = new BidiShape(commandIn, eventOut, connectionEventIn, connectionEventOut) } \ No newline at end of file diff --git a/src/test/scala/nl/gideondk/sentinel/ClientSpec.scala b/src/test/scala/nl/gideondk/sentinel/ClientSpec.scala index b1e1eaa..5cd8d5a 100644 --- a/src/test/scala/nl/gideondk/sentinel/ClientSpec.scala +++ b/src/test/scala/nl/gideondk/sentinel/ClientSpec.scala @@ -1,13 +1,16 @@ package nl.gideondk.sentinel import akka.actor.ActorSystem -import akka.stream.ActorMaterializer -import akka.stream.scaladsl.{ Sink, Source } +import akka.stream.{ ActorMaterializer, ClosedShape, OverflowStrategy } +import akka.stream.scaladsl.{ GraphDSL, RunnableGraph, Sink, Source } +import nl.gideondk.sentinel.client.ClientStage.NoConnectionsAvailableException import nl.gideondk.sentinel.client.{ Client, ClientStage, Host } +import nl.gideondk.sentinel.pipeline.Processor import nl.gideondk.sentinel.protocol._ -import scala.concurrent.Promise -import scala.util.Try +import scala.concurrent.{ Await, Promise, duration } +import duration._ +import scala.util.{ Failure, Success, Try } class ClientSpec extends SentinelSpec(ActorSystem()) { "a Client" should { @@ -28,5 +31,29 @@ class ClientSpec extends SentinelSpec(ActorSystem()) { result should equal(messages.map(x ⇒ SingularEvent(x.payload))) } } + + "handle connection issues" in { + val port = TestHelpers.portNumber.incrementAndGet() + val serverSystem = ActorSystem() + ClientStageSpec.mockServer(serverSystem, port) + + implicit val materializer = ActorMaterializer() + + type Context = Promise[Event[SimpleMessageFormat]] + + val client = Client(Source.single(ClientStage.HostUp(Host("localhost", port))), SimpleHandler, false, OverflowStrategy.backpressure, SimpleMessage.protocol) + + Await.result(client.ask(SimpleReply("1")), 5 seconds) shouldEqual (SimpleReply("1")) + + serverSystem.terminate() + Thread.sleep(100) + + Try(Await.result(client.ask(SimpleReply("1")), 5 seconds)) shouldEqual (Failure(NoConnectionsAvailableException)) + + ClientStageSpec.mockServer(system, port) + Thread.sleep(3000) + + Await.result(client.ask(SimpleReply("1")), 5 seconds) shouldEqual (SimpleReply("1")) + } } } \ No newline at end of file diff --git a/src/test/scala/nl/gideondk/sentinel/ClientStageSpec.scala b/src/test/scala/nl/gideondk/sentinel/ClientStageSpec.scala index 8ed5074..c899f85 100644 --- a/src/test/scala/nl/gideondk/sentinel/ClientStageSpec.scala +++ b/src/test/scala/nl/gideondk/sentinel/ClientStageSpec.scala @@ -23,11 +23,11 @@ object ClientStageSpec { conn handleWith Flow[ByteString] } - val connections = Tcp().bind("localhost", port) + val connections = Tcp().bind("localhost", port, halfClose = true) val binding = 
connections.to(handler).run() binding.onComplete { - case Success(b) ⇒ + case Success(b) ⇒ println("Bound to: " + b.localAddress) case Failure(e) ⇒ system.terminate() } @@ -62,10 +62,11 @@ class ClientStageSpec extends SentinelSpec(ActorSystem()) { val s = b.add(new ClientStage[Context, SimpleMessageFormat, SimpleMessageFormat](32, 8, 2 seconds, true, Processor(SimpleHandler, 1, false), SimpleMessage.protocol.reversed)) - Source.single(ClientStage.HostUp(Host("localhost", port))) ~> s.in0 + Source.single(ClientStage.HostUp(Host("localhost", port))) ~> s.in2 source.out ~> s.in1 - s.out ~> b.add(sink) + s.out1 ~> b.add(sink) + s.out2 ~> b.add(Sink.ignore) ClosedShape }) @@ -99,10 +100,11 @@ class ClientStageSpec extends SentinelSpec(ActorSystem()) { val s = b.add(new ClientStage[Context, SimpleMessageFormat, SimpleMessageFormat](1, 8, 2 seconds, true, Processor(SimpleHandler, 1, false), SimpleMessage.protocol.reversed)) - hostEvents ~> s.in0 + hostEvents ~> s.in2 commands ~> s.in1 - s.out ~> events + s.out1 ~> events + s.out2 ~> b.add(Sink.ignore) ClosedShape }).run() From bef08fd2402672466c43c2485befc3d16fd53054 Mon Sep 17 00:00:00 2001 From: Gideon de Kok Date: Thu, 5 Jan 2017 19:39:12 +0100 Subject: [PATCH 43/54] Add stream consumption for actions --- .../nl/gideondk/sentinel/client/Client.scala | 1 - .../sentinel/pipeline/ConsumerStage.scala | 33 ++++++++++++------- .../sentinel/pipeline/Processor.scala | 8 +++-- .../sentinel/pipeline/ProducerStage.scala | 1 - .../gideondk/sentinel/protocol/Action.scala | 11 +++++-- .../gideondk/sentinel/ConsumerStageSpec.scala | 4 +-- 6 files changed, 36 insertions(+), 22 deletions(-) diff --git a/src/main/scala/nl/gideondk/sentinel/client/Client.scala b/src/main/scala/nl/gideondk/sentinel/client/Client.scala index 5a8dbbc..595ab7e 100644 --- a/src/main/scala/nl/gideondk/sentinel/client/Client.scala +++ b/src/main/scala/nl/gideondk/sentinel/client/Client.scala @@ -49,7 +49,6 @@ object Client { hostEventSource ~> connectionMerge ~> hostEventIn hostEventOut ~> b.add(groupDelay) ~> connectionMerge } else { - println("No reconnect handler") hostEventSource ~> hostEventIn hostEventOut ~> Sink.ignore } diff --git a/src/main/scala/nl/gideondk/sentinel/pipeline/ConsumerStage.scala b/src/main/scala/nl/gideondk/sentinel/pipeline/ConsumerStage.scala index 7a204b8..7c6533c 100644 --- a/src/main/scala/nl/gideondk/sentinel/pipeline/ConsumerStage.scala +++ b/src/main/scala/nl/gideondk/sentinel/pipeline/ConsumerStage.scala @@ -6,9 +6,9 @@ import akka.stream.stage.{ GraphStage, GraphStageLogic, InHandler, OutHandler } import nl.gideondk.sentinel.protocol.ConsumerAction._ import nl.gideondk.sentinel.protocol._ -class ConsumerStage[Evt, Cmd](resolver: Resolver[Evt]) extends GraphStage[FanOutShape2[Evt, (Evt, ProducerAction[Evt, Cmd]), Event[Evt]]] { +class ConsumerStage[Evt, Cmd](resolver: Resolver[Evt]) extends GraphStage[FanOutShape2[Evt, (Event[Evt], ProducerAction[Evt, Cmd]), Event[Evt]]] { private val eventIn = Inlet[Evt]("ConsumerStage.Event.In") - private val actionOut = Outlet[(Evt, ProducerAction[Evt, Cmd])]("ConsumerStage.Action.Out") + private val actionOut = Outlet[(Event[Evt], ProducerAction[Evt, Cmd])]("ConsumerStage.Action.Out") private val signalOut = Outlet[Event[Evt]]("ConsumerStage.Signal.Out") val shape = new FanOutShape2(eventIn, actionOut, signalOut) @@ -88,29 +88,38 @@ class ConsumerStage[Evt, Cmd](resolver: Resolver[Evt]) extends GraphStage[FanOut } } - def consumeStream(initialChunk: Evt): Unit = { - // emit(actionOut, (initialChunk, 
ProducerAction.ConsumeStream(Source.fromGraph(chunkSource.source)))) + def startStreamForAction(initialChunk: Evt, action: ProducerAction.StreamReaction[Evt, Cmd]): Unit = { + chunkSource = new SubSourceOutlet[Evt]("ConsumerStage.Event.In.ChunkSubStream") + chunkSource.setHandler(pullThroughHandler) + setHandler(eventIn, substreamHandler) + setHandler(actionOut, substreamHandler) + + push(actionOut, (StreamEvent(Source.single(initialChunk) ++ Source.fromGraph(chunkSource.source)), action)) } def onPush(): Unit = { val evt = grab(eventIn) resolver.process(evt) match { - case x: ProducerAction.Signal[Evt, Cmd] ⇒ emit(actionOut, (evt, x)) + case x: ProducerAction.Signal[Evt, Cmd] ⇒ push(actionOut, (SingularEvent(evt), x)) + + case x: ProducerAction.ProduceStream[Evt, Cmd] ⇒ push(actionOut, (SingularEvent(evt), x)) + + case x: ProducerAction.ConsumeStream[Evt, Cmd] ⇒ startStreamForAction(evt, x) - // case x: ProducerAction.ProduceStream[Evt, Cmd] ⇒ emit(actionOut, (evt, x)) + case x: ProducerAction.ProcessStream[Evt, Cmd] ⇒ startStreamForAction(evt, x) - case AcceptSignal ⇒ push(signalOut, SingularEvent(evt)) + case AcceptSignal ⇒ push(signalOut, SingularEvent(evt)) - case AcceptError ⇒ push(signalOut, SingularErrorEvent(evt)) + case AcceptError ⇒ push(signalOut, SingularErrorEvent(evt)) - case StartStream ⇒ startStream(None) + case StartStream ⇒ startStream(None) - case ConsumeStreamChunk ⇒ startStream(Some(evt)) + case ConsumeStreamChunk ⇒ startStream(Some(evt)) - case ConsumeChunkAndEndStream ⇒ push(signalOut, StreamEvent(Source.single(evt))) + case ConsumeChunkAndEndStream ⇒ push(signalOut, StreamEvent(Source.single(evt))) - case Ignore ⇒ () + case Ignore ⇒ () } } diff --git a/src/main/scala/nl/gideondk/sentinel/pipeline/Processor.scala b/src/main/scala/nl/gideondk/sentinel/pipeline/Processor.scala index e681693..b74148d 100644 --- a/src/main/scala/nl/gideondk/sentinel/pipeline/Processor.scala +++ b/src/main/scala/nl/gideondk/sentinel/pipeline/Processor.scala @@ -14,9 +14,11 @@ object Processor { val consumerStage = new ConsumerStage[Evt, Cmd](resolver) val producerStage = new ProducerStage[Evt, Cmd]() - val functionApply = Flow[(Evt, ProducerAction[Evt, Cmd])].mapAsync[Command[Cmd]](producerParallism) { - case (evt, x: ProducerAction.Signal[Evt, Cmd]) ⇒ x.f(evt).map(SingularCommand[Cmd]) - case (evt, x: ProducerAction.ProduceStream[Evt, Cmd]) ⇒ x.f(evt).map(StreamingCommand[Cmd]) + val functionApply = Flow[(Event[Evt], ProducerAction[Evt, Cmd])].mapAsync[Command[Cmd]](producerParallism) { + case (SingularEvent(evt), x: ProducerAction.Signal[Evt, Cmd]) ⇒ x.f(evt).map(SingularCommand[Cmd]) + case (SingularEvent(evt), x: ProducerAction.ProduceStream[Evt, Cmd]) ⇒ x.f(evt).map(StreamingCommand[Cmd]) + case (StreamEvent(evt), x: ProducerAction.ConsumeStream[Evt, Cmd]) ⇒ x.f(evt).map(SingularCommand[Cmd]) + case (StreamEvent(evt), x: ProducerAction.ProcessStream[Evt, Cmd]) ⇒ x.f(evt).map(StreamingCommand[Cmd]) } Processor(BidiFlow.fromGraph[Command[Cmd], Cmd, Evt, Event[Evt], Any] { diff --git a/src/main/scala/nl/gideondk/sentinel/pipeline/ProducerStage.scala b/src/main/scala/nl/gideondk/sentinel/pipeline/ProducerStage.scala index c2a3555..8462577 100644 --- a/src/main/scala/nl/gideondk/sentinel/pipeline/ProducerStage.scala +++ b/src/main/scala/nl/gideondk/sentinel/pipeline/ProducerStage.scala @@ -63,6 +63,5 @@ class ProducerStage[In, Out] extends GraphStage[FlowShape[Command[Out], Out]] { sinkIn.pull() outStream.runWith(sinkIn.sink)(subFusingMaterializer) } - } } diff --git 
a/src/main/scala/nl/gideondk/sentinel/protocol/Action.scala b/src/main/scala/nl/gideondk/sentinel/protocol/Action.scala index fa27d76..117cd9e 100644 --- a/src/main/scala/nl/gideondk/sentinel/protocol/Action.scala +++ b/src/main/scala/nl/gideondk/sentinel/protocol/Action.scala @@ -21,13 +21,17 @@ object ProducerAction { } trait ConsumeStream[E, C] extends StreamReaction[E, C] { - def f: E ⇒ Source[E, Any] ⇒ Future[C] + def f: Source[E, Any] ⇒ Future[C] } trait ProduceStream[E, C] extends StreamReaction[E, C] { def f: E ⇒ Future[Source[C, Any]] } + trait ProcessStream[E, C] extends StreamReaction[E, C] { + def f: Source[E, Any] ⇒ Future[Source[C, Any]] + } + object Signal { def apply[E, C](fun: E ⇒ Future[C]): Signal[E, C] = new Signal[E, C] { val f = fun @@ -35,8 +39,8 @@ object ProducerAction { } object ConsumeStream { - def apply[E, A <: E, B <: E, C](fun: A ⇒ Source[B, Any] ⇒ Future[C]): ConsumeStream[E, C] = new ConsumeStream[E, C] { - val f = fun.asInstanceOf[E ⇒ Source[E, Any] ⇒ Future[C]] + def apply[Evt, Cmd](fun: Source[Evt, Any] ⇒ Future[Cmd]): ConsumeStream[Evt, Cmd] = new ConsumeStream[Evt, Cmd] { + val f = fun } } @@ -65,6 +69,7 @@ object ConsumerAction { case object ConsumeChunkAndEndStream extends ConsumerAction case object Ignore extends ConsumerAction + } case class ConsumerActionAndData[Evt](action: ConsumerAction, data: Evt) \ No newline at end of file diff --git a/src/test/scala/nl/gideondk/sentinel/ConsumerStageSpec.scala b/src/test/scala/nl/gideondk/sentinel/ConsumerStageSpec.scala index 4918241..6c7bcac 100644 --- a/src/test/scala/nl/gideondk/sentinel/ConsumerStageSpec.scala +++ b/src/test/scala/nl/gideondk/sentinel/ConsumerStageSpec.scala @@ -215,7 +215,7 @@ class ConsumerStageSpec extends SentinelSpec(ActorSystem()) { val items = List(SimpleCommand(PING_PONG, ""), SimpleCommand(PING_PONG, ""), SimpleCommand(PING_PONG, "")) - val g = RunnableGraph.fromGraph(GraphDSL.create(Sink.seq[(SimpleMessageFormat, ProducerAction[SimpleMessageFormat, SimpleMessageFormat])]) { implicit b ⇒ + val g = RunnableGraph.fromGraph(GraphDSL.create(Sink.seq[(Event[SimpleMessageFormat], ProducerAction[SimpleMessageFormat, SimpleMessageFormat])]) { implicit b ⇒ sink ⇒ import GraphDSL.Implicits._ @@ -229,7 +229,7 @@ class ConsumerStageSpec extends SentinelSpec(ActorSystem()) { }) whenReady(g.run()) { result ⇒ - result.map(_._1) should equal(items) + result.map(_._1).asInstanceOf[Seq[SingularEvent[SimpleMessageFormat]]].map(_.data) should equal(items) } } } From 6064aabe2039224db1bb885e2d7597ad2979680d Mon Sep 17 00:00:00 2001 From: Gideon de Kok Date: Fri, 6 Jan 2017 10:16:51 +0100 Subject: [PATCH 44/54] Add (very basic) server functionality, add integration tests between client and server --- .../nl/gideondk/sentinel/client/Client.scala | 4 - .../sentinel/pipeline/ConsumerStage.scala | 12 ++- .../sentinel/pipeline/Processor.scala | 2 +- .../gideondk/sentinel/pipeline/Resolver.scala | 3 +- .../gideondk/sentinel/protocol/Command.scala | 32 +------ .../nl/gideondk/sentinel/server/Server.scala | 43 +++++++++ .../nl/gideondk/sentinel/ProcessorSpec.scala | 2 +- .../gideondk/sentinel/ServerClientSpec.scala | 88 +++++++++++++++++++ .../sentinel/protocol/SimpleMessage.scala | 33 ++++--- 9 files changed, 160 insertions(+), 59 deletions(-) create mode 100644 src/main/scala/nl/gideondk/sentinel/server/Server.scala create mode 100644 src/test/scala/nl/gideondk/sentinel/ServerClientSpec.scala diff --git a/src/main/scala/nl/gideondk/sentinel/client/Client.scala 
b/src/main/scala/nl/gideondk/sentinel/client/Client.scala index 595ab7e..f60067e 100644 --- a/src/main/scala/nl/gideondk/sentinel/client/Client.scala +++ b/src/main/scala/nl/gideondk/sentinel/client/Client.scala @@ -4,7 +4,6 @@ import java.util.concurrent.TimeUnit import akka.NotUsed import akka.actor.ActorSystem -import akka.event.Logging import akka.stream._ import akka.stream.scaladsl.{ BidiFlow, Broadcast, Flow, GraphDSL, Merge, RunnableGraph, Sink, Source } import akka.util.ByteString @@ -106,7 +105,6 @@ object Client { Flow.fromGraph(GraphDSL.create(hosts) { implicit b ⇒ connections ⇒ - import GraphDSL.Implicits._ val s = b.add(new ClientStage[Context, Cmd, Evt](ClientConfig.connectionsPerHost, ClientConfig.maxFailuresPerHost, ClientConfig.failureRecoveryPeriod, true, processor, protocol.reversed)) @@ -146,9 +144,7 @@ class Client[Cmd, Evt](hosts: Source[HostEvent, NotUsed], val s = b.add(new ClientStage[Context, Cmd, Evt](connectionsPerHost, maximumFailuresPerHost, recoveryPeriod, true, processor, protocol)) reconnectLogic(b, b.add(hosts), s.in2, s.out2) - source.out ~> s.in1 - s.out1 ~> b.add(eventHandler) ClosedShape diff --git a/src/main/scala/nl/gideondk/sentinel/pipeline/ConsumerStage.scala b/src/main/scala/nl/gideondk/sentinel/pipeline/ConsumerStage.scala index 7c6533c..c122094 100644 --- a/src/main/scala/nl/gideondk/sentinel/pipeline/ConsumerStage.scala +++ b/src/main/scala/nl/gideondk/sentinel/pipeline/ConsumerStage.scala @@ -28,6 +28,8 @@ class ConsumerStage[Evt, Cmd](resolver: Resolver[Evt]) extends GraphStage[FanOut * * */ + implicit def mat = this.materializer + val pullThroughHandler = new OutHandler { override def onPull() = { pull(eventIn) @@ -45,7 +47,8 @@ class ConsumerStage[Evt, Cmd](resolver: Resolver[Evt]) extends GraphStage[FanOut override def onPush(): Unit = { val chunk = grab(eventIn) - resolver.process(chunk) match { + + resolver.process(mat)(chunk) match { case ConsumeStreamChunk ⇒ chunkSource.push(chunk) @@ -80,7 +83,6 @@ class ConsumerStage[Evt, Cmd](resolver: Resolver[Evt]) extends GraphStage[FanOut chunkSource = new SubSourceOutlet[Evt]("ConsumerStage.Event.In.ChunkSubStream") chunkSource.setHandler(pullThroughHandler) setHandler(eventIn, substreamHandler) - setHandler(signalOut, substreamHandler) initialChunk match { case Some(x) ⇒ push(signalOut, StreamEvent(Source.single(x) ++ Source.fromGraph(chunkSource.source))) @@ -100,7 +102,7 @@ class ConsumerStage[Evt, Cmd](resolver: Resolver[Evt]) extends GraphStage[FanOut def onPush(): Unit = { val evt = grab(eventIn) - resolver.process(evt) match { + resolver.process(mat)(evt) match { case x: ProducerAction.Signal[Evt, Cmd] ⇒ push(actionOut, (SingularEvent(evt), x)) case x: ProducerAction.ProduceStream[Evt, Cmd] ⇒ push(actionOut, (SingularEvent(evt), x)) @@ -124,7 +126,9 @@ class ConsumerStage[Evt, Cmd](resolver: Resolver[Evt]) extends GraphStage[FanOut } def onPull(): Unit = { - if (!chunkSubStreamStarted && !hasBeenPulled(eventIn)) pull(eventIn) + if (!chunkSubStreamStarted && !hasBeenPulled(eventIn)) { + pull(eventIn) + } } setHandler(actionOut, this) diff --git a/src/main/scala/nl/gideondk/sentinel/pipeline/Processor.scala b/src/main/scala/nl/gideondk/sentinel/pipeline/Processor.scala index b74148d..11eea89 100644 --- a/src/main/scala/nl/gideondk/sentinel/pipeline/Processor.scala +++ b/src/main/scala/nl/gideondk/sentinel/pipeline/Processor.scala @@ -1,6 +1,6 @@ package nl.gideondk.sentinel.pipeline -import akka.stream.BidiShape +import akka.stream.{ BidiShape, Materializer } import 
akka.stream.scaladsl.{ BidiFlow, Flow, GraphDSL, Merge, Sink } import nl.gideondk.sentinel.protocol._ diff --git a/src/main/scala/nl/gideondk/sentinel/pipeline/Resolver.scala b/src/main/scala/nl/gideondk/sentinel/pipeline/Resolver.scala index 07bd85c..450df42 100644 --- a/src/main/scala/nl/gideondk/sentinel/pipeline/Resolver.scala +++ b/src/main/scala/nl/gideondk/sentinel/pipeline/Resolver.scala @@ -1,8 +1,9 @@ package nl.gideondk.sentinel.pipeline +import akka.stream.Materializer import nl.gideondk.sentinel.protocol.Action trait Resolver[In] { - def process: PartialFunction[In, Action] + def process(implicit mat: Materializer): PartialFunction[In, Action] } diff --git a/src/main/scala/nl/gideondk/sentinel/protocol/Command.scala b/src/main/scala/nl/gideondk/sentinel/protocol/Command.scala index 1654ee0..55f528f 100644 --- a/src/main/scala/nl/gideondk/sentinel/protocol/Command.scala +++ b/src/main/scala/nl/gideondk/sentinel/protocol/Command.scala @@ -29,34 +29,4 @@ trait Command[Out] case class SingularCommand[Out](payload: Out) extends Command[Out] -case class StreamingCommand[Out](stream: Source[Out, Any]) extends Command[Out] - -//trait ServerCommand[Out, In] -// -//trait ServerMetric -// -//object ServerCommand { -// -// case class AskAll[Cmd, Evt](payload: Cmd, promise: Promise[List[Evt]]) extends ServerCommand[Cmd, Evt] -// -// case class AskAllHosts[Cmd, Evt](payload: Cmd, promise: Promise[List[Evt]]) extends ServerCommand[Cmd, Evt] -// -// case class AskAny[Cmd, Evt](payload: Cmd, promise: Promise[Evt]) extends ServerCommand[Cmd, Evt] -// -//} -// -//object ServerMetric { -// -// case object ConnectedSockets extends ServerMetric -// -// case object ConnectedHosts extends ServerMetric -// -//} -// -////object Reply { -//// -//// case class Response[Cmd](payload: Cmd) extends Reply[Cmd] -//// -//// case class StreamResponseChunk[Cmd](payload: Cmd) extends Reply[Cmd] -//// -////} +case class StreamingCommand[Out](stream: Source[Out, Any]) extends Command[Out] \ No newline at end of file diff --git a/src/main/scala/nl/gideondk/sentinel/server/Server.scala b/src/main/scala/nl/gideondk/sentinel/server/Server.scala new file mode 100644 index 0000000..07b263e --- /dev/null +++ b/src/main/scala/nl/gideondk/sentinel/server/Server.scala @@ -0,0 +1,43 @@ +package nl.gideondk.sentinel.server + +import akka.actor.ActorSystem +import akka.stream.scaladsl.{ BidiFlow, Flow, GraphDSL, Sink, Source, Tcp } +import akka.stream.{ ActorMaterializer, FlowShape } +import akka.util.ByteString +import nl.gideondk.sentinel.pipeline.{ Processor, Resolver } + +import scala.concurrent.ExecutionContext +import scala.util.{ Failure, Success } + +object Server { + def apply[Cmd, Evt](interface: String, port: Int, resolver: Resolver[Evt], protocol: BidiFlow[ByteString, Evt, Cmd, ByteString, Any])(implicit system: ActorSystem, mat: ActorMaterializer, ec: ExecutionContext): Unit = { + + val handler = Sink.foreach[Tcp.IncomingConnection] { conn ⇒ + val processor = Processor[Cmd, Evt](resolver, 1, true) + + val flow = Flow.fromGraph(GraphDSL.create() { implicit b ⇒ + import GraphDSL.Implicits._ + + val pipeline = b.add(processor.flow.atop(protocol.reversed)) + + pipeline.in1 <~ Source.empty + pipeline.out2 ~> Sink.ignore + + FlowShape(pipeline.in2, pipeline.out1) + }) + + conn handleWith flow + } + + val connections = Tcp().bind(interface, port, halfClose = true) + val binding = connections.to(handler).run() + + binding.onComplete { + case Success(b) ⇒ println("Bound to: " + b.localAddress) + case Failure(e) ⇒ + 
system.terminate() + } + + binding + } +} diff --git a/src/test/scala/nl/gideondk/sentinel/ProcessorSpec.scala b/src/test/scala/nl/gideondk/sentinel/ProcessorSpec.scala index a16dd33..be00796 100644 --- a/src/test/scala/nl/gideondk/sentinel/ProcessorSpec.scala +++ b/src/test/scala/nl/gideondk/sentinel/ProcessorSpec.scala @@ -14,7 +14,7 @@ class ProcessorSpec extends SentinelSpec(ActorSystem()) { val serverProcessor = Processor[SimpleMessageFormat, SimpleMessageFormat](SimpleServerHandler, 1, true) "The AntennaStage" should { - "correctly flow in a client, server situation" in { + "correctly flow in a client, server like situation" in { import nl.gideondk.sentinel.protocol.SimpleMessage._ implicit val materializer = ActorMaterializer() diff --git a/src/test/scala/nl/gideondk/sentinel/ServerClientSpec.scala b/src/test/scala/nl/gideondk/sentinel/ServerClientSpec.scala new file mode 100644 index 0000000..5c30161 --- /dev/null +++ b/src/test/scala/nl/gideondk/sentinel/ServerClientSpec.scala @@ -0,0 +1,88 @@ +package nl.gideondk.sentinel + +import akka.actor.ActorSystem +import akka.stream.{ ActorMaterializer, ClosedShape, OverflowStrategy } +import akka.stream.scaladsl.{ GraphDSL, RunnableGraph, Sink, Source } +import nl.gideondk.sentinel.client.ClientStage.NoConnectionsAvailableException +import nl.gideondk.sentinel.client.{ Client, ClientStage, Host } +import nl.gideondk.sentinel.pipeline.Processor +import nl.gideondk.sentinel.protocol._ +import nl.gideondk.sentinel.server.Server + +import scala.concurrent.{ Await, Promise, duration } +import duration._ +import scala.util.{ Failure, Success, Try } + +class ServerClientSpec extends SentinelSpec(ActorSystem()) { + "a Server and Client" should { + "keep message order intact" in { + val port = TestHelpers.portNumber.incrementAndGet() + val server = ClientStageSpec.mockServer(system, port) + implicit val materializer = ActorMaterializer() + + val numberOfMessages = 100 + + val messages = (for (i ← 0 to numberOfMessages) yield (SingularCommand[SimpleMessageFormat](SimpleReply(i.toString)))).toList + val sink = Sink.foreach[(Try[Event[SimpleMessageFormat]], Promise[Event[SimpleMessageFormat]])] { case (event, context) ⇒ context.complete(event) } + + val client = Client.flow(Source.single(ClientStage.HostUp(Host("localhost", port))), SimpleHandler, false, SimpleMessage.protocol) + val results = Source(messages).via(client).runWith(Sink.seq) + + whenReady(results) { result ⇒ + result should equal(messages.map(x ⇒ SingularEvent(x.payload))) + } + } + + "handle connection issues" in { + val port = TestHelpers.portNumber.incrementAndGet() + val serverSystem = ActorSystem() + ClientStageSpec.mockServer(serverSystem, port) + + implicit val materializer = ActorMaterializer() + + type Context = Promise[Event[SimpleMessageFormat]] + + val client = Client(Source.single(ClientStage.HostUp(Host("localhost", port))), SimpleHandler, false, OverflowStrategy.backpressure, SimpleMessage.protocol) + + Await.result(client.ask(SimpleReply("1")), 5 seconds) shouldEqual (SimpleReply("1")) + + serverSystem.terminate() + Thread.sleep(100) + + Try(Await.result(client.ask(SimpleReply("1")), 5 seconds)) shouldEqual (Failure(NoConnectionsAvailableException)) + + ClientStageSpec.mockServer(system, port) + Thread.sleep(3000) + + Await.result(client.ask(SimpleReply("1")), 5 seconds) shouldEqual (SimpleReply("1")) + } + + "correctly handle asymmetrical message types in a client, server situation" in { + import nl.gideondk.sentinel.protocol.SimpleMessage._ + + val port = 
TestHelpers.portNumber.incrementAndGet() + implicit val materializer = ActorMaterializer() + + type Context = Promise[Event[SimpleMessageFormat]] + + val server = Server("localhost", port, SimpleServerHandler, SimpleMessage.protocol.reversed) + val client = Client(Source.single(ClientStage.HostUp(Host("localhost", port))), SimpleHandler, false, OverflowStrategy.backpressure, SimpleMessage.protocol) + + val pingCommand = SimpleCommand(PING_PONG, "") + val generateNumbersCommand = SimpleCommand(GENERATE_NUMBERS, "1024") + val sendStream = Source.single(SimpleCommand(TOTAL_CHUNK_SIZE, "")) ++ Source(List.fill(1024)(SimpleStreamChunk("A"))) ++ Source.single(SimpleStreamChunk("")) + + Await.result(client.ask(pingCommand), 5 seconds) shouldBe SimpleReply("PONG") + Await.result(client.sendStream(sendStream), 5 seconds) shouldBe SimpleReply("1024") + Await.result(client.ask(pingCommand), 5 seconds) shouldBe SimpleReply("PONG") + Await.result(client.askStream(generateNumbersCommand).flatMap(x ⇒ x.runWith(Sink.seq)), 5 seconds) shouldBe (for (i ← 0 until 1024) yield (SimpleStreamChunk(i.toString))) + + // Await.result(flow.run(), 5 seconds) + // whenReady(flow.run()) { result ⇒ + // result should equal(Seq(SingularEvent(SimpleReply("PONG")), SingularEvent(SimpleReply("PONG")))) + // } + } + } +} + +//Server \ No newline at end of file diff --git a/src/test/scala/nl/gideondk/sentinel/protocol/SimpleMessage.scala b/src/test/scala/nl/gideondk/sentinel/protocol/SimpleMessage.scala index 8a97860..51c8bd8 100644 --- a/src/test/scala/nl/gideondk/sentinel/protocol/SimpleMessage.scala +++ b/src/test/scala/nl/gideondk/sentinel/protocol/SimpleMessage.scala @@ -1,12 +1,12 @@ package nl.gideondk.sentinel.protocol -import akka.stream.scaladsl.{ BidiFlow, Framing } +import akka.stream.{ ActorMaterializer, Materializer } +import akka.stream.scaladsl.{ BidiFlow, Framing, Sink, Source } import akka.util.{ ByteString, ByteStringBuilder } -import nl.gideondk.sentinel._ import nl.gideondk.sentinel.pipeline.Resolver -import scala.concurrent.Future import scala.concurrent.ExecutionContext.Implicits.global +import scala.concurrent.Future sealed trait SimpleMessageFormat { def payload: String @@ -72,10 +72,10 @@ object SimpleMessage { def protocol = flow.atop(Framing.simpleFramingProtocol(1024)) } -import SimpleMessage._ +import nl.gideondk.sentinel.protocol.SimpleMessage._ object SimpleHandler extends Resolver[SimpleMessageFormat] { - def process: PartialFunction[SimpleMessageFormat, Action] = { + def process(implicit mat: Materializer): PartialFunction[SimpleMessageFormat, Action] = { case SimpleStreamChunk(x) ⇒ if (x.length > 0) ConsumerAction.ConsumeStreamChunk else ConsumerAction.EndStream case x: SimpleError ⇒ ConsumerAction.AcceptError case x: SimpleReply ⇒ ConsumerAction.AcceptSignal @@ -85,18 +85,17 @@ object SimpleHandler extends Resolver[SimpleMessageFormat] { } object SimpleServerHandler extends Resolver[SimpleMessageFormat] { - def process: PartialFunction[SimpleMessageFormat, Action] = { + def process(implicit mat: Materializer): PartialFunction[SimpleMessageFormat, Action] = { + case SimpleStreamChunk(x) ⇒ if (x.length > 0) ConsumerAction.ConsumeStreamChunk else ConsumerAction.EndStream case SimpleCommand(PING_PONG, payload) ⇒ ProducerAction.Signal { x: SimpleCommand ⇒ Future(SimpleReply("PONG")) } - case x ⇒ println("Unhandled: " + x); ConsumerAction.Ignore - - // case SimpleCommand(TOTAL_CHUNK_SIZE, payload) ⇒ ProducerAction.ConsumeStream { x: SimpleCommand ⇒ - // s: Enumerator[SimpleStreamChunk] ⇒ - // s 
|>>> Iteratee.fold(0) { (b, a) ⇒ b + a.payload.length } map (x ⇒ SimpleReply(x.toString)) - // } - // case SimpleCommand(GENERATE_NUMBERS, payload) ⇒ ProducerAction.ProduceStream { x: SimpleCommand ⇒ - // val count = payload.toInt - // Future((Enumerator(List.range(0, count): _*) &> Enumeratee.map(x ⇒ SimpleStreamChunk(x.toString))) >>> Enumerator(SimpleStreamChunk(""))) - // } - // case SimpleCommand(ECHO, payload) ⇒ ProducerAction.Signal { x: SimpleCommand ⇒ Future(SimpleReply(x.payload)) } + case SimpleCommand(TOTAL_CHUNK_SIZE, payload) ⇒ ProducerAction.ConsumeStream { x: Source[SimpleStreamChunk, Any] ⇒ + x.runWith(Sink.fold[Int, SimpleMessageFormat](0) { (b, a) ⇒ b + a.payload.length }).map(x ⇒ SimpleReply(x.toString)) + } + case SimpleCommand(GENERATE_NUMBERS, payload) ⇒ ProducerAction.ProduceStream { x: SimpleCommand ⇒ + val count = payload.toInt + Future(Source(List.range(0, count)).map(x ⇒ SimpleStreamChunk(x.toString)) ++ Source.single(SimpleStreamChunk(""))) + } + case SimpleCommand(ECHO, payload) ⇒ ProducerAction.Signal { x: SimpleCommand ⇒ Future(SimpleReply(x.payload)) } + case x ⇒ println("Unhandled: " + x); ConsumerAction.Ignore } } \ No newline at end of file From d09270056427268802fcfc5a415dc5a130803c40 Mon Sep 17 00:00:00 2001 From: Gideon de Kok Date: Fri, 6 Jan 2017 13:14:20 +0100 Subject: [PATCH 45/54] Update README for 0.8-M1 release --- README.md | 190 ++++++++---------- project/Build.scala | 2 +- .../gideondk/sentinel/ServerClientSpec.scala | 51 +---- 3 files changed, 86 insertions(+), 157 deletions(-) diff --git a/README.md b/README.md index 26a63d6..9cf2499 100644 --- a/README.md +++ b/README.md @@ -1,37 +1,36 @@ # Sentinel -![Sentinel](http://images.wikia.com/matrix/images/c/c2/Sentinel_Print.jpg) +**Sentinel** is boilerplate for TCP based servers and clients through Using Akka IO and Akka Streams. -## Overview +The framework's focus is to abstract away the nitty gritty parts of stream based communication to have a solution for reactive TCP communication with reasonable defaults. +Sentinel is designed for usage in persistent connection environments, making it less suited for things like HTTP and best suited for database clients and persistent communication stacks stacks. -**Sentinel** is boilerplate for TCP based servers and clients through Akka IO (2.3). - -The implementation focusses on raw performance, using pipelines through multiple sockets represented by multiple workers (both client / server side). Sentinel is designed for usage in persistent connection environments, making it (currently) less suited for things like HTTP and best suited for DB clients / RPC stacks. - -Sentinel brings a unique symmetrical design through *Antennas*, resulting in the same request and response handling on both clients and servers. This not only makes it simple to share code on both sides, but also opens the possibility to inverse request & response flow from server to client. - -In its current state, it's being used internally as a platform to test performance strategies for CPU and IO bound services. In the nearby future, Sentinel will fuel both [Raiku](http://github.com/gideondk/raiku) as other soon-to-be-released Akka based libraries. +Sentinel brings a symmetrical design through *Processors*, resulting in the same request and response handling on both clients and servers. This not only makes it simple to share code on both sides, but also opens the possibility to inverse request & response flow from server to client. 
## Status The current codebase of Sentinel can change heavily over releases. -In overall, treat Sentinel as pre-release alpha software. +In overall, treat Sentinel as alpha software. **Currently available in Sentinel:** -* Easy initialization of TCP servers and clients for default or custom router worker strategies; -* Supervision (and restart / reconnection functionality) on clients for a defined number of workers; -* Streaming requests and responses (currently) based on Play Iteratees; -* Direct server to client communication through symmetrical signal handling design. +* Easy initialization of TCP clients, capable of handing normal request and response based flows as streaming requests and responses. +* Connection pooling and management and accompanied flow handling for clients. +* Reactive manner how handling available hosts / endpoints on clients. +* Basic server template using the same constructs / protocol as client. + +**The following is currently missing in Sentinel, but will be added soon:** + +* A far more solid test suite. +* Better error handling and recovery. +* Default functionality for callback based protocols. +* More solid server implementation, with possibility of direct server to client communication. + +**(Currently) known issues:** -The following is currently missing in Sentinel, but will be added soon: +* There is no active (demand) buffering process within the client; when a stream is requested, but not consumed, additional requests on the same socket aren't demanded and therefore not pulled into new requests. -* Replacement of `Iteratees` in favour of the upcoming *Akka Streams*; -* A far more solid test suite; -* Better error handling and recovery; -* Default functionality for callback based protocols; -* Streaming server to client communication. ## Installation You can install Sentinel through source (by publishing it into your local Ivy repository): @@ -46,19 +45,15 @@ Or by adding the repo: to your SBT configuration and adding Sentinel to your library dependencies (currently only build against Scala 2.11):
libraryDependencies ++= Seq(
-  "nl.gideondk" %% "sentinel" % "0.7.5.1"
+  "nl.gideondk" %% "sentinel" % "0.8-M1"
 )
 
## Architecture -The internal structure of Sentinel relies on a *Antenna* actor. The Antenna represents the connection between a client and a server and handles both the outgoing commands as incoming replies and handles the events received from the underlying *TCP* actors. +The internal structure of Sentinel relies on the `Processor` BidiFlow. The Processor represents the connection between a client and a server and handles both the outgoing commands as incoming events through a `ProducerStage` and `ConsumerStage`. -Within the antenna structure, two child actors are defined. One used for consuming replies from the connected host and one for the production of values for the connected host. - -Both clients as servers share the same antenna construction, which results in a symmetrical design for sending and receiving commands. When a message is received from the opposing host, a *resolver* is used to determine the action or reaction on the received event. Based on the used protocol (as defined in the underlying protocol pipeline), a host can process the event and decide whether the consume the received event or to respond with new values (as in a normal request -> response way). - -Once, for instance, a command is sent to a client (for a response from the connected server), the payload is sent to the opposing host and a reply-registration is set within the consumer part of the antenna. This registration and accompanying promise is completed with the consequential response from the server. +Both clients as servers share the same `Processor`, which results in a symmetrical design for sending and receiving commands. When a message is received from the opposing host, a `Resolver` is used to determine the action or reaction on the received event. Based on the used protocol (which is defined as an additional `BidiFlow`, converting `ByteStrings` to `Events` and `Commands` to `ByteStrings`, a host can process the event and decide whether the consume the received event or to respond with new values (as in a normal request -> response way). ## Actions The handle incoming events, multiple actions are defined which can be used to implement logic on top of the used protocol. Actions are split into consumer actions and producers actions, which make a antenna able to: @@ -74,8 +69,6 @@ The handle incoming events, multiple actions are defined which can be used to im `ConsumeChunkAndEndStream`: Consumes the chunk and terminates the stream (combination of the two above) -`Ignore`: Ignores the current received signal - ### Producer Actions `Signal`: Responds to the incoming signal with a new (async) signal @@ -84,52 +77,24 @@ The handle incoming events, multiple actions are defined which can be used to im `ProduceStream`: Produces a stream (Enumerator) for the requesting hosts ## Synchronicity -Normally, Sentinel clients connect to servers through multiple sockets to increase parallel performance on top of the synchronous nature of *TCP* sockets. Producers and consumers implement a state machine to correctly respond to running incoming and outgoing streams, handling messages which don't impose treats to the message flow and stashing messages which could leak into the running streams. +Normally, Sentinel clients connect to servers through multiple sockets to increase parallel performance on top of the synchronous nature of *TCP* sockets. -Because of the synchronous nature of the underlying semantics, you have to handle each receiving signal in a appropriate way. 
Not handling all signals correctly could result in values ending up in incorrect registrations etc. +Because of the *synchronous* nature of the underlying semantics, you have to handle each receiving signal in a appropriate way. Not handling all signals correctly could result in values ending up in incorrect order etc. ## Initialization -### Pipelines -The Pipeline implementation available in Akka 2.2 is becoming obsolete in Akka 2.3 to be replaced with a (better) alternative later on in Akka 2.4. As it seemed that pipelines aren't the best solution for Akka, this currently leaves Akka 2.3 without a reactive *protocol layer*. To bridge the period until a definite solution is available, the "older" pipeline implementation is packaged along with Sentinel. - -The pipeline implementation focusses on the definition of pipes for both incoming as outgoing messages. In these pipelines, a definition is made how incoming or outgoing messages are parsed and formatted. - -Each of these *stages* can easily be composed into a bigger stage (`A => B >> B => C`) taking a the input of the first stage and outputting the format of the last stage. Within Sentinel, the eventual output send to the IO workers is in the standard `ByteString` format, making it necessary that the end stage of the pipeline always outputs content of the `ByteString` type: - -```scala -case class PingPongMessageFormat(s: String) - -class PingPongMessageStage extends SymmetricPipelineStage[PipelineContext, - PingPongMessageFormat, ByteString] { - - override def apply(ctx: PipelineContext) = new SymmetricPipePair[PingPongMessageFormat, ByteString] { - implicit val byteOrder = ctx.byteOrder - - override val commandPipeline = { msg: PingPongMessageFormat ⇒ - Seq(Right(ByteString(msg.s))) - } - - override val eventPipeline = { bs: ByteString ⇒ - Seq(Left(PingPongMessageFormat(new String(bs.toArray)))) - } - } -} -``` - ### Resolver The default resolver for a client is one that automatically accepts all signals. This default behaviour makes it able to handle basic protocols asynchronously without defining a custom resolver on the client side. 
It's easy to extend the behaviour on the client side for receiving stream responses by defining a custom `Resolver`: ```scala -import SimpleMessage._ trait DefaultSimpleMessageHandler extends Resolver[SimpleMessageFormat, SimpleMessageFormat] { - def process = { - case SimpleStreamChunk(x) ⇒ if (x.length > 0) ConsumerAction.ConsumeStreamChunk else ConsumerAction.EndStream - - case x: SimpleError ⇒ ConsumerAction.AcceptError - case x: SimpleReply ⇒ ConsumerAction.AcceptSignal + def process(implicit mat: Materializer): PartialFunction[SimpleMessageFormat, Action] = { + case SimpleStreamChunk(x) ⇒ if (x.length > 0) ConsumerAction.ConsumeStreamChunk else ConsumerAction.EndStream + case x: SimpleError ⇒ ConsumerAction.AcceptError + case x: SimpleReply ⇒ ConsumerAction.AcceptSignal + case SimpleCommand(PING_PONG, payload) ⇒ ProducerAction.Signal { x: SimpleCommand ⇒ Future(SimpleReply("PONG")) } } } @@ -141,19 +106,16 @@ In a traditional structure, a different resolver should be used on the server si ```scala object SimpleServerHandler extends DefaultSimpleMessageHandler { - override def process = super.process orElse { + def process(implicit mat: Materializer): PartialFunction[SimpleMessageFormat, Action] = { + case SimpleStreamChunk(x) ⇒ if (x.length > 0) ConsumerAction.ConsumeStreamChunk else ConsumerAction.EndStream case SimpleCommand(PING_PONG, payload) ⇒ ProducerAction.Signal { x: SimpleCommand ⇒ Future(SimpleReply("PONG")) } - - case SimpleCommand(TOTAL_CHUNK_SIZE, payload) ⇒ ProducerAction.ConsumeStream { x: SimpleCommand ⇒ - s: Enumerator[SimpleStreamChunk] ⇒ - s |>>> Iteratee.fold(0) { (b, a) ⇒ b + a.payload.length } map (x ⇒ SimpleReply(x.toString)) + case SimpleCommand(TOTAL_CHUNK_SIZE, payload) ⇒ ProducerAction.ConsumeStream { x: Source[SimpleStreamChunk, Any] ⇒ + x.runWith(Sink.fold[Int, SimpleMessageFormat](0) { (b, a) ⇒ b + a.payload.length }).map(x ⇒ SimpleReply(x.toString)) } - case SimpleCommand(GENERATE_NUMBERS, payload) ⇒ ProducerAction.ProduceStream { x: SimpleCommand ⇒ val count = payload.toInt - Future((Enumerator(List.range(0, count): _*) &> Enumeratee.map(x ⇒ SimpleStreamChunk(x.toString))) >>> Enumerator(SimpleStreamChunk(""))) + Future(Source(List.range(0, count)).map(x ⇒ SimpleStreamChunk(x.toString)) ++ Source.single(SimpleStreamChunk(""))) } - case SimpleCommand(ECHO, payload) ⇒ ProducerAction.Signal { x: SimpleCommand ⇒ Future(SimpleReply(x.payload)) } } } @@ -161,37 +123,67 @@ object SimpleServerHandler extends DefaultSimpleMessageHandler { Like illustrated, the `ProducerAction.Signal` producer action makes it able to respond with a Async response. Taking a function which handles the incoming event and producing a new value, wrapped in a `Future`. 
-`ProducerAction.ConsumeStream` takes a function handling the incoming event and the Enumerator with the consequential chunks, resulting in a new value wrapped in a `Future` +`ProducerAction.ConsumeStream` takes a function handling the incoming `Source` with the consequential chunks, resulting in a new value wrapped in a `Future` -`ProducerAction.ProduceStream` takes a function handling the incoming event and returning a corresponding stream as a `Enumerator` wrapped in a `Future` +`ProducerAction.ProduceStream` takes a function handling the incoming event and returning a corresponding stream as a `Source` wrapped in a `Future` ### Client After the definition of the pipeline, a client is easily created: ```scala -Client.randomRouting("localhost", 9999, 4, "Ping Client", stages = stages, resolver = resolver) +val client = Client(Source.single(ClientStage.HostUp(Host("localhost", port))), SimpleHandler, false, OverflowStrategy.backpressure, SimpleMessage.protocol) +``` + +The client takes a `Source[HostEvent, Any]]` as *hosts* parameter. Using this stream of either `HostUp` or `HostDown` events, the client updates its connection pool to a potentially changing set of endpoints. + +The Client succeedingly takes the `Resolver` as parameter, a `shouldReact` parameter to configure the client if it should react to incoming events (for server to client communication), the to-be-used `OverflowStrategy` for incoming commands and the protocol `BidiFlow` to be used (`BidiFlow[Cmd, ByteString, ByteString, Evt, Any]`) + + +The client has a set of configurable settings: + +``` +nl.gideondk.sentinel { + client { + host { + max-connections = 32 + max-failures = 16 + failure-recovery-duration = 4 seconds + auto-reconnect = true + reconnect-duration = 2 seconds + } + input-buffer-size = 1024 + } +} ``` -Defining the host and port where the client should connect to, the amount of workers used to handle commands / events, description of the client and the earlier defined context, stages and resolver (for the complete list of parameters, check the code for the moment). +`max-connections`: defines the amount of sockets to be opened per connected host. -You can use the `randomRouting` / `roundRobinRouting` methods depending on the routing strategy you want to use to communicate to the workers. For a more custom approach the `apply` method is available, which lets you define a router strategy yourself. +`max-failures`: defines the amount of (socket) failures a host may encounter before the host is removed from the connection pool. + +`failure-recovery-duration`: period after which the failure rate is resetted per connection. + +`auto-reconnect`: when set, `HostDown` events from the client (after disconnect) are refeeded back as `HostUp` events into the client for reconnection purposes. + +`reconnect-duration`: the reconnection delay. + +`input-buffer-size`: The input buffer size of the client (before the configured `OverFlowStrategy` is used. ### Server -When the stages and resolver are defined, creation of a server is very straight forward: +When the protocol and resolver are defined, creation of a server is very straight forward: ```scala -Server(portNumber, SimpleServerHandler, "Server", SimpleMessage.stages) +Server("localhost", port, SimpleServerHandler, SimpleMessage.protocol.reversed) ``` -This will automatically start the server with the corresponding stages and handler, in the future, separate functionality for starting, restarting and stopping services will be available. 
+This will automatically start the server with the corresponding processor and handler, in the future, separate functionality for starting, restarting and stopping services will be available. ## Client usage -Once a client and / or server has been set up, the `?` method can be used on the client to send a command to the connected server. Results are wrapped into a `Future` containing the type `Evt` defined in the incoming stage of the client. +Once a client and / or server has been set up, the `ask` method can be used on the client to send a command to the connected server. Results are wrapped into a `Future` containing the type `Evt` defined in the incoming stage of the client. ```scala -PingPongTestHelper.pingClient ? PingPongMessageFormat("PING") -res0: Future[PingPongMessageFormat] +client.ask(SimpleCommand(PING_PONG, "PING"))` +res0: Future[SimpleMessageFormat] ``` The bare bone approach to sending / receiving messages is focussed on the idea that a higher-level API on top of Sentinel is responsible to make client usage more comfortable. @@ -203,45 +195,31 @@ Sentinels structure for streaming requests and responses works best with protoco It's possible to stream content towards Sentinel clients by using the the `?<<-` command, expecting the command to be send to the server, accompanied by the actual stream: ```scala -c ?<<- (SimpleCommand(TOTAL_CHUNK_SIZE, ""), Enumerator(chunks: _*)) -res0: Future[SimpleCommand] +val stream = Source.single(SimpleCommand(TOTAL_CHUNK_SIZE, "")) ++ Source(List.fill(1024)(SimpleStreamChunk("A"))) ++ Source.single(SimpleStreamChunk("")) -c ?<<- Enumerator((SimpleCommand(TOTAL_CHUNK_SIZE, "") ++ chunks): _*) -res1: Future[SimpleCommand] +client.sendStream(stream) +res0: Future[SimpleMessageFormat] ``` -The content within the *Enumerator* is folded to send each item to the TCP connection (returning in the `Evt` type, defined through the pipeline). +The content within the *Source* is sent over the TCP connection (returning in the `Evt` type, defined through the pipeline). #### Receiving In the same manner, a stream can be requested from the server: ```scala -c ?->> SimpleCommand(GENERATE_NUMBERS, count.toString) -res0: Future[Enumerator[SimpleCommand]] +client.askStream(SimpleCommand(GENERATE_NUMBERS, "1024")) +res0: Future[Source[SimpleMessageFormat, Any]] ``` -## Server usage -Although functionality will be expanded in the future, it's currently also possible to send requests from the server to the connected clients. This can be used for retrieval of client information on servers request, but could also be used as a retrieval pattern where clients are dormant after request, but respond to requests when necessary (retrieving sensor info per example). - -The following commands can be used to retrieve information: - -`?`: Sends command to *one* (randomly chosen) connected socket for a answer, resulting in one event. - -`?*`: Sends a command to all connected hosts, resulting in a list of events from each host individually. - -`?**`: Sends a command to all connected sockets, resulting in a list of events from all connected sockets. - -Simple server metrics are available through the `connectedSockets` and `connectedHosts` commands, returning a `Future[Int]` containing the corresponding count. +# Credits +The idea and internals for a large part of the client's connection pooling comes from [Maciej Ciołeks](https://github.com/maciekciolek) his wonderful [akka-http-lb](https://github.com/codeheroesdev/akka-http-lb) library. 
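For reference, the pieces documented above can be wired together end to end. The following is a minimal, self-contained sketch (not taken from the patch itself): it reuses the `SimpleMessage` protocol and the `SimpleHandler` / `SimpleServerHandler` resolvers that live in the repository's test sources, and the port number and object name are arbitrary assumptions; in a real project you would supply your own protocol `BidiFlow` and `Resolver`.

```scala
import akka.actor.ActorSystem
import akka.stream.{ ActorMaterializer, OverflowStrategy }
import akka.stream.scaladsl.{ Sink, Source }
import nl.gideondk.sentinel.client.{ Client, ClientStage, Host }
import nl.gideondk.sentinel.protocol._
import nl.gideondk.sentinel.protocol.SimpleMessage._
import nl.gideondk.sentinel.server.Server

import scala.concurrent.Await
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._

object SentinelSketch extends App {
  implicit val system = ActorSystem()
  implicit val materializer = ActorMaterializer()

  val port = 9999 // arbitrary free port

  // Bind a server on the SimpleMessage protocol with the server-side resolver.
  Server("localhost", port, SimpleServerHandler, SimpleMessage.protocol.reversed)

  // Connect a client to the same endpoint with the client-side resolver.
  val client = Client(
    Source.single(ClientStage.HostUp(Host("localhost", port))),
    SimpleHandler, false, OverflowStrategy.backpressure, SimpleMessage.protocol)

  // Plain request / response.
  println(Await.result(client.ask(SimpleCommand(PING_PONG, "")), 5.seconds)) // SimpleReply("PONG")

  // Streaming response: request a stream and drain it into a sequence of chunks.
  val chunks = Await.result(
    client.askStream(SimpleCommand(GENERATE_NUMBERS, "8")).flatMap(_.runWith(Sink.seq)),
    5.seconds)
  println(chunks) // SimpleStreamChunk("0") .. SimpleStreamChunk("7")

  system.terminate()
}
```

`Await` is only used to keep the sketch linear; real code would compose the returned `Future`s instead of blocking.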
# License -Copyright © 2014 Gideon de Kok +Copyright © 2017 Gideon de Kok Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - - -[![Bitdeli Badge](https://d2weczhvl823v0.cloudfront.net/gideondk/sentinel/trend.png)](https://bitdeli.com/free "Bitdeli Badge") diff --git a/project/Build.scala b/project/Build.scala index d7b5d59..bc09f8a 100755 --- a/project/Build.scala +++ b/project/Build.scala @@ -6,7 +6,7 @@ object ApplicationBuild extends Build { override lazy val settings = super.settings ++ Seq( name := "sentinel", - version := "0.8-SNAPSHOT", + version := "0.8-M1", organization := "nl.gideondk", scalaVersion := "2.11.8", parallelExecution in Test := false, diff --git a/src/test/scala/nl/gideondk/sentinel/ServerClientSpec.scala b/src/test/scala/nl/gideondk/sentinel/ServerClientSpec.scala index 5c30161..ebeef0d 100644 --- a/src/test/scala/nl/gideondk/sentinel/ServerClientSpec.scala +++ b/src/test/scala/nl/gideondk/sentinel/ServerClientSpec.scala @@ -15,48 +15,6 @@ import scala.util.{ Failure, Success, Try } class ServerClientSpec extends SentinelSpec(ActorSystem()) { "a Server and Client" should { - "keep message order intact" in { - val port = TestHelpers.portNumber.incrementAndGet() - val server = ClientStageSpec.mockServer(system, port) - implicit val materializer = ActorMaterializer() - - val numberOfMessages = 100 - - val messages = (for (i ← 0 to numberOfMessages) yield (SingularCommand[SimpleMessageFormat](SimpleReply(i.toString)))).toList - val sink = Sink.foreach[(Try[Event[SimpleMessageFormat]], Promise[Event[SimpleMessageFormat]])] { case (event, context) ⇒ context.complete(event) } - - val client = Client.flow(Source.single(ClientStage.HostUp(Host("localhost", port))), SimpleHandler, false, SimpleMessage.protocol) - val results = Source(messages).via(client).runWith(Sink.seq) - - whenReady(results) { result ⇒ - result should equal(messages.map(x ⇒ SingularEvent(x.payload))) - } - } - - "handle connection issues" in { - val port = TestHelpers.portNumber.incrementAndGet() - val serverSystem = ActorSystem() - ClientStageSpec.mockServer(serverSystem, port) - - implicit val materializer = ActorMaterializer() - - type Context = Promise[Event[SimpleMessageFormat]] - - val client = Client(Source.single(ClientStage.HostUp(Host("localhost", port))), SimpleHandler, false, OverflowStrategy.backpressure, SimpleMessage.protocol) - - Await.result(client.ask(SimpleReply("1")), 5 seconds) shouldEqual (SimpleReply("1")) - - serverSystem.terminate() - Thread.sleep(100) - - Try(Await.result(client.ask(SimpleReply("1")), 5 seconds)) shouldEqual (Failure(NoConnectionsAvailableException)) - - ClientStageSpec.mockServer(system, port) - Thread.sleep(3000) - - Await.result(client.ask(SimpleReply("1")), 5 seconds) shouldEqual (SimpleReply("1")) - } - "correctly handle asymmetrical message types in a client, server situation" in { import nl.gideondk.sentinel.protocol.SimpleMessage._ @@ -76,13 +34,6 @@ class ServerClientSpec extends SentinelSpec(ActorSystem()) { Await.result(client.sendStream(sendStream), 5 seconds) shouldBe SimpleReply("1024") 
Await.result(client.ask(pingCommand), 5 seconds) shouldBe SimpleReply("PONG") Await.result(client.askStream(generateNumbersCommand).flatMap(x ⇒ x.runWith(Sink.seq)), 5 seconds) shouldBe (for (i ← 0 until 1024) yield (SimpleStreamChunk(i.toString))) - - // Await.result(flow.run(), 5 seconds) - // whenReady(flow.run()) { result ⇒ - // result should equal(Seq(SingularEvent(SimpleReply("PONG")), SingularEvent(SimpleReply("PONG")))) - // } } } -} - -//Server \ No newline at end of file +} \ No newline at end of file From 30bcd696865d554cadb211de0f9e1ce1d08475db Mon Sep 17 00:00:00 2001 From: Gideon de Kok Date: Fri, 6 Jan 2017 13:17:35 +0100 Subject: [PATCH 46/54] Update README.md --- README.md | 5 ----- 1 file changed, 5 deletions(-) diff --git a/README.md b/README.md index 82409dc..389d781 100644 --- a/README.md +++ b/README.md @@ -45,12 +45,7 @@ Or by adding the repo: to your SBT configuration and adding Sentinel to your library dependencies (currently only build against Scala 2.11):
libraryDependencies ++= Seq(
-<<<<<<< HEAD
-  "nl.gideondk" %% "sentinel" % "0.7.5.1"
-=======
   "nl.gideondk" %% "sentinel" % "0.8-M1"
->>>>>>> develop
-)
 
## Architecture From c337c4116d5fb78a8d3a802ea00e7dc0e1f390dd Mon Sep 17 00:00:00 2001 From: Gideon de Kok Date: Fri, 6 Jan 2017 13:35:45 +0100 Subject: [PATCH 47/54] Clean-up --- .settings/org.eclipse.core.resources.prefs | 2 -- 1 file changed, 2 deletions(-) delete mode 100644 .settings/org.eclipse.core.resources.prefs diff --git a/.settings/org.eclipse.core.resources.prefs b/.settings/org.eclipse.core.resources.prefs deleted file mode 100644 index 99f26c0..0000000 --- a/.settings/org.eclipse.core.resources.prefs +++ /dev/null @@ -1,2 +0,0 @@ -eclipse.preferences.version=1 -encoding/=UTF-8 From 6aeabc9a75fdefffde561ef632e1f4e4f2e06106 Mon Sep 17 00:00:00 2001 From: Gideon de Kok Date: Fri, 6 Jan 2017 13:36:09 +0100 Subject: [PATCH 48/54] Update .travis.yml --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 7a1c2ba..63d7c8d 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,4 +1,4 @@ language: scala scala: - - 2.10.2 + - 2.11.8 script: "sbt ++$TRAVIS_SCALA_VERSION test" From 7c9d46149fe97aa309e6f361c89b384a48942709 Mon Sep 17 00:00:00 2001 From: Gideon de Kok Date: Fri, 6 Jan 2017 13:49:39 +0100 Subject: [PATCH 49/54] Add performance mention to README.md --- README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 389d781..460fea3 100644 --- a/README.md +++ b/README.md @@ -30,7 +30,7 @@ In overall, treat Sentinel as alpha software. **(Currently) known issues:** * There is no active (demand) buffering process within the client; when a stream is requested, but not consumed, additional requests on the same socket aren't demanded and therefore not pulled into new requests. - +* No real performance testing has been done yet, so consider things shaky. ## Installation You can install Sentinel through source (by publishing it into your local Ivy repository): @@ -46,6 +46,7 @@ to your SBT configuration and adding Sentinel to your library dependencies (curr
libraryDependencies ++= Seq(
   "nl.gideondk" %% "sentinel" % "0.8-M1"
+)
 
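A minimal `build.sbt` putting the snippet above together — a sketch only, assuming the `0.8-M1` artifact is resolvable from your configured resolvers — would pair the dependency with the Scala version used elsewhere in this series (`project/Build.scala` and `.travis.yml` both pin 2.11.8):

<pre><code>// Sketch of a minimal build.sbt for trying the milestone above.
// Assumes the 0.8-M1 artifact is resolvable from your configured resolvers.
scalaVersion := "2.11.8"

libraryDependencies ++= Seq(
  "nl.gideondk" %% "sentinel" % "0.8-M1"
)
</code></pre>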
## Architecture From 36dbfcdad66681c1a5967c7c84e0c2088e9fba58 Mon Sep 17 00:00:00 2001 From: Gideon de Kok Date: Fri, 6 Jan 2017 14:05:56 +0100 Subject: [PATCH 50/54] Fix merge issues --- project/Build.scala | 12 - src/main/scala/akka/io/Pipelines.scala | 1168 ------------------------ 2 files changed, 1180 deletions(-) delete mode 100644 src/main/scala/akka/io/Pipelines.scala diff --git a/project/Build.scala b/project/Build.scala index c462dab..200282a 100755 --- a/project/Build.scala +++ b/project/Build.scala @@ -6,11 +6,7 @@ object ApplicationBuild extends Build { override lazy val settings = super.settings ++ Seq( name := "sentinel", -<<<<<<< HEAD - version := "0.8-SNAPSHOT", -======= version := "0.8-M1", ->>>>>>> develop organization := "nl.gideondk", scalaVersion := "2.11.8", parallelExecution in Test := false, @@ -29,14 +25,7 @@ object ApplicationBuild extends Build { val akkaVersion = "2.4.11" val appDependencies = Seq( -<<<<<<< HEAD - "org.scalatest" %% "scalatest" % "2.2.0" % "test", - "com.typesafe.play" %% "play-iteratees" % "2.3.1", - - "com.typesafe.akka" %% "akka-actor" % "2.4.6", - "com.typesafe.akka" %% "akka-testkit" % "2.4.6" % "test" -======= "org.scalatest" %% "scalatest" % "3.0.0" % "test", "com.typesafe.akka" %% "akka-stream" % akkaVersion, @@ -46,7 +35,6 @@ object ApplicationBuild extends Build { "com.typesafe.akka" %% "akka-testkit" % akkaVersion % "test", "com.typesafe" % "config" % "1.3.0" ->>>>>>> develop ) lazy val root = Project( diff --git a/src/main/scala/akka/io/Pipelines.scala b/src/main/scala/akka/io/Pipelines.scala deleted file mode 100644 index 54144aa..0000000 --- a/src/main/scala/akka/io/Pipelines.scala +++ /dev/null @@ -1,1168 +0,0 @@ -/** Copyright (C) 2009-2013 Typesafe Inc. - */ - -package akka.io - -import java.lang.{ Iterable ⇒ JIterable } -import scala.annotation.tailrec -import scala.util.{ Try, Success, Failure } -import java.nio.ByteOrder -import akka.util.ByteString -import scala.collection.mutable -import akka.actor.{ NoSerializationVerificationNeeded, ActorContext } -import scala.concurrent.duration.FiniteDuration -import scala.collection.mutable.WrappedArray -import scala.concurrent.duration.Deadline -import scala.beans.BeanProperty -import akka.event.LoggingAdapter - -/** Scala API: A pair of pipes, one for commands and one for events, plus a - * management port. Commands travel from top to bottom, events from bottom to - * top. All messages which need to be handled “in-order” (e.g. top-down or - * bottom-up) need to be either events or commands; management messages are - * processed in no particular order. - * - * Java base classes are provided in the form of [[AbstractPipePair]] - * and [[AbstractSymmetricPipePair]] since the Scala function types can be - * awkward to handle in Java. - * - * @see [[PipelineStage]] - * @see [[AbstractPipePair]] - * @see [[AbstractSymmetricPipePair]] - * @see [[PipePairFactory]] - */ -trait PipePair[CmdAbove, CmdBelow, EvtAbove, EvtBelow] { - - type Result = Either[EvtAbove, CmdBelow] - type Mgmt = PartialFunction[AnyRef, Iterable[Result]] - - /** The command pipeline transforms injected commands from the upper stage - * into commands for the stage below, but it can also emit events for the - * upper stage. Any number of each can be generated. - */ - def commandPipeline: CmdAbove ⇒ Iterable[Result] - - /** The event pipeline transforms injected event from the lower stage - * into event for the stage above, but it can also emit commands for the - * stage below. Any number of each can be generated. 
- */ - def eventPipeline: EvtBelow ⇒ Iterable[Result] - - /** The management port allows sending broadcast messages to all stages - * within this pipeline. This can be used to communicate with stages in the - * middle without having to thread those messages through the surrounding - * stages. Each stage can generate events and commands in response to a - * command, and the aggregation of all those is returned. - * - * The default implementation ignores all management commands. - */ - def managementPort: Mgmt = PartialFunction.empty -} - -/** A convenience type for expressing a [[PipePair]] which has the same types - * for commands and events. - */ -trait SymmetricPipePair[Above, Below] extends PipePair[Above, Below, Above, Below] - -/** Java API: A pair of pipes, one for commands and one for events. Commands travel from - * top to bottom, events from bottom to top. - * - * @see [[PipelineStage]] - * @see [[AbstractSymmetricPipePair]] - * @see [[PipePairFactory]] - */ -abstract class AbstractPipePair[CmdAbove, CmdBelow, EvtAbove, EvtBelow] { - - /** Commands reaching this pipe pair are transformed into a sequence of - * commands for the next or events for the previous stage. - * - * Throwing exceptions within this method will abort processing of the whole - * pipeline which this pipe pair is part of. - * - * @param cmd the incoming command - * @return an Iterable of elements which are either events or commands - * - * @see [[#makeCommand]] - * @see [[#makeEvent]] - */ - def onCommand(cmd: CmdAbove): JIterable[Either[EvtAbove, CmdBelow]] - - /** Events reaching this pipe pair are transformed into a sequence of - * commands for the next or events for the previous stage. - * - * Throwing exceptions within this method will abort processing of the whole - * pipeline which this pipe pair is part of. - * - * @param cmd the incoming command - * @return an Iterable of elements which are either events or commands - * - * @see [[#makeCommand]] - * @see [[#makeEvent]] - */ - def onEvent(event: EvtBelow): JIterable[Either[EvtAbove, CmdBelow]] - - /** Management commands are sent to all stages in a broadcast fashion, - * conceptually in parallel (but not actually executing a stage - * reentrantly in case of events or commands being generated in response - * to a management command). - */ - def onManagementCommand(cmd: AnyRef): JIterable[Either[EvtAbove, CmdBelow]] = - java.util.Collections.emptyList() - - /** Helper method for wrapping a command which shall be emitted. - */ - def makeCommand(cmd: CmdBelow): Either[EvtAbove, CmdBelow] = Right(cmd) - - /** Helper method for wrapping an event which shall be emitted. - */ - def makeEvent(event: EvtAbove): Either[EvtAbove, CmdBelow] = Left(event) - - /** INTERNAL API: do not touch! - */ - private[io] val _internal$cmd = { - val l = new java.util.ArrayList[AnyRef](1) - l add null - l - } - /** INTERNAL API: do not touch! - */ - private[io] val _internal$evt = { - val l = new java.util.ArrayList[AnyRef](1) - l add null - l - } - - /** Wrap a single command for efficient return to the pipeline’s machinery. - * This method avoids allocating a [[scala.util.Right]] and an [[java.lang.Iterable]] by reusing - * one such instance within the AbstractPipePair, hence it can be used ONLY ONCE by - * each pipeline stage. Prototypic and safe usage looks like this: - * - * {{{ - * final MyResult result = ... 
; - * return singleCommand(result); - * }}} - * - * @see PipelineContext#singleCommand - */ - def singleCommand(cmd: CmdBelow): JIterable[Either[EvtAbove, CmdBelow]] = { - _internal$cmd.set(0, cmd.asInstanceOf[AnyRef]) - _internal$cmd.asInstanceOf[JIterable[Either[EvtAbove, CmdBelow]]] - } - - /** Wrap a single event for efficient return to the pipeline’s machinery. - * This method avoids allocating a [[scala.util.Left]] and an [[java.lang.Iterable]] by reusing - * one such instance within the AbstractPipePair, hence it can be used ONLY ONCE by - * each pipeline stage. Prototypic and safe usage looks like this: - * - * {{{ - * final MyResult result = ... ; - * return singleEvent(result); - * }}} - * - * @see PipelineContext#singleEvent - */ - def singleEvent(evt: EvtAbove): JIterable[Either[EvtAbove, CmdBelow]] = { - _internal$evt.set(0, evt.asInstanceOf[AnyRef]) - _internal$evt.asInstanceOf[JIterable[Either[EvtAbove, CmdBelow]]] - } - - /** INTERNAL API: Dealias a possibly optimized return value such that it can - * be safely used; this is never needed when only using public API. - */ - def dealias[Cmd, Evt](msg: JIterable[Either[Evt, Cmd]]): JIterable[Either[Evt, Cmd]] = { - import java.util.Collections.singletonList - if (msg eq _internal$cmd) singletonList(Right(_internal$cmd.get(0).asInstanceOf[Cmd])) - else if (msg eq _internal$evt) singletonList(Left(_internal$evt.get(0).asInstanceOf[Evt])) - else msg - } -} - -/** A convenience type for expressing a [[AbstractPipePair]] which has the same types - * for commands and events. - */ -abstract class AbstractSymmetricPipePair[Above, Below] extends AbstractPipePair[Above, Below, Above, Below] - -/** This class contains static factory methods which produce [[PipePair]] - * instances; those are needed within the implementation of [[PipelineStage#apply]]. - */ -object PipePairFactory { - - /** Scala API: construct a [[PipePair]] from the two given functions; useful for not capturing `$outer` references. - */ - def apply[CmdAbove, CmdBelow, EvtAbove, EvtBelow] // - (commandPL: CmdAbove ⇒ Iterable[Either[EvtAbove, CmdBelow]], - eventPL: EvtBelow ⇒ Iterable[Either[EvtAbove, CmdBelow]], - management: PartialFunction[AnyRef, Iterable[Either[EvtAbove, CmdBelow]]] = PartialFunction.empty) = - new PipePair[CmdAbove, CmdBelow, EvtAbove, EvtBelow] { - override def commandPipeline = commandPL - override def eventPipeline = eventPL - override def managementPort = management - } - - private abstract class Converter[CmdAbove <: AnyRef, CmdBelow <: AnyRef, EvtAbove <: AnyRef, EvtBelow <: AnyRef] // - (val ap: AbstractPipePair[CmdAbove, CmdBelow, EvtAbove, EvtBelow], ctx: PipelineContext) { - import scala.collection.JavaConverters._ - protected def normalize(output: JIterable[Either[EvtAbove, CmdBelow]]): Iterable[Either[EvtAbove, CmdBelow]] = - if (output == java.util.Collections.EMPTY_LIST) Nil - else if (output eq ap._internal$cmd) ctx.singleCommand(ap._internal$cmd.get(0).asInstanceOf[CmdBelow]) - else if (output eq ap._internal$evt) ctx.singleEvent(ap._internal$evt.get(0).asInstanceOf[EvtAbove]) - else output.asScala - } - - /** Java API: construct a [[PipePair]] from the given [[AbstractPipePair]]. 
- */ - def create[CmdAbove <: AnyRef, CmdBelow <: AnyRef, EvtAbove <: AnyRef, EvtBelow <: AnyRef] // - (ctx: PipelineContext, ap: AbstractPipePair[CmdAbove, CmdBelow, EvtAbove, EvtBelow]) // - : PipePair[CmdAbove, CmdBelow, EvtAbove, EvtBelow] = - new Converter(ap, ctx) with PipePair[CmdAbove, CmdBelow, EvtAbove, EvtBelow] { - override val commandPipeline = { cmd: CmdAbove ⇒ normalize(ap.onCommand(cmd)) } - override val eventPipeline = { evt: EvtBelow ⇒ normalize(ap.onEvent(evt)) } - override val managementPort: Mgmt = { case x ⇒ normalize(ap.onManagementCommand(x)) } - } - - /** Java API: construct a [[PipePair]] from the given [[AbstractSymmetricPipePair]]. - */ - def create[Above <: AnyRef, Below <: AnyRef] // - (ctx: PipelineContext, ap: AbstractSymmetricPipePair[Above, Below]): SymmetricPipePair[Above, Below] = - new Converter(ap, ctx) with SymmetricPipePair[Above, Below] { - override val commandPipeline = { cmd: Above ⇒ normalize(ap.onCommand(cmd)) } - override val eventPipeline = { evt: Below ⇒ normalize(ap.onEvent(evt)) } - override val managementPort: Mgmt = { case x ⇒ normalize(ap.onManagementCommand(x)) } - } -} - -case class PipelinePorts[CmdAbove, CmdBelow, EvtAbove, EvtBelow]( - commands: CmdAbove ⇒ (Iterable[EvtAbove], Iterable[CmdBelow]), - events: EvtBelow ⇒ (Iterable[EvtAbove], Iterable[CmdBelow]), - management: PartialFunction[AnyRef, (Iterable[EvtAbove], Iterable[CmdBelow])]) - -/** This class contains static factory methods which turn a pipeline context - * and a [[PipelineStage]] into readily usable pipelines. - */ -object PipelineFactory { - - /** Scala API: build the pipeline and return a pair of functions representing - * the command and event pipelines. Each function returns the commands and - * events resulting from running the pipeline on the given input, where the - * the sequence of events is the first element of the returned pair and the - * sequence of commands the second element. - * - * Exceptions thrown by the pipeline stages will not be caught. - * - * @param ctx The context object for this pipeline - * @param stage The (composite) pipeline stage from whcih to build the pipeline - * @return a pair of command and event pipeline functions - */ - def buildFunctionTriple[Ctx <: PipelineContext, CmdAbove, CmdBelow, EvtAbove, EvtBelow] // - (ctx: Ctx, stage: PipelineStage[Ctx, CmdAbove, CmdBelow, EvtAbove, EvtBelow]) // - : PipelinePorts[CmdAbove, CmdBelow, EvtAbove, EvtBelow] = { - val pp = stage apply ctx - val split: (Iterable[Either[EvtAbove, CmdBelow]]) ⇒ (Iterable[EvtAbove], Iterable[CmdBelow]) = { in ⇒ - if (in.isEmpty) (Nil, Nil) - else if (in eq ctx.cmd) (Nil, Seq[CmdBelow](ctx.cmd(0))) - else if (in eq ctx.evt) (Seq[EvtAbove](ctx.evt(0)), Nil) - else { - val cmds = Vector.newBuilder[CmdBelow] - val evts = Vector.newBuilder[EvtAbove] - in foreach { - case Right(cmd) ⇒ cmds += cmd - case Left(evt) ⇒ evts += evt - } - (evts.result, cmds.result) - } - } - PipelinePorts(pp.commandPipeline andThen split, pp.eventPipeline andThen split, pp.managementPort andThen split) - } - - /** Scala API: build the pipeline attaching the given command and event sinks - * to its outputs. Exceptions thrown within the pipeline stages will abort - * processing (i.e. will not be processed in following stages) but will be - * caught and passed as [[scala.util.Failure]] into the respective sink. - * - * Exceptions thrown while processing management commands are not caught. 
- * - * @param ctx The context object for this pipeline - * @param stage The (composite) pipeline stage from whcih to build the pipeline - * @param commandSink The function to invoke for commands or command failures - * @param eventSink The function to invoke for events or event failures - * @return a handle for injecting events or commands into the pipeline - */ - def buildWithSinkFunctions[Ctx <: PipelineContext, CmdAbove, CmdBelow, EvtAbove, EvtBelow] // - (ctx: Ctx, - stage: PipelineStage[Ctx, CmdAbove, CmdBelow, EvtAbove, EvtBelow])( - commandSink: Try[CmdBelow] ⇒ Unit, - eventSink: Try[EvtAbove] ⇒ Unit): PipelineInjector[CmdAbove, EvtBelow] = - new PipelineInjector[CmdAbove, EvtBelow] { - val pl = stage(ctx) - override def injectCommand(cmd: CmdAbove): Unit = { - Try(pl.commandPipeline(cmd)) match { - case f: Failure[_] ⇒ commandSink(f.asInstanceOf[Try[CmdBelow]]) - case Success(out) ⇒ - if (out.isEmpty) () // nothing - else if (out eq ctx.cmd) commandSink(Success(ctx.cmd(0))) - else if (out eq ctx.evt) eventSink(Success(ctx.evt(0))) - else out foreach { - case Right(cmd) ⇒ commandSink(Success(cmd)) - case Left(evt) ⇒ eventSink(Success(evt)) - } - } - } - override def injectEvent(evt: EvtBelow): Unit = { - Try(pl.eventPipeline(evt)) match { - case f: Failure[_] ⇒ eventSink(f.asInstanceOf[Try[EvtAbove]]) - case Success(out) ⇒ - if (out.isEmpty) () // nothing - else if (out eq ctx.cmd) commandSink(Success(ctx.cmd(0))) - else if (out eq ctx.evt) eventSink(Success(ctx.evt(0))) - else out foreach { - case Right(cmd) ⇒ commandSink(Success(cmd)) - case Left(evt) ⇒ eventSink(Success(evt)) - } - } - } - override def managementCommand(cmd: AnyRef): Unit = { - val out = pl.managementPort(cmd) - if (out.isEmpty) () // nothing - else if (out eq ctx.cmd) commandSink(Success(ctx.cmd(0))) - else if (out eq ctx.evt) eventSink(Success(ctx.evt(0))) - else out foreach { - case Right(cmd) ⇒ commandSink(Success(cmd)) - case Left(evt) ⇒ eventSink(Success(evt)) - } - } - } - - /** Java API: build the pipeline attaching the given callback object to its - * outputs. Exceptions thrown within the pipeline stages will abort - * processing (i.e. will not be processed in following stages) but will be - * caught and passed as [[scala.util.Failure]] into the respective sink. - * - * Exceptions thrown while processing management commands are not caught. - * - * @param ctx The context object for this pipeline - * @param stage The (composite) pipeline stage from whcih to build the pipeline - * @param callback The [[PipelineSink]] to attach to the built pipeline - * @return a handle for injecting events or commands into the pipeline - */ - def buildWithSink[Ctx <: PipelineContext, CmdAbove, CmdBelow, EvtAbove, EvtBelow] // - (ctx: Ctx, - stage: PipelineStage[Ctx, CmdAbove, CmdBelow, EvtAbove, EvtBelow], - callback: PipelineSink[CmdBelow, EvtAbove]): PipelineInjector[CmdAbove, EvtBelow] = - buildWithSinkFunctions[Ctx, CmdAbove, CmdBelow, EvtAbove, EvtBelow](ctx, stage)({ - case Failure(thr) ⇒ callback.onCommandFailure(thr) - case Success(cmd) ⇒ callback.onCommand(cmd) - }, { - case Failure(thr) ⇒ callback.onEventFailure(thr) - case Success(evt) ⇒ callback.onEvent(evt) - }) -} - -/** A handle for injecting commands and events into a pipeline. Commands travel - * down (or to the right) through the stages, events travel in the opposite - * direction. 
- * - * @see [[PipelineFactory#buildWithSinkFunctions]] - * @see [[PipelineFactory#buildWithSink]] - */ -trait PipelineInjector[Cmd, Evt] { - - /** Inject the given command into the connected pipeline. - */ - @throws(classOf[Exception]) - def injectCommand(cmd: Cmd): Unit - - /** Inject the given event into the connected pipeline. - */ - @throws(classOf[Exception]) - def injectEvent(event: Evt): Unit - - /** Send a management command to all stages (in an unspecified order). - */ - @throws(classOf[Exception]) - def managementCommand(cmd: AnyRef): Unit -} - -/** A sink which can be attached by [[PipelineFactory#buildWithSink]] to a - * pipeline when it is being built. The methods are called when commands, - * events or their failures occur during evaluation of the pipeline (i.e. - * when injection is triggered using the associated [[PipelineInjector]]). - */ -abstract class PipelineSink[Cmd, Evt] { - - /** This callback is invoked for every command generated by the pipeline. - * - * By default this does nothing. - */ - @throws(classOf[Throwable]) - def onCommand(cmd: Cmd): Unit = () - - /** This callback is invoked if an exception occurred while processing an - * injected command. If this callback is invoked that no other callbacks will - * be invoked for the same injection. - * - * By default this will just throw the exception. - */ - @throws(classOf[Throwable]) - def onCommandFailure(thr: Throwable): Unit = throw thr - - /** This callback is invoked for every event generated by the pipeline. - * - * By default this does nothing. - */ - @throws(classOf[Throwable]) - def onEvent(event: Evt): Unit = () - - /** This callback is invoked if an exception occurred while processing an - * injected event. If this callback is invoked that no other callbacks will - * be invoked for the same injection. - * - * By default this will just throw the exception. - */ - @throws(classOf[Throwable]) - def onEventFailure(thr: Throwable): Unit = throw thr -} - -/** This base trait of each pipeline’s context provides optimized facilities - * for generating single commands or events (i.e. the fast common case of 1:1 - * message transformations). - * - * IMPORTANT NOTICE: - * - * A PipelineContext MUST NOT be shared between multiple pipelines, it contains mutable - * state without synchronization. You have been warned! - * - * @see AbstractPipelineContext see AbstractPipelineContext for a default implementation (Java) - */ -trait PipelineContext { - - /** INTERNAL API: do not touch! - */ - private val cmdHolder = new Array[AnyRef](1) - /** INTERNAL API: do not touch! - */ - private val evtHolder = new Array[AnyRef](1) - /** INTERNAL API: do not touch! - */ - private[io] val cmd = WrappedArray.make(cmdHolder) - /** INTERNAL API: do not touch! - */ - private[io] val evt = WrappedArray.make(evtHolder) - - /** Scala API: Wrap a single command for efficient return to the pipeline’s machinery. - * This method avoids allocating a [[scala.util.Right]] and an [[scala.collection.Iterable]] by reusing - * one such instance within the PipelineContext, hence it can be used ONLY ONCE by - * each pipeline stage. Prototypic and safe usage looks like this: - * - * {{{ - * override val commandPipeline = { cmd => - * val myResult = ... 
- * ctx.singleCommand(myResult) - * } - * }}} - * - * @see AbstractPipePair#singleCommand see AbstractPipePair for the Java API - */ - def singleCommand[Cmd <: AnyRef, Evt <: AnyRef](cmd: Cmd): Iterable[Either[Evt, Cmd]] = { - cmdHolder(0) = cmd - this.cmd - } - - /** Scala API: Wrap a single event for efficient return to the pipeline’s machinery. - * This method avoids allocating a [[scala.util.Left]] and an [[scala.collection.Iterable]] by reusing - * one such instance within the context, hence it can be used ONLY ONCE by - * each pipeline stage. Prototypic and safe usage looks like this: - * - * {{{ - * override val eventPipeline = { cmd => - * val myResult = ... - * ctx.singleEvent(myResult) - * } - * }}} - * - * @see AbstractPipePair#singleEvent see AbstractPipePair for the Java API - */ - def singleEvent[Cmd <: AnyRef, Evt <: AnyRef](evt: Evt): Iterable[Either[Evt, Cmd]] = { - evtHolder(0) = evt - this.evt - } - - /** A shared (and shareable) instance of an empty `Iterable[Either[EvtAbove, CmdBelow]]`. - * Use this when processing does not yield any commands or events as result. - */ - def nothing[Cmd, Evt]: Iterable[Either[Evt, Cmd]] = Nil - - /** INTERNAL API: Dealias a possibly optimized return value such that it can - * be safely used; this is never needed when only using public API. - */ - def dealias[Cmd, Evt](msg: Iterable[Either[Evt, Cmd]]): Iterable[Either[Evt, Cmd]] = { - if (msg.isEmpty) Nil - else if (msg eq cmd) Seq(Right(cmd(0))) - else if (msg eq evt) Seq(Left(evt(0))) - else msg - } -} - -/** This base trait of each pipeline’s context provides optimized facilities - * for generating single commands or events (i.e. the fast common case of 1:1 - * message transformations). - * - * IMPORTANT NOTICE: - * - * A PipelineContext MUST NOT be shared between multiple pipelines, it contains mutable - * state without synchronization. You have been warned! - */ -abstract class AbstractPipelineContext extends PipelineContext - -object PipelineStage { - - /** Java API: attach the two given stages such that the command output of the - * first is fed into the command input of the second, and the event output of - * the second is fed into the event input of the first. In other words: - * sequence the stages such that the left one is on top of the right one. - * - * @param left the left or upper pipeline stage - * @param right the right or lower pipeline stage - * @return a pipeline stage representing the sequence of the two stages - */ - def sequence[Ctx <: PipelineContext, CmdAbove, CmdBelow, CmdBelowBelow, EvtAbove, EvtBelow, EvtBelowBelow] // - (left: PipelineStage[_ >: Ctx, CmdAbove, CmdBelow, EvtAbove, EvtBelow], - right: PipelineStage[_ >: Ctx, CmdBelow, CmdBelowBelow, EvtBelow, EvtBelowBelow]) // - : PipelineStage[Ctx, CmdAbove, CmdBelowBelow, EvtAbove, EvtBelowBelow] = - left >> right - - /** Java API: combine the two stages such that the command pipeline of the - * left stage is used and the event pipeline of the right, discarding the - * other two sub-pipelines. 
- * - * @param left the command pipeline - * @param right the event pipeline - * @return a pipeline stage using the left command pipeline and the right event pipeline - */ - def combine[Ctx <: PipelineContext, CmdAbove, CmdBelow, EvtAbove, EvtBelow] // - (left: PipelineStage[Ctx, CmdAbove, CmdBelow, EvtAbove, EvtBelow], - right: PipelineStage[Ctx, CmdAbove, CmdBelow, EvtAbove, EvtBelow]) // - : PipelineStage[Ctx, CmdAbove, CmdBelow, EvtAbove, EvtBelow] = - left | right -} - -/** A [[PipelineStage]] which is symmetric in command and event types, i.e. it only - * has one command and event type above and one below. - */ -abstract class SymmetricPipelineStage[Context <: PipelineContext, Above, Below] extends PipelineStage[Context, Above, Below, Above, Below] - -/** A pipeline stage which can be combined with other stages to build a - * protocol stack. The main function of this class is to serve as a factory - * for the actual [[PipePair]] generated by the [[#apply]] method so that a - * context object can be passed in. - * - * @see [[PipelineFactory]] - */ -abstract class PipelineStage[Context <: PipelineContext, CmdAbove, CmdBelow, EvtAbove, EvtBelow] { left ⇒ - - /** Implement this method to generate this stage’s pair of command and event - * functions. - * - * INTERNAL API: do not use this method to instantiate a pipeline! - * - * @see [[PipelineFactory]] - * @see [[AbstractPipePair]] - * @see [[AbstractSymmetricPipePair]] - */ - protected[io] def apply(ctx: Context): PipePair[CmdAbove, CmdBelow, EvtAbove, EvtBelow] - - /** Scala API: attach the two given stages such that the command output of the - * first is fed into the command input of the second, and the event output of - * the second is fed into the event input of the first. In other words: - * sequence the stages such that the left one is on top of the right one. 
- * - * @param right the right or lower pipeline stage - * @return a pipeline stage representing the sequence of the two stages - */ - def >>[CmdBelowBelow, EvtBelowBelow, BelowContext <: Context] // - (right: PipelineStage[_ >: BelowContext, CmdBelow, CmdBelowBelow, EvtBelow, EvtBelowBelow]) // - : PipelineStage[BelowContext, CmdAbove, CmdBelowBelow, EvtAbove, EvtBelowBelow] = - new PipelineStage[BelowContext, CmdAbove, CmdBelowBelow, EvtAbove, EvtBelowBelow] { - - protected[io] override def apply(ctx: BelowContext): PipePair[CmdAbove, CmdBelowBelow, EvtAbove, EvtBelowBelow] = { - - val leftPL = left(ctx) - val rightPL = right(ctx) - - new PipePair[CmdAbove, CmdBelowBelow, EvtAbove, EvtBelowBelow] { - - type Output = Either[EvtAbove, CmdBelowBelow] - - import language.implicitConversions - @inline implicit def narrowRight[A, B, C](in: Right[A, B]): Right[C, B] = in.asInstanceOf[Right[C, B]] - @inline implicit def narrowLeft[A, B, C](in: Left[A, B]): Left[A, C] = in.asInstanceOf[Left[A, C]] - - def loopLeft(input: Iterable[Either[EvtAbove, CmdBelow]]): Iterable[Output] = { - if (input.isEmpty) Nil - else if (input eq ctx.cmd) loopRight(rightPL.commandPipeline(ctx.cmd(0))) - else if (input eq ctx.evt) ctx.evt - else { - val output = Vector.newBuilder[Output] - input foreach { - case Right(cmd) ⇒ output ++= ctx.dealias(loopRight(rightPL.commandPipeline(cmd))) - case l @ Left(_) ⇒ output += l - } - output.result - } - } - - def loopRight(input: Iterable[Either[EvtBelow, CmdBelowBelow]]): Iterable[Output] = { - if (input.isEmpty) Nil - else if (input eq ctx.cmd) ctx.cmd - else if (input eq ctx.evt) loopLeft(leftPL.eventPipeline(ctx.evt(0))) - else { - val output = Vector.newBuilder[Output] - input foreach { - case r @ Right(_) ⇒ output += r - case Left(evt) ⇒ output ++= ctx.dealias(loopLeft(leftPL.eventPipeline(evt))) - } - output.result - } - } - - override val commandPipeline = { a: CmdAbove ⇒ loopLeft(leftPL.commandPipeline(a)) } - - override val eventPipeline = { b: EvtBelowBelow ⇒ loopRight(rightPL.eventPipeline(b)) } - - override val managementPort: PartialFunction[AnyRef, Iterable[Either[EvtAbove, CmdBelowBelow]]] = { - case x ⇒ - val output = Vector.newBuilder[Output] - output ++= ctx.dealias(loopLeft(leftPL.managementPort.applyOrElse(x, (_: AnyRef) ⇒ Nil))) - output ++= ctx.dealias(loopRight(rightPL.managementPort.applyOrElse(x, (_: AnyRef) ⇒ Nil))) - output.result - } - } - } - } - - /** Scala API: combine the two stages such that the command pipeline of the - * left stage is used and the event pipeline of the right, discarding the - * other two sub-pipelines. 
- * - * @param right the event pipeline - * @return a pipeline stage using the left command pipeline and the right event pipeline - */ - def |[RightContext <: Context] // - (right: PipelineStage[_ >: RightContext, CmdAbove, CmdBelow, EvtAbove, EvtBelow]) // - : PipelineStage[RightContext, CmdAbove, CmdBelow, EvtAbove, EvtBelow] = - new PipelineStage[RightContext, CmdAbove, CmdBelow, EvtAbove, EvtBelow] { - override def apply(ctx: RightContext): PipePair[CmdAbove, CmdBelow, EvtAbove, EvtBelow] = - new PipePair[CmdAbove, CmdBelow, EvtAbove, EvtBelow] { - - val leftPL = left(ctx) - val rightPL = right(ctx) - - override val commandPipeline = leftPL.commandPipeline - override val eventPipeline = rightPL.eventPipeline - override val managementPort: Mgmt = { - case x ⇒ - val output = Vector.newBuilder[Either[EvtAbove, CmdBelow]] - output ++= ctx.dealias(leftPL.managementPort(x)) - output ++= ctx.dealias(rightPL.managementPort(x)) - output.result - } - } - } -} - -object BackpressureBuffer { - /** Message type which is sent when the buffer’s high watermark has been - * reached, which means that further write requests should not be sent - * until the low watermark has been reached again. - */ - trait HighWatermarkReached extends Tcp.Event - case object HighWatermarkReached extends HighWatermarkReached - - /** Message type which is sent when the buffer’s fill level falls below - * the low watermark, which means that writing can commence again. - */ - trait LowWatermarkReached extends Tcp.Event - case object LowWatermarkReached extends LowWatermarkReached - -} - -/** This pipeline stage implements a configurable buffer for transforming the - * per-write ACK/NACK-based backpressure model of a TCP connection actor into - * an edge-triggered back-pressure model: the upper stages will receive - * notification when the buffer runs full ([[BackpressureBuffer.HighWatermarkReached]]) and when - * it subsequently empties ([[BackpressureBuffer.LowWatermarkReached]]). The upper layers should - * respond by not generating more writes when the buffer is full. There is also - * a hard limit upon which this buffer will abort the connection. - * - * All limits are configurable and are given in number of bytes. - * The `highWatermark` should be set such that the - * amount of data generated before reception of the asynchronous - * [[BackpressureBuffer.HighWatermarkReached]] notification does not lead to exceeding the - * `maxCapacity` hard limit; if the writes may arrive in bursts then the - * difference between these two should allow for at least one burst to be sent - * after the high watermark has been reached. The `lowWatermark` must be less - * than or equal to the `highWatermark`, where the difference between these two - * defines the hysteresis, i.e. how often these notifications are sent out (i.e. - * if the difference is rather large then it will take some time for the buffer - * to empty below the low watermark, and that room is then available for data - * sent in response to the [[BackpressureBuffer.LowWatermarkReached]] notification; if the - * difference was small then the buffer would more quickly oscillate between - * these two limits). 
- */ -class BackpressureBuffer(lowBytes: Long, highBytes: Long, maxBytes: Long) - extends PipelineStage[HasLogging, Tcp.Command, Tcp.Command, Tcp.Event, Tcp.Event] { - - require(lowBytes >= 0, "lowWatermark needs to be non-negative") - require(highBytes >= lowBytes, "highWatermark needs to be at least as large as lowWatermark") - require(maxBytes >= highBytes, "maxCapacity needs to be at least as large as highWatermark") - - // WARNING: Closes over enclosing class -- cannot moved outside because of backwards binary compatibility - // Fixed in 2.3 - case class Ack(num: Int, ack: Tcp.Event) extends Tcp.Event with NoSerializationVerificationNeeded - - override def apply(ctx: HasLogging) = new PipePair[Tcp.Command, Tcp.Command, Tcp.Event, Tcp.Event] { - - import Tcp._ - import BackpressureBuffer._ - - private val log = ctx.getLogger - - private var storageOffset = 0 - private var storage = Vector.empty[Write] - private def currentOffset = storageOffset + storage.size - - private var stored = 0L - private var suspended = false - - private var behavior = writing - override def commandPipeline = behavior - override def eventPipeline = behavior - - private def become(f: Message ⇒ Iterable[Result]) { behavior = f } - - private lazy val writing: Message ⇒ Iterable[Result] = { - case Write(data, ack) ⇒ - buffer(Write(data, Ack(currentOffset, ack)), doWrite = true) - - case CommandFailed(Write(_, Ack(offset, _))) ⇒ - become(buffering(offset)) - ctx.singleCommand(ResumeWriting) - - case cmd: CloseCommand ⇒ cmd match { - case _ if storage.isEmpty ⇒ - become(finished) - ctx.singleCommand(cmd) - case Abort ⇒ - storage = Vector.empty - become(finished) - ctx.singleCommand(Abort) - case _ ⇒ - become(closing(cmd)) - ctx.nothing - } - - case Ack(seq, ack) ⇒ acknowledge(seq, ack) - - case cmd: Command ⇒ ctx.singleCommand(cmd) - case evt: Event ⇒ ctx.singleEvent(evt) - } - - private def buffering(nack: Int): Message ⇒ Iterable[Result] = { - var toAck = 10 - var closed: CloseCommand = null - - { - case Write(data, ack) ⇒ - buffer(Write(data, Ack(currentOffset, ack)), doWrite = false) - - case WritingResumed ⇒ - ctx.singleCommand(storage(0)) - - case cmd: CloseCommand ⇒ cmd match { - case Abort ⇒ - storage = Vector.empty - become(finished) - ctx.singleCommand(Abort) - case _ ⇒ - closed = cmd - ctx.nothing - } - - case Ack(seq, ack) if seq < nack ⇒ acknowledge(seq, ack) - - case Ack(seq, ack) ⇒ - val ackMsg = acknowledge(seq, ack) - if (storage.nonEmpty) { - if (toAck > 0) { - toAck -= 1 - ctx.dealias(ackMsg) ++ Seq(Right(storage(0))) - } else { - become(if (closed != null) closing(closed) else writing) - ctx.dealias(ackMsg) ++ storage.map(Right(_)) - } - } else if (closed != null) { - become(finished) - ctx.dealias(ackMsg) ++ Seq(Right(closed)) - } else { - become(writing) - ackMsg - } - - case CommandFailed(_: Write) ⇒ ctx.nothing - case cmd: Command ⇒ ctx.singleCommand(cmd) - case evt: Event ⇒ ctx.singleEvent(evt) - } - } - - private def closing(cmd: CloseCommand): Message ⇒ Iterable[Result] = { - case Ack(seq, ack) ⇒ - val result = acknowledge(seq, ack) - if (storage.isEmpty) { - become(finished) - ctx.dealias(result) ++ Seq(Right(cmd)) - } else result - - case CommandFailed(_: Write) ⇒ - become({ - case WritingResumed ⇒ - become(closing(cmd)) - storage.map(Right(_)) - case CommandFailed(_: Write) ⇒ ctx.nothing - case cmd: Command ⇒ ctx.singleCommand(cmd) - case evt: Event ⇒ ctx.singleEvent(evt) - }) - ctx.singleCommand(ResumeWriting) - - case cmd: Command ⇒ ctx.singleCommand(cmd) - case evt: Event ⇒ 
ctx.singleEvent(evt) - } - - private val finished: Message ⇒ Iterable[Result] = { - case _: Write ⇒ ctx.nothing - case CommandFailed(_: Write) ⇒ ctx.nothing - case cmd: Command ⇒ ctx.singleCommand(cmd) - case evt: Event ⇒ ctx.singleEvent(evt) - } - - private def buffer(w: Write, doWrite: Boolean): Iterable[Result] = { - storage :+= w - stored += w.data.size - - if (stored > maxBytes) { - log.warning("aborting connection (buffer overrun)") - become(finished) - ctx.singleCommand(Abort) - } else if (stored > highBytes && !suspended) { - log.debug("suspending writes") - suspended = true - if (doWrite) { - Seq(Right(w), Left(HighWatermarkReached)) - } else { - ctx.singleEvent(HighWatermarkReached) - } - } else if (doWrite) { - ctx.singleCommand(w) - } else Nil - } - - private def acknowledge(seq: Int, ack: Event): Iterable[Result] = { - require(seq == storageOffset, s"received ack $seq at $storageOffset") - require(storage.nonEmpty, s"storage was empty at ack $seq") - - val size = storage(0).data.size - stored -= size - - storageOffset += 1 - storage = storage drop 1 - - if (suspended && stored < lowBytes) { - log.debug("resuming writes") - suspended = false - if (ack == NoAck) ctx.singleEvent(LowWatermarkReached) - else Vector(Left(ack), Left(LowWatermarkReached)) - } else if (ack == NoAck) ctx.nothing - else ctx.singleEvent(ack) - } - } - -} - -//#length-field-frame -/** Pipeline stage for length-field encoded framing. It will prepend a - * four-byte length header to the message; the header contains the length of - * the resulting frame including header in big-endian representation. - * - * The `maxSize` argument is used to protect the communication channel sanity: - * larger frames will not be sent (silently dropped) or received (in which case - * stream decoding would be broken, hence throwing an IllegalArgumentException). - */ -class LengthFieldFrame(maxSize: Int, - byteOrder: ByteOrder = ByteOrder.BIG_ENDIAN, - headerSize: Int = 4, - lengthIncludesHeader: Boolean = true) - extends SymmetricPipelineStage[PipelineContext, ByteString, ByteString] { - - //#range-checks-omitted - require(byteOrder ne null, "byteOrder must not be null") - require(headerSize > 0 && headerSize <= 4, "headerSize must be in (0, 4]") - require(maxSize > 0, "maxSize must be positive") - require(maxSize <= (Int.MaxValue >> (4 - headerSize) * 8) * (if (headerSize == 4) 1 else 2), - "maxSize cannot exceed 256**headerSize") - //#range-checks-omitted - - override def apply(ctx: PipelineContext) = - new SymmetricPipePair[ByteString, ByteString] { - var buffer = None: Option[ByteString] - implicit val byteOrder = LengthFieldFrame.this.byteOrder - - /** Extract as many complete frames as possible from the given ByteString - * and return the remainder together with the extracted frames in reverse - * order. 
- */ - @tailrec - def extractFrames(bs: ByteString, acc: List[ByteString]) // - : (Option[ByteString], Seq[ByteString]) = { - if (bs.isEmpty) { - (None, acc) - } else if (bs.length < headerSize) { - (Some(bs.compact), acc) - } else { - val length = bs.iterator.getLongPart(headerSize).toInt - if (length < 0 || length > maxSize) - throw new IllegalArgumentException( - s"received too large frame of size $length (max = $maxSize)") - val total = if (lengthIncludesHeader) length else length + headerSize - if (bs.length >= total) { - extractFrames(bs drop total, bs.slice(headerSize, total) :: acc) - } else { - (Some(bs.compact), acc) - } - } - } - - /* - * This is how commands (writes) are transformed: calculate length - * including header, write that to a ByteStringBuilder and append the - * payload data. The result is a single command (i.e. `Right(...)`). - */ - override def commandPipeline = - { bs: ByteString ⇒ - val length = if (lengthIncludesHeader) bs.length + headerSize else bs.length - - if (length < 0 || length > maxSize) - throw new IllegalArgumentException( - s"received too large frame of size $length (max = $maxSize)") - - else { - val bb = ByteString.newBuilder - bb.putLongPart(length, headerSize) - bb ++= bs - ctx.singleCommand(bb.result) - } - } - - /* - * This is how events (reads) are transformed: append the received - * ByteString to the buffer (if any) and extract the frames from the - * result. In the end store the new buffer contents and return the - * list of events (i.e. `Left(...)`). - */ - override def eventPipeline = - { bs: ByteString ⇒ - val data = if (buffer.isEmpty) bs else buffer.get ++ bs - val (nb, frames) = extractFrames(data, Nil) - buffer = nb - /* - * please note the specialized (optimized) facility for emitting - * just a single event - */ - frames match { - case Nil ⇒ Nil - case one :: Nil ⇒ ctx.singleEvent(one) - case many ⇒ many reverseMap (Left(_)) - } - } - } -} -//#length-field-frame - -/** Pipeline stage for delimiter byte based framing and de-framing. Useful for string oriented protocol using '\n' - * or 0 as delimiter values. - * - * @param maxSize The maximum size of the frame the pipeline is willing to decode. Not checked for encoding, as the - * sender might decide to pass through multiple chunks in one go (multiple lines in case of a line-based - * protocol) - * @param delimiter The sequence of bytes that will be used as the delimiter for decoding. - * @param includeDelimiter If enabled, the delmiter bytes will be part of the decoded messages. In the case of sends - * the delimiter has to be appended to the end of frames by the user. 
It is also possible - * to send multiple frames by embedding multiple delimiters in the passed ByteString - */ -class DelimiterFraming(maxSize: Int, delimiter: ByteString = ByteString('\n'), includeDelimiter: Boolean = false) - extends SymmetricPipelineStage[PipelineContext, ByteString, ByteString] { - - require(maxSize > 0, "maxSize must be positive") - require(delimiter.nonEmpty, "delimiter must not be empty") - - override def apply(ctx: PipelineContext) = new SymmetricPipePair[ByteString, ByteString] { - val singleByteDelimiter: Boolean = delimiter.size == 1 - var buffer: ByteString = ByteString.empty - var delimiterFragment: Option[ByteString] = None - val firstByteOfDelimiter = delimiter.head - - @tailrec - private def extractParts(nextChunk: ByteString, acc: List[ByteString]): List[ByteString] = delimiterFragment match { - case Some(fragment) if nextChunk.size < fragment.size && fragment.startsWith(nextChunk) ⇒ - buffer ++= nextChunk - delimiterFragment = Some(fragment.drop(nextChunk.size)) - acc - // We got the missing parts of the delimiter - case Some(fragment) if nextChunk.startsWith(fragment) ⇒ - val decoded = if (includeDelimiter) buffer ++ fragment else buffer.take(buffer.size - delimiter.size + fragment.size) - buffer = ByteString.empty - delimiterFragment = None - extractParts(nextChunk.drop(fragment.size), decoded :: acc) - case _ ⇒ - val matchPosition = nextChunk.indexOf(firstByteOfDelimiter) - if (matchPosition == -1) { - delimiterFragment = None - val minSize = buffer.size + nextChunk.size - if (minSize > maxSize) throw new IllegalArgumentException( - s"Received too large frame of size $minSize (max = $maxSize)") - buffer ++= nextChunk - acc - } else if (matchPosition + delimiter.size > nextChunk.size) { - val delimiterMatchLength = nextChunk.size - matchPosition - if (nextChunk.drop(matchPosition) == delimiter.take(delimiterMatchLength)) { - buffer ++= nextChunk - // we are expecting the other parts of the delimiter - delimiterFragment = Some(delimiter.drop(nextChunk.size - matchPosition)) - acc - } else { - // false positive - delimiterFragment = None - buffer ++= nextChunk.take(matchPosition + 1) - extractParts(nextChunk.drop(matchPosition + 1), acc) - } - } else { - delimiterFragment = None - val missingBytes: Int = if (includeDelimiter) matchPosition + delimiter.size else matchPosition - val expectedSize = buffer.size + missingBytes - if (expectedSize > maxSize) throw new IllegalArgumentException( - s"Received frame already of size $expectedSize (max = $maxSize)") - - if (singleByteDelimiter || nextChunk.slice(matchPosition, matchPosition + delimiter.size) == delimiter) { - val decoded = buffer ++ nextChunk.take(missingBytes) - buffer = ByteString.empty - extractParts(nextChunk.drop(matchPosition + delimiter.size), decoded :: acc) - } else { - buffer ++= nextChunk.take(matchPosition + 1) - extractParts(nextChunk.drop(matchPosition + 1), acc) - } - } - - } - - override val eventPipeline = { - bs: ByteString ⇒ - val parts = extractParts(bs, Nil) - buffer = buffer.compact // TODO: This should be properly benchmarked and memory profiled - parts match { - case Nil ⇒ Nil - case one :: Nil ⇒ ctx.singleEvent(one.compact) - case many ⇒ many reverseMap { frame ⇒ Left(frame.compact) } - } - } - - override val commandPipeline = { - bs: ByteString ⇒ ctx.singleCommand(bs) - } - } -} - -/** Simple convenience pipeline stage for turning Strings into ByteStrings and vice versa. 
- * - * @param charset The character set to be used for encoding and decoding the raw byte representation of the strings. - */ -class StringByteStringAdapter(charset: String = "utf-8") - extends PipelineStage[PipelineContext, String, ByteString, String, ByteString] { - - override def apply(ctx: PipelineContext) = new PipePair[String, ByteString, String, ByteString] { - - val commandPipeline = (str: String) ⇒ ctx.singleCommand(ByteString(str, charset)) - - val eventPipeline = (bs: ByteString) ⇒ ctx.singleEvent(bs.decodeString(charset)) - } -} - -/** This trait expresses that the pipeline’s context needs to provide a logging - * facility. - */ -trait HasLogging extends PipelineContext { - /** Retrieve the [[akka.event.LoggingAdapter]] for this pipeline’s context. - */ - def getLogger: LoggingAdapter -} - -//#tick-generator -/** This trait expresses that the pipeline’s context needs to live within an - * actor and provide its ActorContext. - */ -trait HasActorContext extends PipelineContext { - /** Retrieve the [[akka.actor.ActorContext]] for this pipeline’s context. - */ - def getContext: ActorContext -} - -object TickGenerator { - /** This message type is used by the TickGenerator to trigger - * the rescheduling of the next Tick. The actor hosting the pipeline - * which includes a TickGenerator must arrange for messages of this - * type to be injected into the management port of the pipeline. - */ - trait Trigger - - /** This message type is emitted by the TickGenerator to the whole - * pipeline, informing all stages about the time at which this Tick - * was emitted (relative to some arbitrary epoch). - */ - case class Tick(@BeanProperty timestamp: FiniteDuration) extends Trigger -} - -/** This pipeline stage does not alter the events or commands - */ -class TickGenerator[Cmd <: AnyRef, Evt <: AnyRef](interval: FiniteDuration) - extends PipelineStage[HasActorContext, Cmd, Cmd, Evt, Evt] { - import TickGenerator._ - - override def apply(ctx: HasActorContext) = - new PipePair[Cmd, Cmd, Evt, Evt] { - - // use unique object to avoid double-activation on actor restart - private val trigger: Trigger = { - val path = ctx.getContext.self.path - - new Trigger { - override def toString = s"Tick[$path]" - } - } - - private def schedule() = - ctx.getContext.system.scheduler.scheduleOnce( - interval, ctx.getContext.self, trigger)(ctx.getContext.dispatcher) - - // automatically activate this generator - schedule() - - override val commandPipeline = (cmd: Cmd) ⇒ ctx.singleCommand(cmd) - - override val eventPipeline = (evt: Evt) ⇒ ctx.singleEvent(evt) - - override val managementPort: Mgmt = { - case `trigger` ⇒ - ctx.getContext.self ! Tick(Deadline.now.time) - schedule() - Nil - } - } -} -//#tick-generator - From a63daa110ea0fe2ad799e179fe1f2ac91b345840 Mon Sep 17 00:00:00 2001 From: Gideon de Kok Date: Fri, 6 Jan 2017 15:25:17 +0100 Subject: [PATCH 51/54] Yikes! 
--- .../scala/nl/gideondk/sentinel/Antenna.scala | 112 -------- .../scala/nl/gideondk/sentinel/Client.scala | 180 ------------- .../sentinel/processors/Consumer.scala | 244 ------------------ .../sentinel/processors/Producer.scala | 150 ----------- .../nl/gideondk/sentinel/FullDuplexSpec.scala | 73 ------ .../gideondk/sentinel/RequestResponse.scala | 93 ------- .../gideondk/sentinel/ServerRequestSpec.scala | 99 ------- .../nl/gideondk/sentinel/StreamingSpec.scala | 155 ----------- .../nl/gideondk/sentinel/TestHelpers.scala | 43 +-- .../sentinel/protocols/SimpleMessage.scala | 103 -------- 10 files changed, 1 insertion(+), 1251 deletions(-) delete mode 100644 src/main/scala/nl/gideondk/sentinel/Antenna.scala delete mode 100644 src/main/scala/nl/gideondk/sentinel/Client.scala delete mode 100644 src/main/scala/nl/gideondk/sentinel/processors/Consumer.scala delete mode 100644 src/main/scala/nl/gideondk/sentinel/processors/Producer.scala delete mode 100644 src/test/scala/nl/gideondk/sentinel/FullDuplexSpec.scala delete mode 100644 src/test/scala/nl/gideondk/sentinel/RequestResponse.scala delete mode 100644 src/test/scala/nl/gideondk/sentinel/ServerRequestSpec.scala delete mode 100644 src/test/scala/nl/gideondk/sentinel/StreamingSpec.scala delete mode 100644 src/test/scala/nl/gideondk/sentinel/protocols/SimpleMessage.scala diff --git a/src/main/scala/nl/gideondk/sentinel/Antenna.scala b/src/main/scala/nl/gideondk/sentinel/Antenna.scala deleted file mode 100644 index cc6bec6..0000000 --- a/src/main/scala/nl/gideondk/sentinel/Antenna.scala +++ /dev/null @@ -1,112 +0,0 @@ -package nl.gideondk.sentinel - -import akka.actor._ -import akka.io.TcpPipelineHandler.{ Init, WithinActorContext } -import akka.io._ -import nl.gideondk.sentinel.processors._ -import scala.collection.immutable.Queue - -import scala.concurrent.Future - -class Antenna[Cmd, Evt](init: Init[WithinActorContext, Cmd, Evt], resolver: Resolver[Evt, Cmd], allowPipelining: Boolean = true) extends Actor with ActorLogging with Stash { - - import context.dispatcher - - def active(tcpHandler: ActorRef): Receive = { - val consumer = context.actorOf(Props(new Consumer(init)), name = "resolver") - val producer = context.actorOf(Props(new Producer(init)).withDispatcher("nl.gideondk.sentinel.sentinel-dispatcher"), name = "producer") - - var commandQueue = Queue.empty[init.Command] - var commandInProcess = false - - context watch tcpHandler - context watch producer - context watch consumer - - def handleTermination: Receive = { - case x: Terminated ⇒ context.stop(self) - } - - def highWaterMark: Receive = handleTermination orElse { - case BackpressureBuffer.LowWatermarkReached ⇒ - unstashAll() - context.unbecome() - case _ ⇒ - stash() - } - - def popCommand() = if (!commandQueue.isEmpty) { - val cmd = commandQueue.head - commandQueue = commandQueue.tail - tcpHandler ! cmd - } else { - commandInProcess = false - } - - def handleCommands: Receive = { - case x: Command.Ask[Cmd, Evt] ⇒ - consumer ! x.registration - - val cmd = init.Command(x.payload) - if (allowPipelining) tcpHandler ! cmd - else if (commandInProcess) { - commandQueue :+= cmd - } else { - commandInProcess = true - tcpHandler ! cmd - } - - case x: Command.AskStream[Cmd, Evt] ⇒ - consumer ! x.registration - - val cmd = init.Command(x.payload) - if (allowPipelining) tcpHandler ! cmd - else if (commandInProcess) { - commandQueue :+= cmd - } else { - commandInProcess = true - tcpHandler ! cmd - } - - case x: Command.SendStream[Cmd, Evt] ⇒ - consumer ! x.registration - producer ! 
ProducerActionAndData(ProducerAction.ProduceStream[Unit, Cmd](Unit ⇒ Future(x.stream)), ()) - } - - def handleReplies: Receive = { - case x: Reply.Response[Cmd] ⇒ - tcpHandler ! init.Command(x.payload) - - case x: Reply.StreamResponseChunk[Cmd] ⇒ - tcpHandler ! init.Command(x.payload) - } - - handleTermination orElse handleCommands orElse handleReplies orElse { - case x: Registration[Evt, _] ⇒ - consumer ! x - - case init.Event(data) ⇒ { - resolver.process(data) match { - case x: ProducerAction[Evt, Cmd] ⇒ producer ! ProducerActionAndData[Evt, Cmd](x, data) - - case ConsumerAction.ConsumeStreamChunk ⇒ - consumer ! ConsumerActionAndData[Evt](ConsumerAction.ConsumeStreamChunk, data) - - case x: ConsumerAction ⇒ - consumer ! ConsumerActionAndData[Evt](x, data) - if (!allowPipelining) popCommand() - } - - } - - case BackpressureBuffer.HighWatermarkReached ⇒ { - context.become(highWaterMark, false) - } - } - } - - def receive = { - case Management.RegisterTcpHandler(tcpHandler) ⇒ - context.become(active(tcpHandler)) - } -} \ No newline at end of file diff --git a/src/main/scala/nl/gideondk/sentinel/Client.scala b/src/main/scala/nl/gideondk/sentinel/Client.scala deleted file mode 100644 index 5328ee3..0000000 --- a/src/main/scala/nl/gideondk/sentinel/Client.scala +++ /dev/null @@ -1,180 +0,0 @@ -package nl.gideondk.sentinel - -import java.net.InetSocketAddress - -import scala.concurrent._ -import scala.concurrent.duration.{ DurationInt, FiniteDuration } - -import akka.actor._ -import akka.io._ -import akka.io.Tcp._ -import akka.routing._ - -import akka.util.ByteString - -import play.api.libs.iteratee._ - -trait Client[Cmd, Evt] { - import Registration._ - - def actor: ActorRef - - def ?(command: Cmd)(implicit context: ExecutionContext): Future[Evt] = ask(command) - - def ?->>(command: Cmd)(implicit context: ExecutionContext): Future[Enumerator[Evt]] = askStream(command) - - def ?<<-(command: Cmd, source: Enumerator[Cmd])(implicit context: ExecutionContext): Future[Evt] = sendStream(command, source) - - def ?<<-(source: Enumerator[Cmd])(implicit context: ExecutionContext): Future[Evt] = sendStream(source) - - def ask(command: Cmd)(implicit context: ExecutionContext): Future[Evt] = { - val promise = Promise[Evt]() - actor ! Command.Ask(command, ReplyRegistration(promise)) - promise.future - } - - def askStream(command: Cmd)(implicit context: ExecutionContext): Future[Enumerator[Evt]] = { - val promise = Promise[Enumerator[Evt]]() - actor ! Command.AskStream(command, StreamReplyRegistration(promise)) - promise.future - } - - def sendStream(command: Cmd, source: Enumerator[Cmd]): Future[Evt] = - sendStream(Enumerator(command) >>> source) - - def sendStream(source: Enumerator[Cmd]): Future[Evt] = { - val promise = Promise[Evt]() - actor ! 
Command.SendStream(source, ReplyRegistration(promise)) - promise.future - } -} - -object Client { - case class ConnectToServer(addr: InetSocketAddress) - - def defaultResolver[Cmd, Evt] = new Resolver[Evt, Cmd] { - def process = { - case _ ⇒ ConsumerAction.AcceptSignal - } - } - - def apply[Cmd, Evt](serverHost: String, serverPort: Int, routerConfig: RouterConfig, - description: String = "Sentinel Client", stages: ⇒ PipelineStage[PipelineContext, Cmd, ByteString, Evt, ByteString], workerReconnectTime: FiniteDuration = 2 seconds, resolver: Resolver[Evt, Cmd] = Client.defaultResolver[Cmd, Evt], allowPipelining: Boolean = true, lowBytes: Long = 100L, highBytes: Long = 5000L, maxBufferSize: Long = 20000L)(implicit system: ActorSystem) = { - val core = system.actorOf(Props(new ClientCore[Cmd, Evt](routerConfig, description, workerReconnectTime, stages, resolver, allowPipelining)(lowBytes, highBytes, maxBufferSize)).withDispatcher("nl.gideondk.sentinel.sentinel-dispatcher"), name = "sentinel-client-" + java.util.UUID.randomUUID.toString) - core ! Client.ConnectToServer(new InetSocketAddress(serverHost, serverPort)) - new Client[Cmd, Evt] { - val actor = core - } - } - - def randomRouting[Cmd, Evt](serverHost: String, serverPort: Int, numberOfConnections: Int, description: String = "Sentinel Client", stages: ⇒ PipelineStage[PipelineContext, Cmd, ByteString, Evt, ByteString], workerReconnectTime: FiniteDuration = 2 seconds, resolver: Resolver[Evt, Cmd] = Client.defaultResolver[Cmd, Evt], allowPipelining: Boolean = true, lowBytes: Long = 100L, highBytes: Long = 5000L, maxBufferSize: Long = 20000L)(implicit system: ActorSystem) = { - apply(serverHost, serverPort, RandomPool(numberOfConnections), description, stages, workerReconnectTime, resolver, allowPipelining, lowBytes, highBytes, maxBufferSize) - } - - def roundRobinRouting[Cmd, Evt](serverHost: String, serverPort: Int, numberOfConnections: Int, description: String = "Sentinel Client", stages: ⇒ PipelineStage[PipelineContext, Cmd, ByteString, Evt, ByteString], workerReconnectTime: FiniteDuration = 2 seconds, resolver: Resolver[Evt, Cmd] = Client.defaultResolver[Cmd, Evt], allowPipelining: Boolean = true, lowBytes: Long = 100L, highBytes: Long = 5000L, maxBufferSize: Long = 20000L)(implicit system: ActorSystem) = { - apply(serverHost, serverPort, RoundRobinPool(numberOfConnections), description, stages, workerReconnectTime, resolver, allowPipelining, lowBytes, highBytes, maxBufferSize) - } -} - -class ClientAntennaManager[Cmd, Evt](address: InetSocketAddress, stages: ⇒ PipelineStage[PipelineContext, Cmd, ByteString, Evt, ByteString], resolver: Resolver[Evt, Cmd], allowPipelining: Boolean = true)(lowBytes: Long, highBytes: Long, maxBufferSize: Long) extends Actor with ActorLogging with Stash { - val tcp = akka.io.IO(Tcp)(context.system) - - override def preStart = tcp ! Tcp.Connect(address) - - def connected(antenna: ActorRef): Receive = { - case x: Command[Cmd, Evt] ⇒ - antenna forward x - - case x: Terminated ⇒ - context.stop(self) - - } - - def disconnected: Receive = { - case Connected(remoteAddr, localAddr) ⇒ - val init = TcpPipelineHandler.withLogger(log, - stages >> - new TcpReadWriteAdapter >> - new BackpressureBuffer(lowBytes, highBytes, maxBufferSize)) - - val antenna = context.actorOf(Props(new Antenna(init, resolver, allowPipelining)).withDispatcher("nl.gideondk.sentinel.sentinel-dispatcher")) - val handler = context.actorOf(TcpPipelineHandler.props(init, sender, antenna).withDeploy(Deploy.local)) - context watch handler - - sender ! 
Register(handler) - antenna ! Management.RegisterTcpHandler(handler) - - unstashAll() - context.become(connected(antenna)) - - case CommandFailed(cmd: akka.io.Tcp.Command) ⇒ - context.stop(self) // Bit harsh at the moment, but should trigger reconnect and probably do better next time... - - // case x: nl.gideondk.sentinel.Command[Cmd, Evt] ⇒ - // x.registration.promise.failure(new Exception("Client has not yet been connected to a endpoint")) - - case _ ⇒ stash() - } - - def receive = disconnected -} - -class ClientCore[Cmd, Evt](routerConfig: RouterConfig, description: String, reconnectDuration: FiniteDuration, - stages: ⇒ PipelineStage[PipelineContext, Cmd, ByteString, Evt, ByteString], resolver: Resolver[Evt, Cmd], allowPipelining: Boolean = true, workerDescription: String = "Sentinel Client Worker")(lowBytes: Long, highBytes: Long, maxBufferSize: Long) extends Actor with ActorLogging with Stash { - - import context.dispatcher - - var addresses = List.empty[Tuple2[InetSocketAddress, Option[ActorRef]]] - - private case object InitializeRouter - private case class ReconnectRouter(address: InetSocketAddress) - - var coreRouter: Option[ActorRef] = None - var reconnecting = false - - def antennaManagerProto(address: InetSocketAddress) = - new ClientAntennaManager(address, stages, resolver, allowPipelining)(lowBytes, highBytes, maxBufferSize) - - def routerProto(address: InetSocketAddress) = - context.actorOf(Props(antennaManagerProto(address)).withRouter(routerConfig).withDispatcher("nl.gideondk.sentinel.sentinel-dispatcher")) - - override def preStart = { - self ! InitializeRouter - } - - def receive = { - case x: Client.ConnectToServer ⇒ - log.debug("Connecting to: " + x.addr) - if (!addresses.map(_._1).contains(x)) { - val router = routerProto(x.addr) - context.watch(router) - addresses = addresses ++ List(x.addr -> Some(router)) - coreRouter = Some(context.system.actorOf(Props.empty.withRouter(RoundRobinGroup(addresses.map(_._2).flatten.map(_.path.toString))))) - reconnecting = false - unstashAll() - } else { - log.debug("Client is already connected to: " + x.addr) - } - - case Terminated(actor) ⇒ - /* If router died, restart after a period of time */ - val terminatedRouter = addresses.find(_._2 == Some(actor)) - terminatedRouter match { - case Some(r) ⇒ - addresses = addresses diff addresses.find(_._2 == Some(actor)).toList - coreRouter = Some(context.system.actorOf(Props.empty.withRouter(RoundRobinGroup(addresses.map(_._2).flatten.map(_.path.toString))))) - log.error("Router for: " + r._1 + " died, restarting in: " + reconnectDuration.toString()) - reconnecting = true - context.system.scheduler.scheduleOnce(reconnectDuration, self, Client.ConnectToServer(r._1)) - case None ⇒ - } - - case x: Command[Cmd, Evt] ⇒ - coreRouter match { - case Some(r) ⇒ if (reconnecting) stash() else r forward x - case None ⇒ x.registration.promise.failure(new Exception("No connection(s) available")) - } - - case _ ⇒ - } -} diff --git a/src/main/scala/nl/gideondk/sentinel/processors/Consumer.scala b/src/main/scala/nl/gideondk/sentinel/processors/Consumer.scala deleted file mode 100644 index c336996..0000000 --- a/src/main/scala/nl/gideondk/sentinel/processors/Consumer.scala +++ /dev/null @@ -1,244 +0,0 @@ -package nl.gideondk.sentinel.processors - -import scala.collection.immutable.Queue -import scala.concurrent._ -import scala.concurrent.duration.DurationInt - -import akka.actor._ -import akka.io.TcpPipelineHandler.{ Init, WithinActorContext } -import akka.pattern.ask -import akka.util.Timeout - -import 
play.api.libs.iteratee._ - -import nl.gideondk.sentinel._ - -object Consumer { - - trait StreamConsumerMessage - - case object ReadyForStream extends StreamConsumerMessage - - case object StartingWithStream extends StreamConsumerMessage - - case object AskNextChunk extends StreamConsumerMessage - - case object RegisterStreamConsumer extends StreamConsumerMessage - - case object ReleaseStreamConsumer extends StreamConsumerMessage - - case object TimeoutStreamConsumer extends StreamConsumerMessage - - trait ConsumerData[Evt] - - case class ConsumerException[Evt](cause: Evt) extends Exception { - override def toString() = "ConsumerException(" + cause + ")" - } - - case class DataChunk[Evt](c: Evt) extends ConsumerData[Evt] - - case class StreamChunk[Evt](c: Evt) extends ConsumerData[Evt] - - case class ErrorChunk[Evt](c: Evt) extends ConsumerData[Evt] - - case class EndOfStream[Evt]() extends ConsumerData[Evt] - -} - -class StreamHandler[Cmd, Evt](streamConsumerTimeout: Timeout = Timeout(10 seconds)) extends Actor with ActorLogging { - import Registration._ - import Consumer._ - import ConsumerAction._ - import context.dispatcher - - context.setReceiveTimeout(streamConsumerTimeout.duration) - - var hook: Option[Promise[ConsumerData[Evt]]] = None - var buffer = Queue[ConsumerData[Evt]]() - - override def postStop() = { - hook.foreach(_.failure(new Exception("Actor quit unexpectedly"))) - } - - def receive: Receive = { - case ReleaseStreamConsumer ⇒ - context.stop(self) - sender ! () - - case AskNextChunk ⇒ - sender ! nextStreamChunk - - case chunk: ConsumerData[Evt] ⇒ - hook match { - case Some(x) ⇒ - x.success(chunk) - hook = None - case None ⇒ - buffer :+= chunk - } - - case ReceiveTimeout ⇒ { - context.stop(self) - } - - } - - def nextStreamChunk = { - buffer.headOption match { - case Some(c) ⇒ - buffer = buffer.tail - Promise[ConsumerData[Evt]]().success(c) - case None ⇒ - val p = Promise[ConsumerData[Evt]]() - hook = Some(p) - p - } - } -} - -class Consumer[Cmd, Evt](init: Init[WithinActorContext, Cmd, Evt], - streamChunkTimeout: Timeout = Timeout(120 seconds), - streamConsumerTimeout: Timeout = Timeout(10 seconds)) extends Actor with ActorLogging { - import Registration._ - import Consumer._ - import ConsumerAction._ - - import context.dispatcher - - implicit val timeout = streamChunkTimeout - - var registrations = Queue[Registration[Evt, _]]() - - var streamBuffer = Queue[ConsumerData[Evt]]() - - var currentRunningStream: Option[ActorRef] = None - - override def postStop() = { - registrations.foreach(_.promise.failure(new Exception("Actor quit unexpectedly"))) - } - - def processAction(data: Evt, action: ConsumerAction) = { - def handleConsumerData(cd: ConsumerData[Evt]) = { - val registration = registrations.head - registrations = registrations.tail - - registration match { - case r: ReplyRegistration[_] ⇒ - r.promise.completeWith(cd match { - case x: DataChunk[Evt] ⇒ - Future.successful(x.c) - case x: ErrorChunk[Evt] ⇒ - Future.failed(ConsumerException(x.c)) - }) - - case r: StreamReplyRegistration[_] ⇒ - r.promise.completeWith(cd match { - case x: DataChunk[Evt] ⇒ - Future.failed(new Exception("Unexpectedly received a normal chunk instead of stream chunk")) - case x: ErrorChunk[Evt] ⇒ - Future.failed(ConsumerException(x.c)) - }) - } - } - - def handleStreamData(cd: ConsumerData[Evt]) = { - currentRunningStream match { - case Some(x) ⇒ - cd match { - case x: EndOfStream[Evt] ⇒ currentRunningStream = None - case _ ⇒ () - } - - x ! 
cd - - case None ⇒ - registrations.headOption match { - case Some(registration) ⇒ - registration match { - case r: ReplyRegistration[_] ⇒ - throw new Exception("Unexpectedly received a stream chunk instead of normal reply") // TODO: use specific exception classes - case r: StreamReplyRegistration[_] ⇒ { - val streamHandler = context.actorOf(Props(new StreamHandler(streamConsumerTimeout)), name = "streamHandler-" + java.util.UUID.randomUUID.toString) - currentRunningStream = Some(streamHandler) - - val worker = streamHandler - - // TODO: handle stream chunk timeout better - val resource = Enumerator.generateM[Evt] { - (worker ? AskNextChunk).mapTo[Promise[ConsumerData[Evt]]].flatMap(_.future).flatMap { - _ match { - case x: EndOfStream[Evt] ⇒ (worker ? ReleaseStreamConsumer) flatMap (u ⇒ Future(None)) - case x: StreamChunk[Evt] ⇒ Future(Some(x.c)) - case x: ErrorChunk[Evt] ⇒ (worker ? ReleaseStreamConsumer) flatMap (u ⇒ Future.failed(ConsumerException(x.c))) - } - } - } - - def dequeueStreamBuffer(): Unit = { - streamBuffer.headOption match { - case Some(x) ⇒ - streamBuffer = streamBuffer.tail - x match { - case x: EndOfStream[Evt] ⇒ - worker ! x - case x ⇒ - worker ! x - dequeueStreamBuffer() - } - case None ⇒ () - } - } - - dequeueStreamBuffer() - worker ! cd - - registrations = registrations.tail - r.promise success resource - } - - } - - case None ⇒ - streamBuffer :+= cd - } - } - } - - action match { - case AcceptSignal ⇒ - handleConsumerData(DataChunk(data)) - case AcceptError ⇒ - currentRunningStream match { - case Some(x) ⇒ handleStreamData(ErrorChunk(data)) - case None ⇒ handleConsumerData(ErrorChunk(data)) - } - - case ConsumeStreamChunk ⇒ - handleStreamData(StreamChunk(data)) - case EndStream ⇒ - handleStreamData(EndOfStream[Evt]()) - case ConsumeChunkAndEndStream ⇒ - handleStreamData(StreamChunk(data)) - handleStreamData(EndOfStream[Evt]()) - - case Ignore ⇒ () - } - } - - def handleRegistrations: Receive = { - case rc: ReplyRegistration[Evt] ⇒ - registrations :+= rc - - case rc: StreamReplyRegistration[Evt] ⇒ - registrations :+= rc - - } - - var behavior: Receive = handleRegistrations orElse { - case x: ConsumerActionAndData[Evt] ⇒ - processAction(x.data, x.action) - - } - - def receive = behavior -} \ No newline at end of file diff --git a/src/main/scala/nl/gideondk/sentinel/processors/Producer.scala b/src/main/scala/nl/gideondk/sentinel/processors/Producer.scala deleted file mode 100644 index aba73ec..0000000 --- a/src/main/scala/nl/gideondk/sentinel/processors/Producer.scala +++ /dev/null @@ -1,150 +0,0 @@ -package nl.gideondk.sentinel.processors - -import scala.collection.immutable.Queue -import scala.concurrent.{ ExecutionContext, Future, Promise } -import scala.concurrent.duration.DurationInt -import scala.util.{ Failure, Success } - -import akka.actor._ -import akka.io.TcpPipelineHandler.{ Init, WithinActorContext } -import akka.pattern.ask -import akka.util.Timeout - -import play.api.libs.iteratee._ - -import nl.gideondk.sentinel._ - -object Producer { - trait HandleResult - case class HandleAsyncResult[Cmd](response: Cmd) extends HandleResult - case class HandleStreamResult[Cmd](stream: Enumerator[Cmd]) extends HandleResult - - trait StreamProducerMessage - case class StreamProducerChunk[Cmd](c: Cmd) extends StreamProducerMessage - - case object StartStreamHandling extends StreamProducerMessage - case object ReadyForStream extends StreamProducerMessage - case object StreamProducerEnded extends StreamProducerMessage - case object StreamProducerChunkReceived extends 
StreamProducerMessage - - case object DequeueResponse -} - -class Producer[Cmd, Evt](init: Init[WithinActorContext, Cmd, Evt], streamChunkTimeout: Timeout = Timeout(5 seconds)) extends Actor with ActorLogging with Stash { - import Producer._ - import ProducerAction._ - import context.dispatcher - - var responseQueue = Queue.empty[Promise[HandleResult]] - - def produceAsyncResult(data: Evt, f: Evt ⇒ Future[Cmd]) = { - val worker = self - val promise = Promise[HandleResult]() - responseQueue :+= promise - - for { - response ← f(data) map (result ⇒ HandleAsyncResult(result)) - } yield { - promise.success(response) - worker ! DequeueResponse - } - } - - def produceStreamResult(data: Evt, f: Evt ⇒ Future[Enumerator[Cmd]]) = { - val worker = self - val promise = Promise[HandleResult]() - responseQueue :+= promise - - for { - response ← f(data) map (result ⇒ HandleStreamResult(result)) - } yield { - promise.success(response) - worker ! DequeueResponse - } - } - - val initSignal = produceAsyncResult(_, _) - val initStreamConsumer = produceAsyncResult(_, _) - val initStreamProducer = produceStreamResult(_, _) - - def processAction(data: Evt, action: ProducerAction[Evt, Cmd]) = { - val worker = self - val future = action match { - case x: Signal[Evt, Cmd] ⇒ initSignal(data, x.f) - - case x: ProduceStream[Evt, Cmd] ⇒ initStreamProducer(data, x.f) - - case x: ConsumeStream[Evt, Cmd] ⇒ - val incomingStreamPromise = Promise[Enumerator[Evt]]() - context.parent ! Registration.StreamReplyRegistration(incomingStreamPromise) - incomingStreamPromise.future flatMap ((s) ⇒ initStreamConsumer(data, x.f(_)(s))) - } - - future.onFailure { - case e ⇒ - log.error(e, e.getMessage) - context.stop(self) - } - } - - def handleRequest: Receive = { - case x: ProducerActionAndData[Evt, Cmd] ⇒ - processAction(x.data, x.action) - } - - def handleDequeue: Receive = { - case DequeueResponse ⇒ { - def dequeueAndSend: Unit = { - if (!responseQueue.isEmpty && responseQueue.front.isCompleted) { - // TODO: Should be handled a lot safer! - val promise = responseQueue.head - responseQueue = responseQueue.tail - promise.future.value match { - case Some(Success(v)) ⇒ - self ! v - dequeueAndSend - case Some(Failure(e)) ⇒ // Would normally not occur... - log.error(e, e.getMessage) - context.stop(self) - } - } - - } - dequeueAndSend - } - } - - def handleRequestAndResponse: Receive = handleRequest orElse handleDequeue orElse { - case x: HandleAsyncResult[Cmd] ⇒ context.parent ! Reply.Response(x.response) - case x: HandleStreamResult[Cmd] ⇒ - val worker = self - // TODO: What to do when producing Enumerator times out, send error, close stream and continue producing? - implicit val timeout = streamChunkTimeout - - val consumer = (x.stream |>>> Iteratee.foldM(())((a, b) ⇒ (worker ? StreamProducerChunk(b)).map(x ⇒ ()))).flatMap(x ⇒ (worker ? StreamProducerEnded)) - consumer.onFailure { - case e ⇒ - log.error(e, e.getMessage) - context.stop(self) - } - - context.become(handleRequestAndStreamResponse) - - case x: StreamProducerMessage ⇒ - log.error("Internal leakage in stream: received unexpected stream chunk") - context.stop(self) - } - - def handleRequestAndStreamResponse: Receive = handleRequest orElse { - case StreamProducerChunk(c) ⇒ - sender ! StreamProducerChunkReceived - context.parent ! Reply.StreamResponseChunk(c) - case StreamProducerEnded ⇒ - sender ! 
StreamProducerChunkReceived - context.become(handleRequestAndResponse) - unstashAll() - case _ ⇒ stash() - } - - def receive = handleRequestAndResponse -} diff --git a/src/test/scala/nl/gideondk/sentinel/FullDuplexSpec.scala b/src/test/scala/nl/gideondk/sentinel/FullDuplexSpec.scala deleted file mode 100644 index e044d20..0000000 --- a/src/test/scala/nl/gideondk/sentinel/FullDuplexSpec.scala +++ /dev/null @@ -1,73 +0,0 @@ -package nl.gideondk.sentinel - -import scala.concurrent.ExecutionContext.Implicits.global - -import org.scalatest.WordSpec -import org.scalatest.matchers.ShouldMatchers - -import akka.actor._ -import akka.routing._ - -import scala.concurrent._ -import scala.concurrent.duration._ - -import protocols._ - -class FullDuplexSpec extends WordSpec with ShouldMatchers { - - import SimpleMessage._ - - implicit val duration = Duration(25, SECONDS) - - def client(portNumber: Int)(implicit system: ActorSystem) = Client.randomRouting("localhost", portNumber, 1, "Worker", SimpleMessage.stages, 0.5 seconds, SimpleServerHandler)(system) - - def server(portNumber: Int)(implicit system: ActorSystem) = { - val s = Server(portNumber, SimpleServerHandler, stages = SimpleMessage.stages)(system) - s - } - - "A client and a server" should { - "be able to exchange requests simultaneously" in new TestKitSpec { - val portNumber = TestHelpers.portNumber.getAndIncrement() - val s = server(portNumber) - val c = client(portNumber) - - Thread.sleep(500) - val action = c ? SimpleCommand(PING_PONG, "") - val serverAction = (s ?* SimpleCommand(PING_PONG, "")).map(_.head) - - val responses = Future.sequence(List(action, serverAction)) - - val results = Await.result(responses, 5 seconds) - - results.length should equal(2) - results.distinct.length should equal(1) - } - - "be able to exchange multiple requests simultaneously" in new TestKitSpec { - val portNumber = TestHelpers.portNumber.getAndIncrement() - val s = server(portNumber) - Thread.sleep(1000) - - val c = client(portNumber) - val secC = client(portNumber) - Thread.sleep(1000) - - val numberOfRequests = 10 - - val actions = Future.sequence(List.fill(numberOfRequests)(c ? SimpleCommand(PING_PONG, ""))) - val secActions = Future.sequence(List.fill(numberOfRequests)(secC ? 
SimpleCommand(PING_PONG, ""))) - val serverActions = Future.sequence(List.fill(numberOfRequests)((s ?** SimpleCommand(PING_PONG, "")))) - - val combined = Future.sequence(List(actions, serverActions.map(_.flatten), secActions)) - - val aa = Await.result(actions, 5 seconds) - - val results = Await.result(combined, 5 seconds) - - results(0).length should equal(numberOfRequests) - results(2).length should equal(numberOfRequests) - results(1).length should equal(numberOfRequests * 2) - } - } -} diff --git a/src/test/scala/nl/gideondk/sentinel/RequestResponse.scala b/src/test/scala/nl/gideondk/sentinel/RequestResponse.scala deleted file mode 100644 index c807c43..0000000 --- a/src/test/scala/nl/gideondk/sentinel/RequestResponse.scala +++ /dev/null @@ -1,93 +0,0 @@ -package nl.gideondk.sentinel - -import scala.concurrent.ExecutionContext.Implicits.global - -import org.scalatest.WordSpec - -import akka.actor._ -import scala.concurrent.duration._ -import scala.concurrent._ - -import scala.util.Try - -import protocols._ - -class RequestResponseSpec extends WordSpec { - - import SimpleMessage._ - - implicit val duration = Duration(5, SECONDS) - - def client(portNumber: Int)(implicit system: ActorSystem) = Client.roundRobinRouting("localhost", portNumber, 16, "Worker", SimpleMessage.stages, 0.1 seconds, SimpleServerHandler, lowBytes = 1024L, highBytes = 1024 * 1024, maxBufferSize = 1024 * 1024 * 50)(system) - - def server(portNumber: Int)(implicit system: ActorSystem) = { - val s = Server(portNumber, SimpleServerHandler, stages = SimpleMessage.stages)(system) - Thread.sleep(100) - s - } - - "A client" should { - "be able to request a response from a server" in new TestKitSpec { - val portNumber = TestHelpers.portNumber.getAndIncrement() - val s = server(portNumber) - val c = client(portNumber) - - val action = c ? SimpleCommand(PING_PONG, "") - val result = Try(Await.result(action, 5 seconds)) - - result.isSuccess should equal(true) - } - - "be able to requests multiple requests from a server" in new TestKitSpec { - val portNumber = TestHelpers.portNumber.getAndIncrement() - val s = server(portNumber) - val c = client(portNumber) - Thread.sleep(100) - - val numberOfRequests = 1000 - - val action = Future.sequence(List.fill(numberOfRequests)(c ? SimpleCommand(ECHO, LargerPayloadTestHelper.randomBSForSize(1024 * 10)))) - val result = Try(Await.result(action, 5 seconds)) - - result.get.length should equal(numberOfRequests) - result.isSuccess should equal(true) - } - - "be able to receive responses in correct order" in new TestKitSpec { - val portNumber = TestHelpers.portNumber.getAndIncrement() - val s = server(portNumber) - val c = client(portNumber) - - val numberOfRequests = 20 * 1000 - - val items = List.range(0, numberOfRequests).map(_.toString) - val action = Future.sequence(items.map(x ⇒ (c ? SimpleCommand(ECHO, x)))) - val result = Await.result(action, 5 seconds) - - result.map(_.payload) should equal(items) - } - - "should automatically reconnect" in new TestKitSpec { - val portNumber = TestHelpers.portNumber.getAndIncrement() - val s = server(portNumber) - val c = client(portNumber) - Thread.sleep(500) - - val action = c ? SimpleCommand(PING_PONG, "") - val result = Try(Await.result(action, 5 seconds)) - - result.isSuccess should equal(true) - - system.stop(s.actor) - Thread.sleep(250) - - val secAction = c ? 
SimpleCommand(PING_PONG, "") - val ss = server(portNumber) - - Thread.sleep(250) - val endResult = Try(Await.result(secAction, 10 seconds)) - - endResult.isSuccess should equal(true) - } - } -} diff --git a/src/test/scala/nl/gideondk/sentinel/ServerRequestSpec.scala b/src/test/scala/nl/gideondk/sentinel/ServerRequestSpec.scala deleted file mode 100644 index f8db82c..0000000 --- a/src/test/scala/nl/gideondk/sentinel/ServerRequestSpec.scala +++ /dev/null @@ -1,99 +0,0 @@ -package nl.gideondk.sentinel - -import scala.concurrent.ExecutionContext.Implicits.global - -import org.scalatest.WordSpec -import org.scalatest.matchers.ShouldMatchers - -import akka.actor._ -import akka.routing._ -import scala.concurrent._ -import scala.concurrent.duration._ - -import protocols._ -import akka.util.Timeout - -class ServerRequestSpec extends WordSpec { - - import SimpleMessage._ - - implicit val duration = Duration(5, SECONDS) - implicit val timeout = Timeout(Duration(5, SECONDS)) - - val numberOfConnections = 16 - - def client(portNumber: Int)(implicit system: ActorSystem) = Client.randomRouting("localhost", portNumber, numberOfConnections, "Worker", SimpleMessage.stages, 0.5 seconds, SimpleServerHandler)(system) - - def server(portNumber: Int)(implicit system: ActorSystem) = { - val s = Server(portNumber, SimpleServerHandler, stages = SimpleMessage.stages)(system) - Thread.sleep(100) - s - } - - "A server" should { - "be able to send a request to a client" in new TestKitSpec { - val portNumber = TestHelpers.portNumber.getAndIncrement() - val s = server(portNumber) - val c = client(portNumber) - Thread.sleep(500) - - val action = (s ? SimpleCommand(PING_PONG, "")) - val result = Await.result(action, 5 seconds) - - result should equal(SimpleReply("PONG")) - } - - "be able to send a request to a all unique connected hosts" in new TestKitSpec { - val portNumber = TestHelpers.portNumber.getAndIncrement() - val s = server(portNumber) - - val numberOfClients = 5 - List.fill(numberOfClients)(client(portNumber)) - - Thread.sleep(500) - - val action = (s ?* SimpleCommand(PING_PONG, "")) - val result = Await.result(action, 5 seconds) - - result.length should equal(1) - } - - "be able to send a request to a all connected clients" in new TestKitSpec { - val portNumber = TestHelpers.portNumber.getAndIncrement() - val s = server(portNumber) - - val numberOfClients = 5 - List.fill(numberOfClients)(client(portNumber)) - - Thread.sleep(500) - - val action = (s ?** SimpleCommand(PING_PONG, "")) - val result = Await.result(action, 5 seconds) - - result.length should equal(numberOfClients * numberOfConnections) - } - - "be able to retrieve the correct number of connected sockets" in new TestKitSpec { - val portNumber = TestHelpers.portNumber.getAndIncrement() - val s = server(portNumber) - - val numberOfClients = 5 - val clients = List.fill(numberOfClients)(client(portNumber)) - - Thread.sleep(500) - - val connectedSockets = Await.result((s connectedSockets), 5 seconds) - connectedSockets should equal(numberOfClients * numberOfConnections) - - val connectedHosts = Await.result((s connectedHosts), 5 seconds) - connectedHosts should equal(1) - - val toBeKilledActors = clients.splitAt(3)._1.map(_.actor) - toBeKilledActors.foreach(x ⇒ x ! 
PoisonPill) - Thread.sleep(500) - - val stillConnectedSockets = Await.result((s connectedSockets), 5 seconds) - stillConnectedSockets should equal(2 * numberOfConnections) - } - } -} diff --git a/src/test/scala/nl/gideondk/sentinel/StreamingSpec.scala b/src/test/scala/nl/gideondk/sentinel/StreamingSpec.scala deleted file mode 100644 index 0f08092..0000000 --- a/src/test/scala/nl/gideondk/sentinel/StreamingSpec.scala +++ /dev/null @@ -1,155 +0,0 @@ -package nl.gideondk.sentinel - -import scala.concurrent.ExecutionContext.Implicits.global - -import org.scalatest.WordSpec -import org.scalatest.matchers.ShouldMatchers - -import akka.actor._ -import akka.routing._ -import scala.concurrent.duration._ -import scala.concurrent._ - -import scala.util.Try -import play.api.libs.iteratee._ - -import protocols._ - -class StreamingSpec extends WordSpec { - - import SimpleMessage._ - - implicit val duration = Duration(5, SECONDS) - - def client(portNumber: Int)(implicit system: ActorSystem) = Client.randomRouting("localhost", portNumber, 1, "Worker", SimpleMessage.stages, 0.5 seconds, SimpleServerHandler)(system) - def nonPipelinedClient(portNumber: Int)(implicit system: ActorSystem) = Client.randomRouting("localhost", portNumber, 1, "Worker", SimpleMessage.stages, 0.5 seconds, SimpleServerHandler, false)(system) - - def server(portNumber: Int)(implicit system: ActorSystem) = { - val s = Server(portNumber, SimpleServerHandler, stages = SimpleMessage.stages)(system) - Thread.sleep(100) - s - } - - "A client" should { - "be able to send a stream to a server" in new TestKitSpec { - val portNumber = TestHelpers.portNumber.getAndIncrement() - val s = server(portNumber) - val c = client(portNumber) - - val count = 500 - val chunks = List.fill(count)(SimpleStreamChunk("ABCDEF")) ++ List(SimpleStreamChunk("")) - val action = c ?<<- (SimpleCommand(TOTAL_CHUNK_SIZE, ""), Enumerator(chunks: _*)) - - val localLength = chunks.foldLeft(0)((b, a) ⇒ b + a.payload.length) - - val result = Try(Await.result(action, 5 seconds)) - - result.isSuccess should equal(true) - result.get.payload.toInt should equal(localLength) - } - - "be able to receive streams from a server" in new TestKitSpec { - val portNumber = TestHelpers.portNumber.getAndIncrement() - val s = server(portNumber) - val c = client(portNumber) - - val count = 500 - val action = c ?->> SimpleCommand(GENERATE_NUMBERS, count.toString) - - val f = action.flatMap(_ |>>> Iteratee.getChunks) - val result = Await.result(f, 5 seconds) - - result.length should equal(count) - } - - "be able to receive multiple streams simultaneously from a server" in new TestKitSpec { - val portNumber = TestHelpers.portNumber.getAndIncrement() - val s = server(portNumber) - val c = client(portNumber) - - val count = 500 - val numberOfActions = 8 - val actions = Future.sequence(List.fill(numberOfActions)((c ?->> SimpleCommand(GENERATE_NUMBERS, count.toString)).flatMap(x ⇒ x |>>> Iteratee.getChunks))) - - val result = Await.result(actions.map(_.flatten), 5 seconds) - - result.length should equal(count * numberOfActions) - } - - "be able to receive multiple streams and normal commands simultaneously from a server" in new TestKitSpec { - val portNumber = TestHelpers.portNumber.getAndIncrement() - val s = server(portNumber) - val c = client(portNumber) - - val count = 500 - val numberOfActions = 8 - - val streamAction = Future.sequence(List.fill(numberOfActions)((c ?->> SimpleCommand(GENERATE_NUMBERS, count.toString)).flatMap(x ⇒ x |>>> Iteratee.getChunks))) - val action = 
Future.sequence(List.fill(count)(c ? SimpleCommand(PING_PONG, ""))) - - val actions = Future.sequence(List(streamAction, action)) - - val result = Try(Await.result(actions.map(_.flatten), 5 seconds)) - - result.isSuccess should equal(true) - } - - "be able to receive multiple streams and normal commands simultaneously from a server in a non-pipelined environment" in new TestKitSpec { - val portNumber = TestHelpers.portNumber.getAndIncrement() - val s = server(portNumber) - val c = nonPipelinedClient(portNumber) - - val count = 500 - val numberOfActions = 8 - - val streamAction = Future.sequence(List.fill(numberOfActions)((c ?->> SimpleCommand(GENERATE_NUMBERS, count.toString)).flatMap(x ⇒ x |>>> Iteratee.getChunks))) - val action = Future.sequence(List.fill(count)(c ? SimpleCommand(PING_PONG, ""))) - - val actions = Future.sequence(List(streamAction, action)) - - val result = Try(Await.result(actions.map(_.flatten), 5 seconds)) - - result.isSuccess should equal(true) - } - - "be able to handle slow or idle consumers while retrieving streams from a server" in new TestKitSpec { - val portNumber = TestHelpers.portNumber.getAndIncrement() - val s = server(portNumber) - val c = client(portNumber) - - val count = 500 - val numberOfActions = 8 - - val newAct = for { - takSome ← (c ?->> SimpleCommand(GENERATE_NUMBERS, count.toString)).flatMap(x ⇒ x &> Enumeratee.take(1) |>>> Iteratee.getChunks) - takSome ← (c ?->> SimpleCommand(GENERATE_NUMBERS, count.toString)).flatMap(x ⇒ x &> Enumeratee.take(1) &> Enumeratee.map(x ⇒ throw new Exception("")) |>>> Iteratee.getChunks).recover { case e ⇒ () } - act ← c ? SimpleCommand(PING_PONG, "") - act ← c ? SimpleCommand(PING_PONG, "") - takSome ← (c ?->> SimpleCommand(GENERATE_NUMBERS, count.toString)).flatMap(x ⇒ x |>>> Iteratee.getChunks) - act ← c ? 
SimpleCommand(PING_PONG, "") - } yield act - - val result = Try(Await.result(newAct, 5 seconds)) - - result.isSuccess should equal(true) - } - - "be able to receive send streams simultaneously to a server" in new TestKitSpec { - val portNumber = TestHelpers.portNumber.getAndIncrement() - val s = server(portNumber) - val c = client(portNumber) - - val count = 500 - val chunks = List.fill(count)(SimpleStreamChunk("ABCDEF")) ++ List(SimpleStreamChunk("")) - val action = c ?<<- (SimpleCommand(TOTAL_CHUNK_SIZE, ""), Enumerator(chunks: _*)) - - val numberOfActions = 2 - val actions = Future.sequence(List.fill(numberOfActions)(c ?<<- (SimpleCommand(TOTAL_CHUNK_SIZE, ""), Enumerator(chunks: _*)))) - - val localLength = chunks.foldLeft(0)((b, a) ⇒ b + a.payload.length) - val result = Await.result(actions, 5 seconds) - - result.map(_.payload.toInt).sum should equal(localLength * numberOfActions) - } - } -} diff --git a/src/test/scala/nl/gideondk/sentinel/TestHelpers.scala b/src/test/scala/nl/gideondk/sentinel/TestHelpers.scala index 1b38e6b..5415906 100644 --- a/src/test/scala/nl/gideondk/sentinel/TestHelpers.scala +++ b/src/test/scala/nl/gideondk/sentinel/TestHelpers.scala @@ -1,54 +1,27 @@ package nl.gideondk.sentinel -<<<<<<< HEAD -import org.scalatest.{ Suite, BeforeAndAfterAll, WordSpec } -import org.scalatest.matchers.ShouldMatchers - -import akka.io.SymmetricPipelineStage -import akka.util.ByteString -======= import java.util.concurrent.TimeUnit import java.util.concurrent.atomic.AtomicInteger ->>>>>>> develop import akka.actor.ActorSystem import akka.event.{ Logging, LoggingAdapter } import akka.testkit._ -<<<<<<< HEAD -======= import org.scalatest.concurrent.ScalaFutures import org.scalatest.time.Span import org.scalatest.{ BeforeAndAfterAll, Matchers, WordSpecLike } ->>>>>>> develop -import scala.concurrent.{ Await, Future } import scala.concurrent.duration.Duration +import scala.concurrent.{ Await, Future } import scala.language.postfixOps abstract class SentinelSpec(_system: ActorSystem) extends TestKit(_system) with WordSpecLike with Matchers with BeforeAndAfterAll with ScalaFutures { -<<<<<<< HEAD -abstract class TestKitSpec extends TestKit(ActorSystem(java.util.UUID.randomUUID.toString)) - with Suite - with ShouldMatchers - with BeforeAndAfterAll - with ImplicitSender { - override def afterAll = { - system.shutdown() - } -} - -object TestHelpers { - val portNumber = new AtomicInteger(10500) -} -======= implicit val patience = PatienceConfig(testKitSettings.DefaultTimeout.duration, Span(1500, org.scalatest.time.Millis)) override val invokeBeforeAllAndAfterAllEvenIfNoTestsAreExpected = true implicit val ec = _system.dispatcher val log: LoggingAdapter = Logging(system, this.getClass) ->>>>>>> develop override protected def afterAll(): Unit = { super.afterAll() @@ -63,20 +36,6 @@ object TestHelpers { } } -<<<<<<< HEAD -object LargerPayloadTestHelper { - def randomBSForSize(size: Int) = { - implicit val be = java.nio.ByteOrder.BIG_ENDIAN - val stringB = new StringBuilder(size) - val paddingString = "abcdefghijklmnopqrs" - - while (stringB.length() + paddingString.length() < size) stringB.append(paddingString) - - stringB.toString() - } -} -======= object TestHelpers { val portNumber = new AtomicInteger(10500) } ->>>>>>> develop diff --git a/src/test/scala/nl/gideondk/sentinel/protocols/SimpleMessage.scala b/src/test/scala/nl/gideondk/sentinel/protocols/SimpleMessage.scala deleted file mode 100644 index f041d8c..0000000 --- a/src/test/scala/nl/gideondk/sentinel/protocols/SimpleMessage.scala 
+++ /dev/null @@ -1,103 +0,0 @@ -package nl.gideondk.sentinel.protocols - -import scala.concurrent._ -import scala.concurrent.ExecutionContext.Implicits.global - -import akka.io._ -import akka.util.{ ByteString, ByteStringBuilder } - -import nl.gideondk.sentinel._ -import play.api.libs.iteratee._ - -trait SimpleMessageFormat { - def payload: String -} - -case class SimpleCommand(cmd: Int, payload: String) extends SimpleMessageFormat // 1 -case class SimpleReply(payload: String) extends SimpleMessageFormat // 2 -case class SimpleStreamChunk(payload: String) extends SimpleMessageFormat // 3 -case class SimpleError(payload: String) extends SimpleMessageFormat // 4 - -class PingPongMessageStage extends SymmetricPipelineStage[PipelineContext, SimpleMessageFormat, ByteString] { - override def apply(ctx: PipelineContext) = new SymmetricPipePair[SimpleMessageFormat, ByteString] { - implicit val byteOrder = java.nio.ByteOrder.BIG_ENDIAN - - override val commandPipeline = { - msg: SimpleMessageFormat ⇒ - { - val bsb = new ByteStringBuilder() - msg match { - case x: SimpleCommand ⇒ - bsb.putByte(1.toByte) - bsb.putInt(x.cmd) - bsb.putBytes(x.payload.getBytes) - case x: SimpleReply ⇒ - bsb.putByte(2.toByte) - bsb.putBytes(x.payload.getBytes) - case x: SimpleStreamChunk ⇒ - bsb.putByte(3.toByte) - bsb.putBytes(x.payload.getBytes) - case x: SimpleError ⇒ - bsb.putByte(4.toByte) - bsb.putBytes(x.payload.getBytes) - case _ ⇒ - } - Seq(Right(bsb.result)) - } - - } - - override val eventPipeline = { - bs: ByteString ⇒ - val iter = bs.iterator - iter.getByte.toInt match { - case 1 ⇒ - Seq(Left(SimpleCommand(iter.getInt, new String(iter.toByteString.toArray)))) - case 2 ⇒ - Seq(Left(SimpleReply(new String(iter.toByteString.toArray)))) - case 3 ⇒ - Seq(Left(SimpleStreamChunk(new String(iter.toByteString.toArray)))) - case 4 ⇒ - Seq(Left(SimpleError(new String(iter.toByteString.toArray)))) - } - - } - } -} - -object SimpleMessage { - val stages = new PingPongMessageStage >> new LengthFieldFrame(1024 * 1024) - - val PING_PONG = 1 - val TOTAL_CHUNK_SIZE = 2 - val GENERATE_NUMBERS = 3 - val CHUNK_LENGTH = 4 - val ECHO = 5 -} - -import SimpleMessage._ -trait DefaultSimpleMessageHandler extends Resolver[SimpleMessageFormat, SimpleMessageFormat] { - def process = { - case SimpleStreamChunk(x) ⇒ if (x.length > 0) ConsumerAction.ConsumeStreamChunk else ConsumerAction.EndStream - case x: SimpleError ⇒ ConsumerAction.AcceptError - case x: SimpleReply ⇒ ConsumerAction.AcceptSignal - } -} - -object SimpleClientHandler extends DefaultSimpleMessageHandler - -object SimpleServerHandler extends DefaultSimpleMessageHandler { - - override def process = super.process orElse { - case SimpleCommand(PING_PONG, payload) ⇒ ProducerAction.Signal { x: SimpleCommand ⇒ Future(SimpleReply("PONG")) } - case SimpleCommand(TOTAL_CHUNK_SIZE, payload) ⇒ ProducerAction.ConsumeStream { x: SimpleCommand ⇒ - s: Enumerator[SimpleStreamChunk] ⇒ - s |>>> Iteratee.fold(0) { (b, a) ⇒ b + a.payload.length } map (x ⇒ SimpleReply(x.toString)) - } - case SimpleCommand(GENERATE_NUMBERS, payload) ⇒ ProducerAction.ProduceStream { x: SimpleCommand ⇒ - val count = payload.toInt - Future((Enumerator(List.range(0, count): _*) &> Enumeratee.map(x ⇒ SimpleStreamChunk(x.toString))) >>> Enumerator(SimpleStreamChunk(""))) - } - case SimpleCommand(ECHO, payload) ⇒ ProducerAction.Signal { x: SimpleCommand ⇒ Future(SimpleReply(x.payload)) } - } -} \ No newline at end of file From 090a9c510773e80b22adbc4edf113ee38ea13560 Mon Sep 17 00:00:00 2001 From: James Roper Date: 
Thu, 15 Jun 2017 14:46:51 +1000 Subject: [PATCH 52/54] Don't force materializer to be an ActorMaterializer --- .../scala/nl/gideondk/sentinel/client/Client.scala | 10 +++++----- .../nl/gideondk/sentinel/client/ClientStage.scala | 2 +- .../scala/nl/gideondk/sentinel/server/Server.scala | 4 ++-- .../nl/gideondk/sentinel/protocol/SimpleMessage.scala | 2 +- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/src/main/scala/nl/gideondk/sentinel/client/Client.scala b/src/main/scala/nl/gideondk/sentinel/client/Client.scala index f60067e..e9a9a66 100644 --- a/src/main/scala/nl/gideondk/sentinel/client/Client.scala +++ b/src/main/scala/nl/gideondk/sentinel/client/Client.scala @@ -55,20 +55,20 @@ object Client { def apply[Cmd, Evt](hosts: Source[HostEvent, NotUsed], resolver: Resolver[Evt], shouldReact: Boolean, inputOverflowStrategy: OverflowStrategy, - protocol: BidiFlow[Cmd, ByteString, ByteString, Evt, Any])(implicit system: ActorSystem, mat: ActorMaterializer, ec: ExecutionContext): Client[Cmd, Evt] = { + protocol: BidiFlow[Cmd, ByteString, ByteString, Evt, Any])(implicit system: ActorSystem, mat: Materializer, ec: ExecutionContext): Client[Cmd, Evt] = { val processor = Processor[Cmd, Evt](resolver, Config.producerParallelism) new Client(hosts, ClientConfig.connectionsPerHost, ClientConfig.maxFailuresPerHost, ClientConfig.failureRecoveryPeriod, ClientConfig.inputBufferSize, inputOverflowStrategy, processor, protocol.reversed) } def apply[Cmd, Evt](hosts: List[Host], resolver: Resolver[Evt], shouldReact: Boolean, inputOverflowStrategy: OverflowStrategy, - protocol: BidiFlow[Cmd, ByteString, ByteString, Evt, Any])(implicit system: ActorSystem, mat: ActorMaterializer, ec: ExecutionContext): Client[Cmd, Evt] = { + protocol: BidiFlow[Cmd, ByteString, ByteString, Evt, Any])(implicit system: ActorSystem, mat: Materializer, ec: ExecutionContext): Client[Cmd, Evt] = { val processor = Processor[Cmd, Evt](resolver, Config.producerParallelism) new Client(Source(hosts.map(HostUp)), ClientConfig.connectionsPerHost, ClientConfig.maxFailuresPerHost, ClientConfig.failureRecoveryPeriod, ClientConfig.inputBufferSize, inputOverflowStrategy, processor, protocol.reversed) } def flow[Cmd, Evt](hosts: Source[HostEvent, NotUsed], resolver: Resolver[Evt], - shouldReact: Boolean = false, protocol: BidiFlow[Cmd, ByteString, ByteString, Evt, Any])(implicit system: ActorSystem, mat: ActorMaterializer, ec: ExecutionContext) = { + shouldReact: Boolean = false, protocol: BidiFlow[Cmd, ByteString, ByteString, Evt, Any])(implicit system: ActorSystem, mat: Materializer, ec: ExecutionContext) = { val processor = Processor[Cmd, Evt](resolver, Config.producerParallelism) type Context = Promise[Event[Evt]] @@ -100,7 +100,7 @@ object Client { def rawFlow[Context, Cmd, Evt](hosts: Source[HostEvent, NotUsed], resolver: Resolver[Evt], shouldReact: Boolean = false, - protocol: BidiFlow[Cmd, ByteString, ByteString, Evt, Any])(implicit system: ActorSystem, mat: ActorMaterializer, ec: ExecutionContext) = { + protocol: BidiFlow[Cmd, ByteString, ByteString, Evt, Any])(implicit system: ActorSystem, mat: Materializer, ec: ExecutionContext) = { val processor = Processor[Cmd, Evt](resolver, Config.producerParallelism) Flow.fromGraph(GraphDSL.create(hosts) { implicit b ⇒ @@ -129,7 +129,7 @@ object Client { class Client[Cmd, Evt](hosts: Source[HostEvent, NotUsed], connectionsPerHost: Int, maximumFailuresPerHost: Int, recoveryPeriod: FiniteDuration, inputBufferSize: Int, inputOverflowStrategy: OverflowStrategy, - processor: Processor[Cmd, 
Evt], protocol: BidiFlow[ByteString, Evt, Cmd, ByteString, Any])(implicit system: ActorSystem, mat: ActorMaterializer) {
+ processor: Processor[Cmd, Evt], protocol: BidiFlow[ByteString, Evt, Cmd, ByteString, Any])(implicit system: ActorSystem, mat: Materializer) {
 type Context = Promise[Event[Evt]]
diff --git a/src/main/scala/nl/gideondk/sentinel/client/ClientStage.scala b/src/main/scala/nl/gideondk/sentinel/client/ClientStage.scala
index a9499e8..307e023 100644
--- a/src/main/scala/nl/gideondk/sentinel/client/ClientStage.scala
+++ b/src/main/scala/nl/gideondk/sentinel/client/ClientStage.scala
@@ -41,7 +41,7 @@ import nl.gideondk.sentinel.client.ClientStage._
 class ClientStage[Context, Cmd, Evt](connectionsPerHost: Int, maximumFailuresPerHost: Int, recoveryPeriod: FiniteDuration, finishGracefully: Boolean,
 processor: Processor[Cmd, Evt],
- protocol: BidiFlow[ByteString, Evt, Cmd, ByteString, Any])(implicit system: ActorSystem, mat: ActorMaterializer)
+ protocol: BidiFlow[ByteString, Evt, Cmd, ByteString, Any])(implicit system: ActorSystem, mat: Materializer)
 extends GraphStage[BidiShape[(Command[Cmd], Context), (Try[Event[Evt]], Context), HostEvent, HostEvent]] {
diff --git a/src/main/scala/nl/gideondk/sentinel/server/Server.scala b/src/main/scala/nl/gideondk/sentinel/server/Server.scala
index 07b263e..395c258 100644
--- a/src/main/scala/nl/gideondk/sentinel/server/Server.scala
+++ b/src/main/scala/nl/gideondk/sentinel/server/Server.scala
@@ -2,7 +2,7 @@ package nl.gideondk.sentinel.server
 import akka.actor.ActorSystem
 import akka.stream.scaladsl.{ BidiFlow, Flow, GraphDSL, Sink, Source, Tcp }
-import akka.stream.{ ActorMaterializer, FlowShape }
+import akka.stream.{ Materializer, FlowShape }
 import akka.util.ByteString
 import nl.gideondk.sentinel.pipeline.{ Processor, Resolver }
@@ -10,7 +10,7 @@ import scala.concurrent.ExecutionContext
 import scala.util.{ Failure, Success }
 object Server {
- def apply[Cmd, Evt](interface: String, port: Int, resolver: Resolver[Evt], protocol: BidiFlow[ByteString, Evt, Cmd, ByteString, Any])(implicit system: ActorSystem, mat: ActorMaterializer, ec: ExecutionContext): Unit = {
+ def apply[Cmd, Evt](interface: String, port: Int, resolver: Resolver[Evt], protocol: BidiFlow[ByteString, Evt, Cmd, ByteString, Any])(implicit system: ActorSystem, mat: Materializer, ec: ExecutionContext): Unit = {
 val handler = Sink.foreach[Tcp.IncomingConnection] { conn ⇒
 val processor = Processor[Cmd, Evt](resolver, 1, true)
diff --git a/src/test/scala/nl/gideondk/sentinel/protocol/SimpleMessage.scala b/src/test/scala/nl/gideondk/sentinel/protocol/SimpleMessage.scala
index 51c8bd8..42b2447 100644
--- a/src/test/scala/nl/gideondk/sentinel/protocol/SimpleMessage.scala
+++ b/src/test/scala/nl/gideondk/sentinel/protocol/SimpleMessage.scala
@@ -1,6 +1,6 @@ package nl.gideondk.sentinel.protocol
-import akka.stream.{ ActorMaterializer, Materializer }
+import akka.stream.Materializer
 import akka.stream.scaladsl.{ BidiFlow, Framing, Sink, Source }
 import akka.util.{ ByteString, ByteStringBuilder }
 import nl.gideondk.sentinel.pipeline.Resolver
From 221fae2c3fcaa86931512e007d19f998d820f58b Mon Sep 17 00:00:00 2001
From: James Roper
Date: Thu, 15 Jun 2017 15:06:19 +1000
Subject: [PATCH 53/54] Don't load Typesafe config yourself

Sentinel shouldn't load Typesafe config itself, since this undermines the ability to configure it per test, or to use a custom load mechanism for Typesafe config. It also shouldn't store the config statically.
This converts the config to an Akka extension, and uses the config loaded by Akka, which gives end users the flexibility to configure it however they want, including programmatically. Making it an Akka extension also means the configuration values are only read from the configuration once per actor system (they are effectively cached for each use).
---
 .../scala/nl/gideondk/sentinel/Config.scala   | 19 +++++++++---
 .../nl/gideondk/sentinel/client/Client.scala  | 29 ++++++++++++++-----
 2 files changed, 37 insertions(+), 11 deletions(-)

diff --git a/src/main/scala/nl/gideondk/sentinel/Config.scala b/src/main/scala/nl/gideondk/sentinel/Config.scala
index dd9f763..36bf29f 100644
--- a/src/main/scala/nl/gideondk/sentinel/Config.scala
+++ b/src/main/scala/nl/gideondk/sentinel/Config.scala
@@ -1,9 +1,20 @@ package nl.gideondk.sentinel
-import com.typesafe.config.ConfigFactory
-object Config {
- private lazy val config = ConfigFactory.load().getConfig("nl.gideondk.sentinel")
+import akka.actor.{ ActorSystem, ExtendedActorSystem, Extension, ExtensionId, ExtensionIdProvider }
+import com.typesafe.config.{ Config ⇒ TypesafeConfig }
+class Config(config: TypesafeConfig) extends Extension { val producerParallelism = config.getInt("pipeline.parallelism") }
+object Config extends ExtensionId[Config] with ExtensionIdProvider {
+ override def lookup = Config
+ override def createExtension(system: ExtendedActorSystem) =
+ new Config(system.settings.config.getConfig("nl.gideondk.sentinel"))
+ override def get(system: ActorSystem): Config = super.get(system)
+
+ private def config(implicit system: ActorSystem) = apply(system)
+
+ def producerParallelism(implicit system: ActorSystem) = config.producerParallelism
+}
+
diff --git a/src/main/scala/nl/gideondk/sentinel/client/Client.scala b/src/main/scala/nl/gideondk/sentinel/client/Client.scala
index f60067e..2c3f3ef 100644
--- a/src/main/scala/nl/gideondk/sentinel/client/Client.scala
+++ b/src/main/scala/nl/gideondk/sentinel/client/Client.scala
@@ -3,10 +3,11 @@ package nl.gideondk.sentinel.client
 import java.util.concurrent.TimeUnit
 import akka.NotUsed
-import akka.actor.ActorSystem
+import akka.actor.{ ActorSystem, ExtendedActorSystem, Extension, ExtensionId, ExtensionIdProvider }
 import akka.stream._
 import akka.stream.scaladsl.{ BidiFlow, Broadcast, Flow, GraphDSL, Merge, RunnableGraph, Sink, Source }
 import akka.util.ByteString
+import com.typesafe.config.{ Config ⇒ TypesafeConfig }
 import nl.gideondk.sentinel.Config
 import nl.gideondk.sentinel.client.Client._
 import nl.gideondk.sentinel.client.ClientStage.{ HostEvent, _ }
@@ -17,12 +18,7 @@ import scala.concurrent._
 import scala.concurrent.duration._
 import scala.util.Try
-object ClientConfig {
-
- import com.typesafe.config.ConfigFactory
-
- private lazy val config = ConfigFactory.load().getConfig("nl.gideondk.sentinel")
-
+class ClientConfig(config: TypesafeConfig) extends Extension {
 val connectionsPerHost = config.getInt("client.host.max-connections")
 val maxFailuresPerHost = config.getInt("client.host.max-failures")
 val failureRecoveryPeriod = Duration(config.getDuration("client.host.failure-recovery-duration").toNanos, TimeUnit.NANOSECONDS)
@@ -34,6 +30,25 @@ object ClientConfig {
 val inputBufferSize = config.getInt("client.input-buffer-size")
 }
+object ClientConfig extends ExtensionId[ClientConfig] with ExtensionIdProvider {
+ override def lookup = ClientConfig
+ override def createExtension(system: ExtendedActorSystem) =
+ new ClientConfig(system.settings.config.getConfig("nl.gideondk.sentinel"))
+ override def get(system: ActorSystem): ClientConfig = super.get(system) + + private def clientConfig(implicit system: ActorSystem) = apply(system) + + def connectionsPerHost(implicit system: ActorSystem) = clientConfig.connectionsPerHost + def maxFailuresPerHost(implicit system: ActorSystem) = clientConfig.maxFailuresPerHost + def failureRecoveryPeriod(implicit system: ActorSystem) = clientConfig.failureRecoveryPeriod + + def reconnectDuration(implicit system: ActorSystem) = clientConfig.reconnectDuration + def shouldReconnect(implicit system: ActorSystem) = clientConfig.shouldReconnect + + def clientParallelism(implicit system: ActorSystem) = clientConfig.clientParallelism + def inputBufferSize(implicit system: ActorSystem) = clientConfig.inputBufferSize +} + object Client { private def reconnectLogic[M](builder: GraphDSL.Builder[M], hostEventSource: Source[HostEvent, NotUsed]#Shape, hostEventIn: Inlet[HostEvent], hostEventOut: Outlet[HostEvent])(implicit system: ActorSystem) = { From 0fd128ef399f340662d5ef22065dc3db989f63d1 Mon Sep 17 00:00:00 2001 From: James Roper Date: Thu, 15 Jun 2017 15:11:06 +1000 Subject: [PATCH 54/54] Tell travis to use JDK8 --- .travis.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.travis.yml b/.travis.yml index 63d7c8d..2ec896d 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,4 +1,6 @@ language: scala scala: - 2.11.8 +jdk: + - oraclejdk8 script: "sbt ++$TRAVIS_SCALA_VERSION test"
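
As an illustration of the extension-based configuration introduced in PATCH 53/54, the following is a minimal, hypothetical sketch of how an application or test could now override Sentinel's settings programmatically per actor system. The object name and the override values are made up for the example, and it assumes the remaining nl.gideondk.sentinel defaults are supplied by the library's own configuration:

import akka.actor.ActorSystem
import com.typesafe.config.ConfigFactory

import nl.gideondk.sentinel.Config
import nl.gideondk.sentinel.client.ClientConfig

object SentinelConfigOverrideExample extends App {
  // Hypothetical per-test override, layered on top of the configuration
  // that Akka itself loads (reference.conf / application.conf).
  val customConfig = ConfigFactory.parseString(
    """
      |nl.gideondk.sentinel.pipeline.parallelism = 2
      |nl.gideondk.sentinel.client.host.max-connections = 4
    """.stripMargin).withFallback(ConfigFactory.load())

  // The extensions read their values from this system's configuration once,
  // so different actor systems can run with different Sentinel settings.
  implicit val system: ActorSystem = ActorSystem("sentinel-config-example", customConfig)

  println(Config(system).producerParallelism)      // 2
  println(ClientConfig(system).connectionsPerHost) // 4

  system.terminate()
}

Because nothing is loaded or cached statically any more, a test suite can spin up several actor systems side by side, each with its own Sentinel settings, which is the point of the change.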