From 4b8ba5998cc2ac2e590359e9993b6542d6e5df51 Mon Sep 17 00:00:00 2001 From: Petros Papapanagiotou Date: Sun, 9 Dec 2018 18:01:17 +0000 Subject: [PATCH 01/55] Some refactoring --- .../{PiEventHandler.scala => PiEvents.scala} | 110 +----------------- .../pew/execution/AkkaExecutor.scala | 1 + .../pew/execution/MultiStateExecutor.scala | 1 + .../pew/execution/ProcessExecutor.scala | 1 + .../execution/SingleBlockingExecutor.scala | 2 + .../pew/execution/SingleStateExecutor.scala | 2 + src/com/workflowfm/pew/metrics/Measure.scala | 1 + .../pew/mongodb/MongoExecutor.scala | 1 + .../pew/stateless/StatelessExecutor.scala | 2 +- .../stateless/components/ResultListener.scala | 2 +- .../kafka/MinimalKafkaExecutor.scala | 1 + .../pew/stream/PiEventHandler.scala | 40 +++++++ .../workflowfm/pew/stream/PiObservable.scala | 40 +++++++ .../pew/stream/PromiseHandler.scala | 46 ++++++++ 14 files changed, 139 insertions(+), 111 deletions(-) rename src/com/workflowfm/pew/{PiEventHandler.scala => PiEvents.scala} (58%) create mode 100644 src/com/workflowfm/pew/stream/PiEventHandler.scala create mode 100644 src/com/workflowfm/pew/stream/PiObservable.scala create mode 100644 src/com/workflowfm/pew/stream/PromiseHandler.scala diff --git a/src/com/workflowfm/pew/PiEventHandler.scala b/src/com/workflowfm/pew/PiEvents.scala similarity index 58% rename from src/com/workflowfm/pew/PiEventHandler.scala rename to src/com/workflowfm/pew/PiEvents.scala index 5b2c63e0..cb17f777 100644 --- a/src/com/workflowfm/pew/PiEventHandler.scala +++ b/src/com/workflowfm/pew/PiEvents.scala @@ -1,9 +1,6 @@ package com.workflowfm.pew -import java.text.SimpleDateFormat - -import scala.collection.immutable.Queue -import scala.concurrent.{ Promise, Future, ExecutionContext } +import com.workflowfm.pew._ sealed trait PiEvent[KeyT] { def id:KeyT @@ -118,108 +115,3 @@ object PiEventProcessException { def apply[KeyT]( id: KeyT, ref: Int, ex: Throwable, time: Long=System.currentTimeMillis() ): 
PiEventProcessException[KeyT] = PiEventProcessException( id, ref, ex.getLocalizedMessage, ex.getStackTrace, time ) } - -// Return true if the handler is done and needs to be unsubscribed. - -trait PiEventHandler[KeyT] extends (PiEvent[KeyT]=>Boolean) { - def name:String - def and(h:PiEventHandler[KeyT]) = MultiPiEventHandler(this,h) -} - -trait PiEventHandlerFactory[T,H <: PiEventHandler[T]] { - def build(id:T):H -} - -class PrintEventHandler[T](override val name:String) extends PiEventHandler[T] { - val formatter = new SimpleDateFormat("YYYY-MM-dd HH:mm:ss.SSS") - override def apply(e:PiEvent[T]) = { - val time = formatter.format(e.time) - System.err.println("["+time+"]" + e.asString) - false - } -} - - -class PromiseHandler[T](override val name:String, val id:T) extends PiEventHandler[T] { - val promise = Promise[Any]() - def future = promise.future - - // class PromiseException(message:String) extends Exception(message) - - override def apply(e:PiEvent[T]) = if (e.id == this.id) e match { - case PiEventResult(i,res,_) => promise.success(res); true - case ex: PiExceptionEvent[T] => promise.failure( ex.exception ); true - case _ => false - } else false -} - -class PromiseHandlerFactory[T](name:T=>String) extends PiEventHandlerFactory[T,PromiseHandler[T]] { - def this(name:String) = this { _:T => name } - override def build(id:T) = new PromiseHandler[T](name(id),id) -} - - - -class CounterHandler[T](override val name:String, val id:T) extends PiEventHandler[T] { - private var counter:Int = 0 - def count = counter - val promise = Promise[Int]() - def future = promise.future - - override def apply(e:PiEvent[T]) = if (e.id == this.id) e match { - case PiEventResult(i,res,_) => counter += 1 ; promise.success(counter) ; true - case ex: PiExceptionEvent[T] => counter += 1; promise.success(counter) ; true - case _ => counter += 1 ; false - } else false -} - -class CounterHandlerFactory[T](name:T=>String) extends PiEventHandlerFactory[T,CounterHandler[T]] { - def 
this(name:String) = this { _:T => name } - override def build(id:T) = new CounterHandler[T](name(id),id) -} - - -case class MultiPiEventHandler[T](handlers:Queue[PiEventHandler[T]]) extends PiEventHandler[T] { - override def name = handlers map (_.name) mkString(",") - override def apply(e:PiEvent[T]) = handlers map (_(e)) forall (_ == true) - override def and(h:PiEventHandler[T]) = MultiPiEventHandler(handlers :+ h) -} - -object MultiPiEventHandler { - def apply[T](handlers:PiEventHandler[T]*):MultiPiEventHandler[T] = MultiPiEventHandler[T](Queue[PiEventHandler[T]]() ++ handlers) -} - - -trait PiObservable[T] { - def subscribe(handler:PiEventHandler[T]):Future[Boolean] - def unsubscribe(handlerName:String):Future[Boolean] -} - -trait SimplePiObservable[T] extends PiObservable[T] { - import collection.mutable.Map - - implicit val executionContext:ExecutionContext - - val handlers:Map[String,PiEventHandler[T]] = Map[String,PiEventHandler[T]]() - - override def subscribe(handler:PiEventHandler[T]):Future[Boolean] = Future { - //System.err.println("Subscribed: " + handler.name) - handlers += (handler.name -> handler) - true - } - - override def unsubscribe(handlerName:String):Future[Boolean] = Future { - handlers.remove(handlerName).isDefined - } - - def publish(evt:PiEvent[T]) = { - handlers.retain((k,v) => !v(evt)) - } -} - -trait DelegatedPiObservable[T] extends PiObservable[T] { - val worker: PiObservable[T] - - override def subscribe( handler: PiEventHandler[T] ): Future[Boolean] = worker.subscribe( handler ) - override def unsubscribe( handlerName: String ): Future[Boolean] = worker.unsubscribe( handlerName ) -} diff --git a/src/com/workflowfm/pew/execution/AkkaExecutor.scala b/src/com/workflowfm/pew/execution/AkkaExecutor.scala index 8590a763..9701714d 100644 --- a/src/com/workflowfm/pew/execution/AkkaExecutor.scala +++ b/src/com/workflowfm/pew/execution/AkkaExecutor.scala @@ -4,6 +4,7 @@ import akka.actor._ import akka.pattern.{ask, pipe} import 
akka.util.Timeout import com.workflowfm.pew._ +import com.workflowfm.pew.stream.{ PiObservable, SimplePiObservable, PiEventHandler } import scala.concurrent._ import scala.concurrent.duration._ diff --git a/src/com/workflowfm/pew/execution/MultiStateExecutor.scala b/src/com/workflowfm/pew/execution/MultiStateExecutor.scala index e069e499..cf01f2f3 100644 --- a/src/com/workflowfm/pew/execution/MultiStateExecutor.scala +++ b/src/com/workflowfm/pew/execution/MultiStateExecutor.scala @@ -1,6 +1,7 @@ package com.workflowfm.pew.execution import com.workflowfm.pew._ +import com.workflowfm.pew.stream.SimplePiObservable import scala.concurrent._ import scala.util.{Failure, Success} diff --git a/src/com/workflowfm/pew/execution/ProcessExecutor.scala b/src/com/workflowfm/pew/execution/ProcessExecutor.scala index 7c109f0b..01242584 100644 --- a/src/com/workflowfm/pew/execution/ProcessExecutor.scala +++ b/src/com/workflowfm/pew/execution/ProcessExecutor.scala @@ -1,6 +1,7 @@ package com.workflowfm.pew.execution import com.workflowfm.pew._ +import com.workflowfm.pew.stream.{ PiObservable, PiEventHandler, PiEventHandlerFactory, PromiseHandlerFactory } import scala.concurrent._ import scala.concurrent.duration._ diff --git a/src/com/workflowfm/pew/execution/SingleBlockingExecutor.scala b/src/com/workflowfm/pew/execution/SingleBlockingExecutor.scala index 40e5c624..e5fb191e 100644 --- a/src/com/workflowfm/pew/execution/SingleBlockingExecutor.scala +++ b/src/com/workflowfm/pew/execution/SingleBlockingExecutor.scala @@ -1,6 +1,8 @@ package com.workflowfm.pew.execution import com.workflowfm.pew._ +import com.workflowfm.pew.stream.SimplePiObservable + import scala.concurrent._ import scala.concurrent.duration.Duration import scala.annotation.tailrec diff --git a/src/com/workflowfm/pew/execution/SingleStateExecutor.scala b/src/com/workflowfm/pew/execution/SingleStateExecutor.scala index 98b86762..31aa0f45 100644 --- a/src/com/workflowfm/pew/execution/SingleStateExecutor.scala +++ 
b/src/com/workflowfm/pew/execution/SingleStateExecutor.scala @@ -1,6 +1,8 @@ package com.workflowfm.pew.execution import com.workflowfm.pew._ +import com.workflowfm.pew.stream.SimplePiObservable + import scala.concurrent._ import scala.concurrent.duration.Duration import scala.annotation.tailrec diff --git a/src/com/workflowfm/pew/metrics/Measure.scala b/src/com/workflowfm/pew/metrics/Measure.scala index 37a02283..3da7389a 100644 --- a/src/com/workflowfm/pew/metrics/Measure.scala +++ b/src/com/workflowfm/pew/metrics/Measure.scala @@ -1,6 +1,7 @@ package com.workflowfm.pew.metrics import com.workflowfm.pew._ +import com.workflowfm.pew.stream.PiEventHandler case class ProcessMetrics[KeyT] (piID:KeyT, ref:Int, process:String, start:Long=System.currentTimeMillis(), finish:Option[Long]=None, result:Option[String]=None) { def complete(time:Long,result:Any) = copy(finish=Some(time),result=Some(result.toString)) diff --git a/src/com/workflowfm/pew/mongodb/MongoExecutor.scala b/src/com/workflowfm/pew/mongodb/MongoExecutor.scala index 94b1f1f9..2053e20c 100644 --- a/src/com/workflowfm/pew/mongodb/MongoExecutor.scala +++ b/src/com/workflowfm/pew/mongodb/MongoExecutor.scala @@ -2,6 +2,7 @@ package com.workflowfm.pew.mongodb import com.workflowfm.pew._ import com.workflowfm.pew.execution._ +import com.workflowfm.pew.stream.SimplePiObservable import com.workflowfm.pew.mongodb.bson.PiCodecProvider import scala.concurrent._ diff --git a/src/com/workflowfm/pew/stateless/StatelessExecutor.scala b/src/com/workflowfm/pew/stateless/StatelessExecutor.scala index 186af279..aa2c030f 100644 --- a/src/com/workflowfm/pew/stateless/StatelessExecutor.scala +++ b/src/com/workflowfm/pew/stateless/StatelessExecutor.scala @@ -1,7 +1,7 @@ package com.workflowfm.pew.stateless import akka.Done -import com.workflowfm.pew.PiObservable +import com.workflowfm.pew.stream.PiObservable import com.workflowfm.pew.execution._ import scala.concurrent.duration.Duration diff --git 
a/src/com/workflowfm/pew/stateless/components/ResultListener.scala b/src/com/workflowfm/pew/stateless/components/ResultListener.scala index efc90607..5c2d4899 100644 --- a/src/com/workflowfm/pew/stateless/components/ResultListener.scala +++ b/src/com/workflowfm/pew/stateless/components/ResultListener.scala @@ -1,7 +1,7 @@ package com.workflowfm.pew.stateless.components import com.workflowfm.pew.stateless.StatelessMessages.PiiLog -import com.workflowfm.pew._ +import com.workflowfm.pew.stream.{ PiObservable, SimplePiObservable, PiEventHandler } import org.bson.types.ObjectId import scala.language.implicitConversions diff --git a/src/com/workflowfm/pew/stateless/instances/kafka/MinimalKafkaExecutor.scala b/src/com/workflowfm/pew/stateless/instances/kafka/MinimalKafkaExecutor.scala index 2f5d55ff..909f356a 100644 --- a/src/com/workflowfm/pew/stateless/instances/kafka/MinimalKafkaExecutor.scala +++ b/src/com/workflowfm/pew/stateless/instances/kafka/MinimalKafkaExecutor.scala @@ -5,6 +5,7 @@ import akka.actor.ActorSystem import akka.kafka.scaladsl.Consumer.Control import akka.stream.Materializer import com.workflowfm.pew._ +import com.workflowfm.pew.stream.{ PiObservable, DelegatedPiObservable } import com.workflowfm.pew.stateless._ import com.workflowfm.pew.stateless.components.ResultListener import com.workflowfm.pew.stateless.instances.kafka.components.KafkaConnectors diff --git a/src/com/workflowfm/pew/stream/PiEventHandler.scala b/src/com/workflowfm/pew/stream/PiEventHandler.scala new file mode 100644 index 00000000..d2b08d0c --- /dev/null +++ b/src/com/workflowfm/pew/stream/PiEventHandler.scala @@ -0,0 +1,40 @@ +package com.workflowfm.pew.stream + +import com.workflowfm.pew.{ PiEvent, PiExceptionEvent, PiEventResult } + +import java.text.SimpleDateFormat + +import scala.collection.immutable.Queue + +// Return true if the handler is done and needs to be unsubscribed. 
+ +trait PiEventHandler[KeyT] extends (PiEvent[KeyT]=>Boolean) { + def name:String + def and(h:PiEventHandler[KeyT]) = MultiPiEventHandler(this,h) +} + +trait PiEventHandlerFactory[T,H <: PiEventHandler[T]] { + def build(id:T):H +} + +class PrintEventHandler[T](override val name:String) extends PiEventHandler[T] { + val formatter = new SimpleDateFormat("YYYY-MM-dd HH:mm:ss.SSS") + override def apply(e:PiEvent[T]) = { + val time = formatter.format(e.time) + System.err.println(s"[$time] ${e.asString}") + false + } +} + + +case class MultiPiEventHandler[T](handlers:Queue[PiEventHandler[T]]) extends PiEventHandler[T] { + override def name = handlers map (_.name) mkString(",") + override def apply(e:PiEvent[T]) = handlers map (_(e)) forall (_ == true) + override def and(h:PiEventHandler[T]) = MultiPiEventHandler(handlers :+ h) +} + +object MultiPiEventHandler { + def apply[T](handlers:PiEventHandler[T]*):MultiPiEventHandler[T] = MultiPiEventHandler[T](Queue[PiEventHandler[T]]() ++ handlers) +} + + diff --git a/src/com/workflowfm/pew/stream/PiObservable.scala b/src/com/workflowfm/pew/stream/PiObservable.scala new file mode 100644 index 00000000..7bd93936 --- /dev/null +++ b/src/com/workflowfm/pew/stream/PiObservable.scala @@ -0,0 +1,40 @@ +package com.workflowfm.pew.stream + +import com.workflowfm.pew.PiEvent + +import scala.concurrent.{ Promise, Future, ExecutionContext } + + +trait PiObservable[T] { + def subscribe(handler:PiEventHandler[T]):Future[Boolean] + def unsubscribe(handlerName:String):Future[Boolean] +} + +trait SimplePiObservable[T] extends PiObservable[T] { + import collection.mutable.Map + + implicit val executionContext:ExecutionContext + + val handlers:Map[String,PiEventHandler[T]] = Map[String,PiEventHandler[T]]() + + override def subscribe(handler:PiEventHandler[T]):Future[Boolean] = Future { + //System.err.println("Subscribed: " + handler.name) + handlers += (handler.name -> handler) + true + } + + override def 
unsubscribe(handlerName:String):Future[Boolean] = Future { + handlers.remove(handlerName).isDefined + } + + def publish(evt:PiEvent[T]) = { + handlers.retain((k,v) => !v(evt)) + } +} + +trait DelegatedPiObservable[T] extends PiObservable[T] { + val worker: PiObservable[T] + + override def subscribe( handler: PiEventHandler[T] ): Future[Boolean] = worker.subscribe( handler ) + override def unsubscribe( handlerName: String ): Future[Boolean] = worker.unsubscribe( handlerName ) +} diff --git a/src/com/workflowfm/pew/stream/PromiseHandler.scala b/src/com/workflowfm/pew/stream/PromiseHandler.scala new file mode 100644 index 00000000..482b1bb1 --- /dev/null +++ b/src/com/workflowfm/pew/stream/PromiseHandler.scala @@ -0,0 +1,46 @@ +package com.workflowfm.pew.stream + +import com.workflowfm.pew.{ PiEvent, PiExceptionEvent, PiEventResult } + +import scala.concurrent.{ Promise, Future, ExecutionContext } + + +class PromiseHandler[T](override val name:String, val id:T) extends PiEventHandler[T] { + val promise = Promise[Any]() + def future = promise.future + + // class PromiseException(message:String) extends Exception(message) + + override def apply(e:PiEvent[T]) = if (e.id == this.id) e match { + case PiEventResult(i,res,_) => promise.success(res); true + case ex: PiExceptionEvent[T] => promise.failure( ex.exception ); true + case _ => false + } else false +} + +class PromiseHandlerFactory[T](name:T=>String) extends PiEventHandlerFactory[T,PromiseHandler[T]] { + def this(name:String) = this { _:T => name } + override def build(id:T) = new PromiseHandler[T](name(id),id) +} + + + +class CounterHandler[T](override val name:String, val id:T) extends PiEventHandler[T] { + private var counter:Int = 0 + def count = counter + val promise = Promise[Int]() + def future = promise.future + + override def apply(e:PiEvent[T]) = if (e.id == this.id) e match { + case PiEventResult(i,res,_) => counter += 1 ; promise.success(counter) ; true + case ex: PiExceptionEvent[T] => counter += 1; 
promise.success(counter) ; true + case _ => counter += 1 ; false + } else false +} + +class CounterHandlerFactory[T](name:T=>String) extends PiEventHandlerFactory[T,CounterHandler[T]] { + def this(name:String) = this { _:T => name } + override def build(id:T) = new CounterHandler[T](name(id),id) +} + + From 980c5db79f9485837bb1ae463897b588e5f247be Mon Sep 17 00:00:00 2001 From: Petros Papapanagiotou Date: Sun, 9 Dec 2018 18:23:53 +0000 Subject: [PATCH 02/55] Generalized `PromiseHandler` Closes #26 No need to change the factories and no need for a `PromiseHandleFactory` --- .../pew/execution/ProcessExecutor.scala | 6 +- .../pew/stream/PromiseHandler.scala | 68 ++++++++++++++----- 2 files changed, 53 insertions(+), 21 deletions(-) diff --git a/src/com/workflowfm/pew/execution/ProcessExecutor.scala b/src/com/workflowfm/pew/execution/ProcessExecutor.scala index 01242584..7d9116fc 100644 --- a/src/com/workflowfm/pew/execution/ProcessExecutor.scala +++ b/src/com/workflowfm/pew/execution/ProcessExecutor.scala @@ -1,7 +1,7 @@ package com.workflowfm.pew.execution import com.workflowfm.pew._ -import com.workflowfm.pew.stream.{ PiObservable, PiEventHandler, PiEventHandlerFactory, PromiseHandlerFactory } +import com.workflowfm.pew.stream.{ PiObservable, PiEventHandler, PiEventHandlerFactory, ResultHandlerFactory } import scala.concurrent._ import scala.concurrent.duration._ @@ -81,7 +81,7 @@ trait ProcessExecutor[KeyT] { this:PiObservable[KeyT] => * @return A Future with the result of the executed process */ def execute(process:PiProcess,args:Seq[Any]):Future[Any] = - call(process,args,new PromiseHandlerFactory[KeyT]({ id => s"[$id]"})) flatMap (_.future) + call(process,args,new ResultHandlerFactory[KeyT]({ id => s"[$id]"})) flatMap (_.future) } object ProcessExecutor { @@ -120,7 +120,7 @@ trait SimulatorExecutor[KeyT] extends ProcessExecutor[KeyT] { this:PiObservable[ * @return A Future with the result of the executed process */ def 
simulate(process:PiProcess,args:Seq[Any],timeout:FiniteDuration=10.seconds):Future[Any] = { - val f = call(process,args,new PromiseHandlerFactory[KeyT]({ id => s"[$id]"})) + val f = call(process,args,new ResultHandlerFactory[KeyT]({ id => s"[$id]"})) val handler = Await.result(f, timeout) handler.future } diff --git a/src/com/workflowfm/pew/stream/PromiseHandler.scala b/src/com/workflowfm/pew/stream/PromiseHandler.scala index 482b1bb1..05701ef8 100644 --- a/src/com/workflowfm/pew/stream/PromiseHandler.scala +++ b/src/com/workflowfm/pew/stream/PromiseHandler.scala @@ -1,41 +1,73 @@ package com.workflowfm.pew.stream -import com.workflowfm.pew.{ PiEvent, PiExceptionEvent, PiEventResult } +import com.workflowfm.pew.{ PiEvent, PiException, PiExceptionEvent, PiEventResult } import scala.concurrent.{ Promise, Future, ExecutionContext } -class PromiseHandler[T](override val name:String, val id:T) extends PiEventHandler[T] { - val promise = Promise[Any]() +trait PromiseHandler[T,R] extends PiEventHandler[T] { + val id:T + + protected val promise = Promise[R]() def future = promise.future // class PromiseException(message:String) extends Exception(message) - override def apply(e:PiEvent[T]) = if (e.id == this.id) e match { - case PiEventResult(i,res,_) => promise.success(res); true - case ex: PiExceptionEvent[T] => promise.failure( ex.exception ); true + override def apply(e:PiEvent[T]) = if (e.id == this.id) update(e) match { + case PiEventResult(i,res,_) => promise.success(succeed(res)); true + case ex: PiExceptionEvent[T] => fail(ex.exception) match { + case Left(r) => promise.success(r); true + case Right(x) => promise.failure(x); true + } case _ => false - } else false + } else false + + /** + * This should handle the event (if needed) and return it, potentially updated. + * (Default does nothing.) + */ + def update(event:PiEvent[T]) :PiEvent[T] = event + + /** + * This will be executed when the workflow completes successfully. 
+ * @param result the result of the workflow + * @return an object of type R that will complete the promise + */ + def succeed(result:Any) :R + + /** + * This will be executed when the workflow fails due to an exception. + * @param exception the exception that occurred + * @return either an object to complete the promise successfully or an exception to fail the promise with + */ + def fail(exception:PiException[T]) :Either[R,Exception] } -class PromiseHandlerFactory[T](name:T=>String) extends PiEventHandlerFactory[T,PromiseHandler[T]] { +class ResultHandler[T](override val name:String, override val id:T) extends PromiseHandler[T,Any] { + + override def succeed(result:Any) = result + override def fail(exception:PiException[T]) = Right(exception) +} + +class ResultHandlerFactory[T](name:T=>String) extends PiEventHandlerFactory[T,ResultHandler[T]] { def this(name:String) = this { _:T => name } - override def build(id:T) = new PromiseHandler[T](name(id),id) + override def build(id:T) = new ResultHandler[T](name(id),id) } -class CounterHandler[T](override val name:String, val id:T) extends PiEventHandler[T] { +class CounterHandler[T](override val name:String, override val id:T) extends PromiseHandler[T,Int] { private var counter:Int = 0 def count = counter - val promise = Promise[Int]() - def future = promise.future - - override def apply(e:PiEvent[T]) = if (e.id == this.id) e match { - case PiEventResult(i,res,_) => counter += 1 ; promise.success(counter) ; true - case ex: PiExceptionEvent[T] => counter += 1; promise.success(counter) ; true - case _ => counter += 1 ; false - } else false + + override def update(event:PiEvent[T]) = { + counter += 1 + // TODO add metadata for the counter here, because why not? 
+ event + } + + override def succeed(result:Any) = counter + override def fail(exception:PiException[T]) = Left(counter) } class CounterHandlerFactory[T](name:T=>String) extends PiEventHandlerFactory[T,CounterHandler[T]] { From f4ec574cc56af44ae5770125944eb6d335799ae2 Mon Sep 17 00:00:00 2001 From: Petros Papapanagiotou Date: Sun, 9 Dec 2018 22:08:49 +0000 Subject: [PATCH 03/55] First step There are a few design decisions to be made --- .../pew/execution/ProcessExecutor.scala | 15 +---- .../workflowfm/pew/stream/PiObservable.scala | 8 ++- src/com/workflowfm/pew/stream/PiStream.scala | 56 +++++++++++++++++++ 3 files changed, 63 insertions(+), 16 deletions(-) create mode 100644 src/com/workflowfm/pew/stream/PiStream.scala diff --git a/src/com/workflowfm/pew/execution/ProcessExecutor.scala b/src/com/workflowfm/pew/execution/ProcessExecutor.scala index 7d9116fc..6e034e13 100644 --- a/src/com/workflowfm/pew/execution/ProcessExecutor.scala +++ b/src/com/workflowfm/pew/execution/ProcessExecutor.scala @@ -85,21 +85,8 @@ trait ProcessExecutor[KeyT] { this:PiObservable[KeyT] => } object ProcessExecutor { - // def default = SingleBlockingExecutor(Map[String,PiProcess]()) - final case class AlreadyExecutingException(private val cause: Throwable = None.orNull) extends Exception("Unable to execute more than one process at a time", cause) - - /* - final case class UnknownProcessException(val process:String, private val cause: Throwable = None.orNull) - extends Exception("Unknown process: " + process, cause) - final case class AtomicProcessIsCompositeException(val process:String, private val cause: Throwable = None.orNull) - extends Exception("Executor encountered composite process thread: " + process + " (this should never happen!)", cause) - final case class NoResultException(val id:String, private val cause: Throwable = None.orNull) - extends Exception("Failed to get result for: " + id, cause) - final case class NoSuchInstanceException(val id:String, private val cause: 
Throwable = None.orNull) - extends Exception("Failed to find instance with id: " + id, cause) - */ } trait SimulatorExecutor[KeyT] extends ProcessExecutor[KeyT] { this:PiObservable[KeyT] => @@ -112,7 +99,7 @@ trait SimulatorExecutor[KeyT] extends ProcessExecutor[KeyT] { this:PiObservable[ def simulationReady:Boolean /** - * Executes a process with a PromiseHandler + * Executes a process with a ResultHandler * Same as ProcessExecutor.execute but blocks until call has been initiated. * The simulator needs to ensure this has happened before continuing. * @param process The (atomic or composite) PiProcess to be executed diff --git a/src/com/workflowfm/pew/stream/PiObservable.scala b/src/com/workflowfm/pew/stream/PiObservable.scala index 7bd93936..c1a4c5c4 100644 --- a/src/com/workflowfm/pew/stream/PiObservable.scala +++ b/src/com/workflowfm/pew/stream/PiObservable.scala @@ -4,13 +4,17 @@ import com.workflowfm.pew.PiEvent import scala.concurrent.{ Promise, Future, ExecutionContext } +trait PiPublisher[T] { + protected def publish(evt:PiEvent[T]):Unit +} trait PiObservable[T] { def subscribe(handler:PiEventHandler[T]):Future[Boolean] def unsubscribe(handlerName:String):Future[Boolean] } -trait SimplePiObservable[T] extends PiObservable[T] { + +trait SimplePiObservable[T] extends PiObservable[T] with PiPublisher[T] { import collection.mutable.Map implicit val executionContext:ExecutionContext @@ -27,7 +31,7 @@ trait SimplePiObservable[T] extends PiObservable[T] { handlers.remove(handlerName).isDefined } - def publish(evt:PiEvent[T]) = { + override def publish(evt:PiEvent[T]) = { handlers.retain((k,v) => !v(evt)) } } diff --git a/src/com/workflowfm/pew/stream/PiStream.scala b/src/com/workflowfm/pew/stream/PiStream.scala new file mode 100644 index 00000000..34152f93 --- /dev/null +++ b/src/com/workflowfm/pew/stream/PiStream.scala @@ -0,0 +1,56 @@ +package com.workflowfm.pew.stream + +import com.workflowfm.pew.{ PiEvent, PiExceptionEvent, PiEventResult } + +import 
akka.actor._ +import akka.stream._ +import akka.stream.scaladsl._ +import akka.{ NotUsed, Done } + +trait PiSource[T] extends PiPublisher[T] { + implicit val system:ActorSystem + implicit val materializer = ActorMaterializer() + + private val sourceQueue = Source.queue[PiEvent[T]](PiSource.bufferSize, PiSource.overflowStrategy) + + private val ( + queue: SourceQueueWithComplete[PiEvent[T]], + source: Source[PiEvent[T], NotUsed] + ) = { + val (q,s) = sourceQueue.toMat(BroadcastHub.sink(bufferSize = 256))(Keep.both).run() + s.runWith(Sink.ignore) + //s.runForeach(e => println(s">>>>>>>>>> ${e.id} ${e.time - 1542976910000L}")) + (q,s) + } + + protected def publish(evt:PiEvent[T]) = queue.offer(evt) + + def subscribe[R](sink:PiEvent[T]=>R):Unit = { + val x = source + .map(sink(_)) + } + def subscribe(handler:PiEventHandler[T]):Unit = { + // Using a shared kill switch even though we only want to shutdown one stream, because + // the shared switch is available immediately (pre-materialization) so we can use it + // as a sink. 
+ val killSwitch = KillSwitches.shared(s"kill:${handler.name}") + val x = source + .via(killSwitch.flow) + .map { e => println(s">>> ${handler.name}: ${e.id} ${e.time- 1542976910000L}") ; e } + .map(handler(_)) + .runForeach { r => if (r) { + println(s"===KILLING IN THE NAME OF: ${handler.name}") + killSwitch.shutdown() } + } + } +} + +object PiSource { + val bufferSize = 5 + val overflowStrategy = OverflowStrategy.backpressure +} + + +case class PiSink(name:String, switch:KillSwitch) { + def shutdown = switch.shutdown() +} From fc501673ee509bf74000c79761e5ec8a1656101e Mon Sep 17 00:00:00 2001 From: Petros Date: Sun, 9 Dec 2018 23:46:51 +0000 Subject: [PATCH 04/55] Implemented PiSwitch and PiStream See #34 Need to fix and extend testing Need to see if it fits with Kafka and Aurora stuff --- .../pew/execution/AkkaExecutor.scala | 13 ++++----- .../workflowfm/pew/stream/PiObservable.scala | 22 +++++++++------ src/com/workflowfm/pew/stream/PiStream.scala | 28 +++++++++++-------- 3 files changed, 36 insertions(+), 27 deletions(-) diff --git a/src/com/workflowfm/pew/execution/AkkaExecutor.scala b/src/com/workflowfm/pew/execution/AkkaExecutor.scala index 9701714d..ae9bb55d 100644 --- a/src/com/workflowfm/pew/execution/AkkaExecutor.scala +++ b/src/com/workflowfm/pew/execution/AkkaExecutor.scala @@ -4,7 +4,7 @@ import akka.actor._ import akka.pattern.{ask, pipe} import akka.util.Timeout import com.workflowfm.pew._ -import com.workflowfm.pew.stream.{ PiObservable, SimplePiObservable, PiEventHandler } +import com.workflowfm.pew.stream.{ PiObservable, PiStream, PiEventHandler, PiSwitch } import scala.concurrent._ import scala.concurrent.duration._ @@ -41,9 +41,8 @@ class AkkaExecutor ( override protected def start(id:Int) = execActor ! AkkaExecutor.Start(id) - override def subscribe(handler:PiEventHandler[Int]):Future[Boolean] = (execActor ? AkkaExecutor.Subscribe(handler)).mapTo[Boolean] + override def subscribe(handler:PiEventHandler[Int]):Future[PiSwitch] = (execActor ? 
AkkaExecutor.Subscribe(handler)).mapTo[PiSwitch] - override def unsubscribe(name:String):Future[Boolean] = (execActor ? AkkaExecutor.Unsubscribe(name)).mapTo[Boolean] } object AkkaExecutor { @@ -61,7 +60,6 @@ object AkkaExecutor { case object SimReady case class Subscribe(handler:PiEventHandler[Int]) - case class Unsubscribe(name:String) def atomicprops(implicit context: ExecutionContext = ExecutionContext.global): Props = Props(new AkkaAtomicProcessExecutor()) def execprops(store:PiInstanceStore[Int], processes:PiProcessStore)(implicit system: ActorSystem, exc: ExecutionContext): Props = Props(new AkkaExecActor(store,processes)) @@ -71,9 +69,9 @@ class AkkaExecActor( var store:PiInstanceStore[Int], processes:PiProcessStore )( - implicit system: ActorSystem, - override implicit val executionContext: ExecutionContext = ExecutionContext.global -) extends Actor with SimplePiObservable[Int] { + override implicit val system: ActorSystem, + implicit val executionContext: ExecutionContext = ExecutionContext.global +) extends Actor with PiStream[Int] { var ctr:Int = 0 @@ -193,7 +191,6 @@ class AkkaExecActor( case AkkaExecutor.AckCall => Unit case AkkaExecutor.SimReady => sender() ! simulationReady() case AkkaExecutor.Subscribe(h) => subscribe(h) pipeTo sender() - case AkkaExecutor.Unsubscribe(name) => unsubscribe(name) pipeTo sender() case m => System.err.println("!!! 
Received unknown message: " + m) } diff --git a/src/com/workflowfm/pew/stream/PiObservable.scala b/src/com/workflowfm/pew/stream/PiObservable.scala index c1a4c5c4..3bb31c43 100644 --- a/src/com/workflowfm/pew/stream/PiObservable.scala +++ b/src/com/workflowfm/pew/stream/PiObservable.scala @@ -8,9 +8,12 @@ trait PiPublisher[T] { protected def publish(evt:PiEvent[T]):Unit } +trait PiSwitch { + def stop:Unit +} + trait PiObservable[T] { - def subscribe(handler:PiEventHandler[T]):Future[Boolean] - def unsubscribe(handlerName:String):Future[Boolean] + def subscribe(handler:PiEventHandler[T]):Future[PiSwitch] } @@ -19,26 +22,29 @@ trait SimplePiObservable[T] extends PiObservable[T] with PiPublisher[T] { implicit val executionContext:ExecutionContext - val handlers:Map[String,PiEventHandler[T]] = Map[String,PiEventHandler[T]]() + protected val handlers:Map[String,PiEventHandler[T]] = Map[String,PiEventHandler[T]]() - override def subscribe(handler:PiEventHandler[T]):Future[Boolean] = Future { + override def subscribe(handler:PiEventHandler[T]):Future[PiSwitch] = Future { //System.err.println("Subscribed: " + handler.name) handlers += (handler.name -> handler) - true + Switch(handler.name) } - override def unsubscribe(handlerName:String):Future[Boolean] = Future { + def unsubscribe(handlerName:String):Future[Boolean] = Future { handlers.remove(handlerName).isDefined } override def publish(evt:PiEvent[T]) = { handlers.retain((k,v) => !v(evt)) } + + case class Switch(name:String) extends PiSwitch { + override def stop:Unit = unsubscribe(name) + } } trait DelegatedPiObservable[T] extends PiObservable[T] { val worker: PiObservable[T] - override def subscribe( handler: PiEventHandler[T] ): Future[Boolean] = worker.subscribe( handler ) - override def unsubscribe( handlerName: String ): Future[Boolean] = worker.unsubscribe( handlerName ) + override def subscribe( handler: PiEventHandler[T] ): Future[PiSwitch] = worker.subscribe( handler ) } diff --git 
a/src/com/workflowfm/pew/stream/PiStream.scala b/src/com/workflowfm/pew/stream/PiStream.scala index 34152f93..cab23290 100644 --- a/src/com/workflowfm/pew/stream/PiStream.scala +++ b/src/com/workflowfm/pew/stream/PiStream.scala @@ -7,7 +7,13 @@ import akka.stream._ import akka.stream.scaladsl._ import akka.{ NotUsed, Done } -trait PiSource[T] extends PiPublisher[T] { +import scala.concurrent.{ Future, ExecutionContext } + +trait PiSource[T] extends PiObservable[T] { + def getSource:Source[PiEvent[T], NotUsed] +} + +trait PiStream[T] extends PiSource[T] with PiPublisher[T] { implicit val system:ActorSystem implicit val materializer = ActorMaterializer() @@ -23,25 +29,25 @@ trait PiSource[T] extends PiPublisher[T] { (q,s) } - protected def publish(evt:PiEvent[T]) = queue.offer(evt) + override def publish(evt:PiEvent[T]) = queue.offer(evt) - def subscribe[R](sink:PiEvent[T]=>R):Unit = { - val x = source - .map(sink(_)) - } - def subscribe(handler:PiEventHandler[T]):Unit = { + override def getSource = source + //def subscribe(sink:PiEvent[T]=>Unit):Future[Done] = source.runForeach(sink(_)) + + override def subscribe(handler:PiEventHandler[T]):Future[PiSwitch] = { // Using a shared kill switch even though we only want to shutdown one stream, because // the shared switch is available immediately (pre-materialization) so we can use it // as a sink. 
val killSwitch = KillSwitches.shared(s"kill:${handler.name}") val x = source .via(killSwitch.flow) - .map { e => println(s">>> ${handler.name}: ${e.id} ${e.time- 1542976910000L}") ; e } + //.map { e => println(s">>> ${handler.name}: ${e.id} ${e.time- 1542976910000L}") ; e } .map(handler(_)) .runForeach { r => if (r) { - println(s"===KILLING IN THE NAME OF: ${handler.name}") + //println(s"===KILLING IN THE NAME OF: ${handler.name}") killSwitch.shutdown() } } + Future.successful(PiKillSwitch(killSwitch)) } } @@ -51,6 +57,6 @@ object PiSource { } -case class PiSink(name:String, switch:KillSwitch) { - def shutdown = switch.shutdown() +case class PiKillSwitch(switch:KillSwitch) extends PiSwitch { + override def stop = switch.shutdown() } From 0b9886ab20b1bde82794d05c5b8cb0666a78546e Mon Sep 17 00:00:00 2001 From: Petros Papapanagiotou Date: Mon, 10 Dec 2018 12:37:25 +0000 Subject: [PATCH 05/55] Extending PiStream functionality --- src/com/workflowfm/pew/stream/PiStream.scala | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/src/com/workflowfm/pew/stream/PiStream.scala b/src/com/workflowfm/pew/stream/PiStream.scala index cab23290..b2eac76a 100644 --- a/src/com/workflowfm/pew/stream/PiStream.scala +++ b/src/com/workflowfm/pew/stream/PiStream.scala @@ -10,12 +10,25 @@ import akka.{ NotUsed, Done } import scala.concurrent.{ Future, ExecutionContext } trait PiSource[T] extends PiObservable[T] { + implicit val materializer:Materializer + def getSource:Source[PiEvent[T], NotUsed] + + def subscribeAndForget(f:PiEvent[T]=>Unit):Unit = getSource.runForeach(f) + + def subscribe(f:PiEvent[T]=>Unit):PiSwitch = { + val kill = getSource + .viaMat(KillSwitches.single)(Keep.right) + .to(Sink.foreach(f)) + .run() + PiKillSwitch(kill) + } + } trait PiStream[T] extends PiSource[T] with PiPublisher[T] { implicit val system:ActorSystem - implicit val materializer = ActorMaterializer() + override implicit val materializer = ActorMaterializer() private val 
sourceQueue = Source.queue[PiEvent[T]](PiSource.bufferSize, PiSource.overflowStrategy) From 1f33ad74deb551b7a9c4556cf1e2aff25a619e5b Mon Sep 17 00:00:00 2001 From: Petros Date: Tue, 11 Dec 2018 23:50:07 +0000 Subject: [PATCH 06/55] Fixed unit tests --- .../pew/execution/AkkaExecutorTests.scala | 17 +++++++++-------- .../execution/SingleStateExecutorTests.scala | 2 +- .../workflowfm/pew/metrics/MetricsTests.scala | 9 +++++---- .../pew/mongodb/MongoExecutorTests.scala | 3 ++- .../pew/stateless/KafkaExecutorTests.scala | 13 +++++++------ 5 files changed, 24 insertions(+), 20 deletions(-) diff --git a/test/com/workflowfm/pew/execution/AkkaExecutorTests.scala b/test/com/workflowfm/pew/execution/AkkaExecutorTests.scala index df0f0e53..1b05344f 100644 --- a/test/com/workflowfm/pew/execution/AkkaExecutorTests.scala +++ b/test/com/workflowfm/pew/execution/AkkaExecutorTests.scala @@ -7,6 +7,7 @@ import org.scalatest.junit.JUnitRunner import scala.concurrent._ import scala.concurrent.duration._ import com.workflowfm.pew._ +import com.workflowfm.pew.stream._ import com.workflowfm.pew.execution._ import RexampleTypes._ @@ -145,21 +146,21 @@ class AkkaExecutorTests extends FlatSpec with Matchers with BeforeAndAfterAll wi it should "execute Rexample with a CounterHandler" in { val ex = new AkkaExecutor(pai,pbi,pci,ri) val factory = new CounterHandlerFactory[Int]("counter") - ex.subscribe(new PrintEventHandler("printer")) + val kill = ex.subscribe(new PrintEventHandler("printer")) val f1 = ex.call(ri,Seq(21),factory) flatMap(_.future) val r1 = await(f1) r1 should be (8) - ex.unsubscribe("printer") + kill.map(_.stop) } it should "allow separate handlers per executor" in { val ex = new AkkaExecutor(pai,pbi,pci,ri) val ex2 = new AkkaExecutor(pai,pbi,pci,ri) val factory = new CounterHandlerFactory[Int]("counter") - ex.subscribe(new PrintEventHandler("printer")) - ex2.subscribe(new PrintEventHandler("printer")) + val k1 = ex.subscribe(new PrintEventHandler("printer")) + val k2 = 
ex2.subscribe(new PrintEventHandler("printer")) val f1 = ex.call(ri,Seq(99),factory) flatMap(_.future) val f2 = ex2.execute(ri,Seq(11)) @@ -170,8 +171,8 @@ class AkkaExecutorTests extends FlatSpec with Matchers with BeforeAndAfterAll wi val r1 = await(f1) r1 should be (8) - ex.unsubscribe("printer") - ex2.unsubscribe("printer") + k1.map(_.stop) + k2.map(_.stop) } it should "allow separate handlers for separate workflows" in { @@ -194,8 +195,8 @@ class AkkaExecutorTests extends FlatSpec with Matchers with BeforeAndAfterAll wi it should "unsubscribe handlers successfully" in { val ex = new AkkaExecutor(pai,pbi,pci,ri) val factory = new CounterHandlerFactory[Int]("counter" + _) - ex.subscribe(new PrintEventHandler("printerX")) - ex.unsubscribe("printerX") + val kill = ex.subscribe(new PrintEventHandler("printerX")) + kill.map(_.stop) val f1 = ex.call(ri,Seq(55),factory) flatMap(_.future) val f2 = ex.call(ri,Seq(11),factory) flatMap(_.future) diff --git a/test/com/workflowfm/pew/execution/SingleStateExecutorTests.scala b/test/com/workflowfm/pew/execution/SingleStateExecutorTests.scala index bc042c7a..dba17aca 100644 --- a/test/com/workflowfm/pew/execution/SingleStateExecutorTests.scala +++ b/test/com/workflowfm/pew/execution/SingleStateExecutorTests.scala @@ -12,7 +12,7 @@ import com.typesafe.config.ConfigFactory import scala.concurrent.Await import scala.concurrent.duration.Duration import com.workflowfm.pew._ - +import com.workflowfm.pew.stream._ @RunWith(classOf[JUnitRunner]) diff --git a/test/com/workflowfm/pew/metrics/MetricsTests.scala b/test/com/workflowfm/pew/metrics/MetricsTests.scala index abf06953..bbcc86a5 100644 --- a/test/com/workflowfm/pew/metrics/MetricsTests.scala +++ b/test/com/workflowfm/pew/metrics/MetricsTests.scala @@ -10,6 +10,7 @@ import scala.concurrent._ import scala.concurrent.Await import scala.concurrent.duration._ import com.workflowfm.pew._ +import com.workflowfm.pew.stream._ import com.workflowfm.pew.execution._ import RexampleTypes._ @@ 
-36,12 +37,12 @@ class MetricsTests extends FlatSpec with Matchers with BeforeAndAfterAll with Pr val handler = new MetricsHandler[Int]("metrics") val ex = new AkkaExecutor(pai,pbi,pci,ri) - ex.subscribe(handler) + val k1 = ex.subscribe(handler) val f1 = ex.execute(ri,Seq(11)) await(f1) - ex.unsubscribe("metrics") + k1.map(_.stop) handler.keys.size shouldBe 1 handler.processMetrics.size shouldBe 3 @@ -53,7 +54,7 @@ class MetricsTests extends FlatSpec with Matchers with BeforeAndAfterAll with Pr val handler = new MetricsHandler[Int]("metrics") val ex = new AkkaExecutor(pai,pbi,pci,ri) - ex.subscribe(handler) + val k1 = ex.subscribe(handler) val f1 = ex.execute(ri,Seq(11)) val f2 = ex.execute(ri,Seq(11)) @@ -66,7 +67,7 @@ class MetricsTests extends FlatSpec with Matchers with BeforeAndAfterAll with Pr val r3 = await(f3) r3 should be (("PbISleptFor1s","PcISleptFor1s")) - ex.unsubscribe("metrics") + k1.map(_.stop) new MetricsPrinter[Int]()(handler) new MetricsD3Timeline[Int]("resources/d3-timeline","Rexample3")(handler) diff --git a/test/com/workflowfm/pew/mongodb/MongoExecutorTests.scala b/test/com/workflowfm/pew/mongodb/MongoExecutorTests.scala index f40f32a1..e0f2de50 100644 --- a/test/com/workflowfm/pew/mongodb/MongoExecutorTests.scala +++ b/test/com/workflowfm/pew/mongodb/MongoExecutorTests.scala @@ -2,6 +2,7 @@ package com.workflowfm.pew.mongodb import akka.actor.ActorSystem import com.workflowfm.pew._ +import com.workflowfm.pew.stream._ import com.workflowfm.pew.execution.RexampleTypes._ import com.workflowfm.pew.execution._ import org.bson.types.ObjectId @@ -115,7 +116,7 @@ class MongoExecutorTests extends FlatSpec with Matchers with BeforeAndAfterAll w it should "fail properly when a workflow doesn't exist" in { val ex = new MongoExecutor(client, "pew", "test_exec_insts",pai,pbi,pci,pci2,ri,ri2) val id = new ObjectId() - val handler = new PromiseHandler("unitHandler",id) + val handler = new ResultHandler("unitHandler",id) ex.subscribe(handler) 
ex.postResult(id, 0, MetadataAtomicProcess.result(PiItem(0))) diff --git a/test/com/workflowfm/pew/stateless/KafkaExecutorTests.scala b/test/com/workflowfm/pew/stateless/KafkaExecutorTests.scala index b1a1adca..0c4d10e2 100644 --- a/test/com/workflowfm/pew/stateless/KafkaExecutorTests.scala +++ b/test/com/workflowfm/pew/stateless/KafkaExecutorTests.scala @@ -3,7 +3,8 @@ package com.workflowfm.pew.stateless import akka.Done import com.workflowfm.pew.stateless.StatelessMessages.{AnyMsg, _} import com.workflowfm.pew.stateless.instances.kafka.components.KafkaConnectors -import com.workflowfm.pew.{PromiseHandler, _} +import com.workflowfm.pew._ +import com.workflowfm.pew.stream._ import com.workflowfm.pew.stateless.components.{AtomicExecutor, Reducer, ResultListener} import com.workflowfm.pew.stateless.instances.kafka.MinimalKafkaExecutor import com.workflowfm.pew.stateless.instances.kafka.components.KafkaConnectors.{indyReducer, indySequencer, sendMessages} @@ -59,10 +60,10 @@ class KafkaExecutorTests extends PewTestSuite with KafkaTests { val pii = PiInstance( ObjectId.get, pbi, PiObject(1) ) sendMessages( ReduceRequest( pii, Seq() ) ) - val handler = new PromiseHandler( "test", pii.id ) + val handler = new ResultHandler( "test", pii.id ) listener.subscribe( handler ) - await( handler.promise.future ) should be ("PbISleptFor1s") + await( handler.future ) should be ("PbISleptFor1s") await( KafkaConnectors.shutdown( c1, c2, c3, c4 ) ) val msgsOf = new MessageDrain( true ) @@ -77,10 +78,10 @@ class KafkaExecutorTests extends PewTestSuite with KafkaTests { val ex = makeExecutor( completeProcess.settings ) val piiId = await( ex.init( pbi, Seq( PiObject(1) ) ) ) - val handler = new PromiseHandler( "test", piiId ) + val handler = new ResultHandler( "test", piiId ) ex.subscribe( handler ) - val f1 = handler.promise.future + val f1 = handler.future ex.start( piiId ) await( f1 ) should be ("PbISleptFor1s") @@ -300,7 +301,7 @@ class KafkaExecutorTests extends PewTestSuite with 
KafkaTests { futId.value.get.get } - val handler = new PromiseHandler[ObjectId]( "testhandler", ourPiiId ) + val handler = new ResultHandler[ObjectId]( "testhandler", ourPiiId ) // Dont consume, we need the outstanding messages to resume. val fstMsgs: MessageMap = new MessageDrain( false ) From b69ddc1122a72cf1ab93b55e8b28a229146c98cb Mon Sep 17 00:00:00 2001 From: petrospapapa Date: Wed, 12 Dec 2018 17:41:20 +0000 Subject: [PATCH 07/55] Updated changelog for 1.3.0 --- CHANGELOG.md | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index e23ed29b..45735a19 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,19 @@ Includes feature updates, bug fixes, and open issues. * [AkkaExecutor.Call may timeout #4](https://github.com/PetrosPapapa/WorkflowFM-PEW/issues/4) - since v0.1 + +## [v1.3.0](https://github.com/PetrosPapapa/WorkflowFM-PEW/releases/tag/v1.3.0) - 2018-12-14 + +### Features + +* All `PiEvents` now carry an array of `PiMetadata`. The default value contains the system time of the event. Atomic processes can expose additional metadata for `PiEventReturn` (see also [#21](https://github.com/PetrosPapapa/WorkflowFM-PEW/issues/21)). +* Improved `PiEventHandlers`. The `PromiseHandler` is now generalized to return a single object at the end of the workflow. The old `PromiseHandler` is an instance called `ResultHandler` (see also [#26](https://github.com/PetrosPapapa/WorkflowFM-PEW/issues/26)). +* Implemented `PiStream` using Akka's `BroadcastHub` to enable more flexible event handling (see also [#34](https://github.com/PetrosPapapa/WorkflowFM-PEW/issues/34)). Executors can now be mixed in with (at least) either of the two default observables, namely `SimplePiObservable` and `PiStream`. +* Fixed some codec issues (see also [#31](https://github.com/PetrosPapapa/WorkflowFM-PEW/pull/31)). +* The simulator now measures the simulation's real (system) duration. +* Various improvements in Executor unit tests. 
+ + ## [v1.2.2](https://github.com/PetrosPapapa/WorkflowFM-PEW/releases/tag/v1.2.2) - 2018-12-03 ### Features From b5a3810fb8fb902c0181526427c66b369ebb4652 Mon Sep 17 00:00:00 2001 From: Petros Papapanagiotou Date: Wed, 26 Jun 2019 13:17:28 +0100 Subject: [PATCH 08/55] Adds documentation for metrics WIP --- docs/metrics.org | 38 ++++++++++++++++++++ src/com/workflowfm/pew/metrics/Measure.scala | 30 +++++++++++++++- 2 files changed, 67 insertions(+), 1 deletion(-) create mode 100644 docs/metrics.org diff --git a/docs/metrics.org b/docs/metrics.org new file mode 100644 index 00000000..7ad4aee1 --- /dev/null +++ b/docs/metrics.org @@ -0,0 +1,38 @@ +#+TITLE: Metrics +#+AUTHOR: Petros Papapanagiotou +#+EMAIL: petrospapapan@gmail.com +#+OPTIONS: toc:2 +#+EXCLUDE_TAGS: noexport + +* Introduction + +The PEW engine is able to automatically track metrics during workflow execution. These can be used to monitor the workflows and extract analytics and insights. + +Metrics can be tracked in 2 settings: +1) [[realtime][*Real-time execution*]]: These are metrics from the engine regarding the execution of any workflow. +2) [[simulation][*Simulation*]]: These are metrics from the simulator across the simulated time. + +We describe the general setting and the individual metrics next. + +* Setting +The general idea for PEW, is that the engine will automatically collect all the available metrics at runtime. The user can then implement an output function to generate some analytics from the collected metrics. + +The following key concepts are used by PEW for capturing and managing metrics: + +** `Metrics` +Metrics are captured around individual concepts, such as an atomic process or task, a persistent resource, or a workflow. Each set of metrics is captured in an immutable case class. This includes the different features being measured and the methods to update them based on what is happening in the engine. 
+ +** `Aggregator` +An `Aggregator` is a mutable class that collects all the `Metrics` collected across multiple workflows in one place. It contains methods to update the different metrics, indexed by some id, and based on different events that may take place. + +** `Output` +An `Output` is essentially a function that can generate any outputs from the `Metrics` within an `Aggregator`. Outputs may include analytics, visualizations, reports, or anything else. + +* <>Real-time execution metrics + +Real-time metrics are aimed to be generic and domain-independent. We describe each of the general setting concepts above in this particular context. + +** `Metrics` + +We collect the [[https://github.com/PetrosPapapa/WorkflowFM-PEW/blob/master/src/com/workflowfm/pew/metrics/Measure.scala][following real-time metrics]]: + diff --git a/src/com/workflowfm/pew/metrics/Measure.scala b/src/com/workflowfm/pew/metrics/Measure.scala index 1d9288ec..fee5b6a4 100644 --- a/src/com/workflowfm/pew/metrics/Measure.scala +++ b/src/com/workflowfm/pew/metrics/Measure.scala @@ -2,19 +2,47 @@ package com.workflowfm.pew.metrics import com.workflowfm.pew._ +/** Metrics for a particular instance of an atomic process. + * + * @constructor initialize the metrics of a process for a workflow with ID `piID`, unique call reference `ref`, and process name `process`, starting now + * + * @tparam KeyT the type used for workflow IDs + * @param piID the ID of the workflow that executed this process + * @param ref the call reference for that process, i.e. a unique call ID for this particular workflow + * @param process the name of the process + * @param start the system time in milliseconds when the process call started + * @param finish the system time in milliseconds that the process call finished, or [[scala.None]] if it is still running + * @param result a [[String]] representation of the returned result from the process call, or [[scala.None]] if it still running. 
In case of failure, the field is populated with the `getLocalizedMessage` of the exception thrown + */ case class ProcessMetrics[KeyT] (piID:KeyT, ref:Int, process:String, start:Long=System.currentTimeMillis(), finish:Option[Long]=None, result:Option[String]=None) { def complete(time:Long,result:Any) = copy(finish=Some(time),result=Some(result.toString)) } +/** Metrics for a particular workflow. + * + * @constructor initialize the metrics of a workflow with ID `piID` starting now + * + * @tparam KeyT the type used for workflow IDs + * @param piID the unique ID of the workflow. + * @param start the system time in milliseconds when the workflow started executing + * @param calls the number of calls to atomic processes + * @param finish the system time in milliseconds that this workflow finished, or [[scala.None]] if it is still running + * @param result a [[String]] representation of the returned result from the workflow, or [[scala.None]] if it still running. In case of failure, the field is populated with the `getLocalizedMessage` of the exception thrown + */ case class WorkflowMetrics[KeyT] (piID:KeyT, start:Long=System.currentTimeMillis(), calls:Int=0, finish:Option[Long]=None, result:Option[String]=None) { def complete(time:Long,result:Any) = copy(finish=Some(time),result=Some(result.toString)) def call = copy(calls=calls+1) } +/** Collects/aggregates metrics across multiple process calls and workflow executions + * + */ class MetricsAggregator[KeyT] { import scala.collection.immutable.Map - + + /** Process metrics indexed by workflow ID, then by call reference ID */ val processMap = scala.collection.mutable.Map[KeyT,Map[Int,ProcessMetrics[KeyT]]]() + /** Workflow metrics indexed by workflow ID */ val workflowMap = scala.collection.mutable.Map[KeyT,WorkflowMetrics[KeyT]]() // Set From b473f4d2adf422d076070a6bdb5ee90ab4bd47a0 Mon Sep 17 00:00:00 2001 From: Petros Papapanagiotou Date: Wed, 26 Jun 2019 16:36:06 +0100 Subject: [PATCH 09/55] Adds scaladoc 
documentation for com.workflowfm.pew.metrics --- build.sbt | 2 + src/com/workflowfm/pew/metrics/Measure.scala | 125 +++++++++++++++++-- src/com/workflowfm/pew/metrics/Output.scala | 116 +++++++++++++++-- 3 files changed, 220 insertions(+), 23 deletions(-) diff --git a/build.sbt b/build.sbt index 8dc9f7e5..d086c425 100644 --- a/build.sbt +++ b/build.sbt @@ -8,6 +8,8 @@ lazy val commonSettings = Seq ( scalaVersion := "2.12.6" ) +autoAPIMappings := true + // The dependencies are in Maven format, with % separating the parts. // Notice the extra bit "test" on the end of JUnit and ScalaTest, which will // mean it is only a test dependency. diff --git a/src/com/workflowfm/pew/metrics/Measure.scala b/src/com/workflowfm/pew/metrics/Measure.scala index fee5b6a4..183922de 100644 --- a/src/com/workflowfm/pew/metrics/Measure.scala +++ b/src/com/workflowfm/pew/metrics/Measure.scala @@ -5,14 +5,14 @@ import com.workflowfm.pew._ /** Metrics for a particular instance of an atomic process. * * @constructor initialize the metrics of a process for a workflow with ID `piID`, unique call reference `ref`, and process name `process`, starting now - * + * * @tparam KeyT the type used for workflow IDs * @param piID the ID of the workflow that executed this process * @param ref the call reference for that process, i.e. a unique call ID for this particular workflow * @param process the name of the process * @param start the system time in milliseconds when the process call started * @param finish the system time in milliseconds that the process call finished, or [[scala.None]] if it is still running - * @param result a [[String]] representation of the returned result from the process call, or [[scala.None]] if it still running. In case of failure, the field is populated with the `getLocalizedMessage` of the exception thrown + * @param result a `String` representation of the returned result from the process call, or [[scala.None]] if it still running. 
In case of failure, the field is populated with the localized message of the exception thrown */ case class ProcessMetrics[KeyT] (piID:KeyT, ref:Int, process:String, start:Long=System.currentTimeMillis(), finish:Option[Long]=None, result:Option[String]=None) { def complete(time:Long,result:Any) = copy(finish=Some(time),result=Some(result.toString)) @@ -27,16 +27,14 @@ case class ProcessMetrics[KeyT] (piID:KeyT, ref:Int, process:String, start:Long= * @param start the system time in milliseconds when the workflow started executing * @param calls the number of calls to atomic processes * @param finish the system time in milliseconds that this workflow finished, or [[scala.None]] if it is still running - * @param result a [[String]] representation of the returned result from the workflow, or [[scala.None]] if it still running. In case of failure, the field is populated with the `getLocalizedMessage` of the exception thrown + * @param result a `String` representation of the returned result from the workflow, or [[scala.None]] if it still running. 
In case of failure, the field is populated with the localized message of the exception thrown */ case class WorkflowMetrics[KeyT] (piID:KeyT, start:Long=System.currentTimeMillis(), calls:Int=0, finish:Option[Long]=None, result:Option[String]=None) { def complete(time:Long,result:Any) = copy(finish=Some(time),result=Some(result.toString)) def call = copy(calls=calls+1) } -/** Collects/aggregates metrics across multiple process calls and workflow executions - * - */ +/** Collects/aggregates metrics across multiple process calls and workflow executions */ class MetricsAggregator[KeyT] { import scala.collection.immutable.Map @@ -46,7 +44,10 @@ class MetricsAggregator[KeyT] { val workflowMap = scala.collection.mutable.Map[KeyT,WorkflowMetrics[KeyT]]() // Set - + + /** Adds a new [[ProcessMetrics]] instance, taking care of indexing automatically + * Overwrites a previous instance with the same IDs + */ def +=(m:ProcessMetrics[KeyT]) = { val old = processMap.get(m.piID) match { case None => Map[Int,ProcessMetrics[KeyT]]() @@ -54,48 +55,146 @@ class MetricsAggregator[KeyT] { } processMap += (m.piID -> (old + (m.ref -> m))) } + /** Adds a new [[WorkflowMetrics]] instance, taking care of indexing automatically + * Overwrites a previous instance with the same ID + */ def +=(m:WorkflowMetrics[KeyT]) = workflowMap += (m.piID->m) // Update - + + /** Updates a [[ProcessMetrics]] instance. 
+ * + * @return the updated [[processMap]] or [[scala.None]] if the identified instance does not exist + * + * @param piID the workflow ID of the process + * @param ref the call reference of the process + * @param u a function to update the [[ProcessMetrics]] instance + * + * @example Assume function that updates the name of a process in [[ProcessMetrics]] to `"x"`: + * {{{ def myUpdate(p: ProcessMetrics[KeyT]): ProcessMetrics[KeyT] = p.copy(process="x") }}} + * We can update the metrics of process with workflow ID `5` and call reference `2` as follows: + * {{{ metricsAggregator ^ (5,2,myUpdate) }}} + * + */ def ^(piID:KeyT,ref:Int,u:ProcessMetrics[KeyT]=>ProcessMetrics[KeyT]) = processMap.get(piID).flatMap(_.get(ref)).map { m => this += u(m) } + + /** Updates a [[WorkflowMetrics]] instance + * + * @return the updated [[processMap]] or [[scala.None]] if the identified instance does not exist + * + * @param piID the workflow ID of the process + * @param u a function to update the [[ProcessMetrics]] instance + * + * @example Assume function that increases the number of calls [[WorkflowMetrics]] to `"x"`: + * {{{ def myUpdate(w: WorkflowMetrics[KeyT]): WorkflowMetrics[KeyT] = w.copy(calls=calls+1) }}} + * We can update the metrics of workflow with ID `5` as follows: + * {{{ metricsAggregator ^ (5,myUpdate) }}} + * + */ def ^(piID:KeyT,u:WorkflowMetrics[KeyT]=>WorkflowMetrics[KeyT]) = workflowMap.get(piID).map { m => this += u(m) } - + // Handle events - + + /** Handles the event of a workflow starting, updating its metrics accordingly. + * + * @param piID the ID of the workflow + * @param time the timestamp to be recorded as the workflow start + */ def workflowStart(piID:KeyT, time:Long=System.currentTimeMillis()):Unit = this += WorkflowMetrics(piID,time) + + /** Handles the event of a workflow finishing successfully, updating its metrics accordingly. 
+ * + * @param piID the ID of the workflow + * @param result the return value of the workflow + * @param time the timestamp to be recorded as the workflow finish + */ def workflowResult(piID:KeyT, result:Any, time:Long=System.currentTimeMillis()):Unit = this ^ (piID,_.complete(time,result)) + + /** Handles the event of a workflow failing with an exception, updating its metrics accordingly. + * + * @param piID the ID of the workflow + * @param ex the thrown exception (or other [[scala.Throwable]]) + * @param time the timestamp to be recorded as the workflow finish + */ def workflowException(piID:KeyT, ex:Throwable, time:Long=System.currentTimeMillis()):Unit = this ^ (piID,_.complete(time,"Exception: " + ex.getLocalizedMessage)) - + + /** Handles the event of an atomic process call, updating its metrics accordingly. + * + * @param piID the ID of the workflow + * @param ref the call reference ID + * @param process the name of the atomic process being called + * @param time the timestamp to be recorded as the process start + */ def procCall(piID:KeyT, ref:Int, process:String, time:Long=System.currentTimeMillis()):Unit = { this += ProcessMetrics(piID,ref,process,time) this ^ (piID,_.call) } + + /** Handles the event of an atomic process returning successfully, updating its metrics accordingly. + * + * @param piID the ID of the workflow + * @param ref the call reference ID + * @param result the result returned by the process + * @param time the timestamp to be recorded as the process finish + */ def procReturn(piID:KeyT, ref:Int, result:Any, time:Long=System.currentTimeMillis()):Unit = this ^ (piID,ref,_.complete(time, result)) - def processException(piID:KeyT, ref:Int, ex:Throwable, time:Long=System.currentTimeMillis()):Unit = processFailure(piID,ref,ex.getLocalizedMessage,time) + + /** Handles the event of an atomic process failing with an exception, updating its metrics accordingly. 
+ * + * @param piID the ID of the workflow + * @param ref the call reference ID + * @param ex the thrown exception (or other [[scala.Throwable]]) + * @param time the timestamp to be recorded as the process finish + */ + def processException(piID:KeyT, ref:Int, ex:Throwable, time:Long=System.currentTimeMillis()):Unit = + processFailure(piID,ref,ex.getLocalizedMessage,time) + + /** Handles the event of an atomic process failing in any way, updating its metrics accordingly. + * + * @param piID the ID of the workflow + * @param ref the call reference ID + * @param ex a `String` explanation of what went wrong + * @param time the timestamp to be recorded as the process finish + */ def processFailure(piID:KeyT, ref:Int, ex:String, time:Long=System.currentTimeMillis()):Unit = { this ^ (piID,_.complete(time,"Exception: " + ex)) this ^ (piID,ref,_.complete(time,"Exception: " + ex)) } // Getters - + + /** Returns the collection of workflow IDs that have been tracked. */ def keys = workflowMap.keys + /** Returns all the tracked instances of [[WorkflowMetrics]] sorted by starting time. */ def workflowMetrics = workflowMap.values.toSeq.sortBy(_.start) + /** Returns all the tracked instances of [[ProcessMetrics]] sorted by starting time. */ def processMetrics = processMap.values.flatMap(_.values).toSeq.sortBy(_.start) + /** Returns all the tracked instances of [[ProcessMetrics]] associated with a particular workflow, sorted by starting time. + * @param id the ID of the workflow + * */ def processMetricsOf(id:KeyT) = processMap.getOrElse(id,Map[Int,ProcessMetrics[KeyT]]()).values.toSeq.sortBy(_.start) + /** Returns a [[scala.collection.immutable.Set]] of all process names being tracked. + * This is useful when using process names as a category, for example to colour code tasks in the timeline. + */ def processSet = processMap.values.flatMap(_.values.map(_.process)).toSet[String] } +/** A [[MetricsAggregator]] that is also a [[PiEventHandler]]. 
+ * Aggregates metrics automatically based on [[PiEvent]]s and system time. + * + * @param name a unique name to identify the instance of [[PiEventHandler]] + * @param timeFn the [[PiMetadata]] key to retrieve timing information (the recorder system time by default) + */ class MetricsHandler[KeyT](override val name: String, timeFn: PiMetadata.Key[Long] = PiMetadata.SystemTime ) extends MetricsAggregator[KeyT] with PiEventHandler[KeyT] { + /** Converts [[PiEvent]]s to metrics updates. */ override def apply( e: PiEvent[KeyT] ): Boolean = { e match { case PiEventStart(i,t) => workflowStart( i.id, timeFn(t) ) diff --git a/src/com/workflowfm/pew/metrics/Output.scala b/src/com/workflowfm/pew/metrics/Output.scala index ac4a5cf5..7770bc34 100644 --- a/src/com/workflowfm/pew/metrics/Output.scala +++ b/src/com/workflowfm/pew/metrics/Output.scala @@ -4,9 +4,14 @@ import scala.collection.immutable.Queue import org.apache.commons.lang3.time.DurationFormatUtils import java.text.SimpleDateFormat +/** Manipulates a [[MetricsAggregator]] to produce some output via side-effects. + * @tparam KeyT the type used for workflow IDs + */ trait MetricsOutput[KeyT] extends (MetricsAggregator[KeyT] => Unit) { + /** Compose with another [[MetricsOutput]] in sequence. */ def and(h:MetricsOutput[KeyT]) = MetricsOutputs(this,h) } +/** Contains helpful formatting shortcut functions. */ object MetricsOutput { def formatOption[T](v:Option[T], nullValue: String, format:T=>String={ x:T => x.toString }) = v.map(format).getOrElse(nullValue) def formatTime(format:String)(time:Long) = new SimpleDateFormat(format).format(time) @@ -22,45 +27,90 @@ object MetricsOutput { } getOrElse(nullValue) } +/** A [[MetricsOutput]] consisting of a [[scala.collection.immutable.Queue]] of [[MetricsOutput]]s + * to be run sequentially. + */ case class MetricsOutputs[KeyT](handlers:Queue[MetricsOutput[KeyT]]) extends MetricsOutput[KeyT] { + /** Call all included [[MetricsOutput]]s. 
*/ override def apply(aggregator:MetricsAggregator[KeyT]) = handlers map (_.apply(aggregator)) + /** Add another [[MetricsOutput]] in sequence. */ override def and(h:MetricsOutput[KeyT]) = MetricsOutputs(handlers :+ h) } object MetricsOutputs { + /** Shorthand constructor for a [[MetricsOutputs]] from a list of [[MetricsOutput]]s. */ def apply[KeyT](handlers:MetricsOutput[KeyT]*):MetricsOutputs[KeyT] = MetricsOutputs[KeyT](Queue[MetricsOutput[KeyT]]() ++ handlers) } - +/** Generates a string representation of the metrics using a generalized CSV format. */ trait MetricsStringOutput[KeyT] extends MetricsOutput[KeyT] { + /** A string representing null values. */ val nullValue = "NULL" + /** The field names for [[ProcessMetrics]]. + * @param separator a string (such as a space or comma) to separate the names + */ def procHeader(separator:String) = Seq("ID","PID","Process","Start","Finish","Result").mkString(separator) + + /** String representation of a [[ProcessMetrics]] instance. + * + * @param separator a string (such as a space or comma) to separate the values + * @param timeFormat optional argument to format timestamps using `java.text.SimpleDateFormat` + * @param m the [[ProcessMetrics]] instance to be handled + */ def procCSV(separator:String,timeFormat:Option[String])(m:ProcessMetrics[KeyT]) = m match { case ProcessMetrics(id,r,p,s,f,res) => timeFormat match { case None => Seq(id,r,p,s,MetricsOutput.formatOption(f,nullValue),MetricsOutput.formatOption(res,nullValue)).mkString(separator) case Some(format) => Seq(id,r,p,MetricsOutput.formatTime(format)(s),MetricsOutput.formatTimeOption(f,format,nullValue),MetricsOutput.formatOption(res,nullValue)).mkString(separator) } } - + + /** The field names for [[WorkflowMetrics]]. 
+ * @param separator a string (such as a space or comma) to separate the names + */ def workflowHeader(separator:String) = Seq("ID","PID","Process","Start","Finish","Result").mkString(separator) + + /** String representation of a [[WorkflowMetrics]] instance. + * + * @param separator a string (such as a space or comma) to separate the values + * @param timeFormat optional argument to format timestamps using `java.text.SimpleDateFormat` + * @param m the [[WorkflowMetrics]] instance to be handled + */ def workflowCSV(separator:String,timeFormat:Option[String])(m:WorkflowMetrics[KeyT]) = m match { case WorkflowMetrics(id,s,c,f,res) => timeFormat match { case None => Seq(id,s,c,MetricsOutput.formatOption(f,nullValue),MetricsOutput.formatOption(res,nullValue)).mkString(separator) case Some(format) => Seq(id,MetricsOutput.formatTime(format)(s),c,MetricsOutput.formatTimeOption(f,format,nullValue),MetricsOutput.formatOption(res,nullValue)).mkString(separator) } } - + + /** Formats all [[ProcessMetrics]] in a [[MetricsAggregator]] in a single string. + * + * @param aggregator the [[MetricsAggregator]] to retrieve the metrics to be formatted + * @param separator a string (such as a space or comma) to separate values + * @param lineSep a string (such as a new line) to separate process metrics + * @param timeFormat optional argument to format timestamps using `java.text.SimpleDateFormat` + */ def processes(aggregator:MetricsAggregator[KeyT],separator:String,lineSep:String="\n",timeFormat:Option[String]=None) = aggregator.processMetrics.map(procCSV(separator,timeFormat)).mkString(lineSep) - def workflows(aggregator:MetricsAggregator[KeyT],separator:String,lineSep:String="\n",timeFormat:Option[String]=None) = + + /** Formats all [[WorkflowMetrics]] in a [[MetricsAggregator]] in a single string. 
+ * + * @param aggregator the [[MetricsAggregator]] to retrieve the metrics to be formatted + * @param separator a string (such as a space or comma) to separate values + * @param lineSep a string (such as a new line) to separate workflow metrics + * @param timeFormat optional argument to format timestamps using `java.text.SimpleDateFormat` + */ + def workflows(aggregator:MetricsAggregator[KeyT],separator:String,lineSep:String="\n",timeFormat:Option[String]=None) = aggregator.workflowMetrics.map(workflowCSV(separator,timeFormat)).mkString(lineSep) } - -class MetricsPrinter[KeyT] extends MetricsStringOutput[KeyT] { +/** Prints all metrics to standard output. */ +class MetricsPrinter[KeyT] extends MetricsStringOutput[KeyT] { + /** Separates the values. */ val separator = "\t| " + /** Separates metrics instances. */ val lineSep = "\n" + /** Default time format using `java.text.SimpleDateFormat`. */ val timeFormat = Some("YYYY-MM-dd HH:mm:ss.SSS") override def apply(aggregator:MetricsAggregator[KeyT]) = { @@ -80,6 +130,9 @@ ${workflows(aggregator,separator,lineSep,timeFormat)} } } +/** Helper to write stuff to a file. + * @todo Move to [[com.workflowfm.pew.util]] + */ trait FileOutput { import java.io._ @@ -93,6 +146,15 @@ trait FileOutput { } } +/** Outputs metrics to a file using a standard CSV format. + * Generates 2 CSV files, + * one for processes with a "-tasks.csv" suffix, + * and one for workflows with a "-workflows.csv" suffix. + * + * @tparam KeyT the type used for workflow IDs + * @param path path to directory where the files will be placed + * @param name file name prefix + */ class MetricsCSVFileOutput[KeyT](path:String,name:String) extends MetricsStringOutput[KeyT] with FileOutput { val separator = "," @@ -105,6 +167,16 @@ class MetricsCSVFileOutput[KeyT](path:String,name:String) extends MetricsStringO } } +/** Outputs metrics to a file using the d3-timeline format. + * Generates 1 file with a ""-data.js" suffix. 
+ * This can then be combined with the resources at + * [[https://github.com/PetrosPapapa/WorkflowFM-PEW/tree/master/resources/d3-timeline]] + * to render the timeline in a browser. + * + * @tparam KeyT the type used for workflow IDs + * @param path path to directory where the files will be placed + * @param file file name prefix + */ class MetricsD3Timeline[KeyT](path:String,file:String) extends MetricsOutput[KeyT] with FileOutput { import java.io._ @@ -114,7 +186,8 @@ class MetricsD3Timeline[KeyT](path:String,file:String) extends MetricsOutput[Key val dataFile = s"$path$file-data.js" writeToFile(dataFile, result) } - + + /** Helps build the output with a static system time. */ def build(aggregator:MetricsAggregator[KeyT], now:Long) = { var buf:StringBuilder = StringBuilder.newBuilder buf.append("var processes = [\n") @@ -125,7 +198,15 @@ class MetricsD3Timeline[KeyT](path:String,file:String) extends MetricsOutput[Key buf.append("];\n") buf.toString } - + + /** Encodes an entire workflow as a timeline. 
+ * + * @return the encoded timeline for the workflow + * @param m thr [[WorkflowMetrics]] recorded for the particular workflow + * @param agg the [[MetricsAggregator]] containing all the relevant metrics + * @param now the current (real) to be used as the end time of unfinished processes + * @param prefix a string to prefix (usually some whitespace) to prefix the entry + */ def workflowEntry(m:WorkflowMetrics[KeyT], agg:MetricsAggregator[KeyT], now:Long, prefix:String) = { val processes = (Map[String,Queue[ProcessMetrics[KeyT]]]() /: agg.processMetricsOf(m.piID)){ case (m,p) => { val procs = m.getOrElse(p.process, Queue()) :+ p @@ -134,14 +215,29 @@ class MetricsD3Timeline[KeyT](path:String,file:String) extends MetricsOutput[Key val data = processes.map { case (proc,i) => processEntry(proc, i, now, prefix + "\t") } s"""$prefix{id: \"${m.piID}\", data: [\n${data.mkString("")}$prefix]},\n""" } - + + /** Encodes multiple process calls of the same process in a single lane. + * + * @return the encoded timeline lane + * @param proc the name of the process + * @param i the list of [[ProcessMetrics]] recorded for each process call + * @param now the current (real) to be used as the end time of unfinished processes + * @param prefix a string to prefix (usually some whitespace) to prefix the entry + */ def processEntry(proc:String, i:Seq[ProcessMetrics[KeyT]], now:Long, prefix:String) = { if (i.isEmpty) "" else { val times = ("" /: i){ case (s,m) => s"$s${callEntry(now,m,prefix + "\t")}" } s"""$prefix{label: \"$proc\", times: [\n$times$prefix]},\n""" } } - + + /** Encodes a process call as a timeline task. 
+ * + * @return the encoded timeline task + * @param now the current (real) to be used as the end time of unfinished processes + * @param m the [[ProcessMetrics]] recorded for this process call + * @param prefix a string to prefix (usually some whitespace) to prefix the entry + */ def callEntry(now:Long, m:ProcessMetrics[KeyT], prefix:String) = { s"""$prefix{"label":"${m.ref}", "process": "${m.process}", "starting_time": ${m.start}, "ending_time": ${m.finish.getOrElse(now)}, "result":"${MetricsOutput.formatOption(m.result,"NONE")}"},\n""" From 2a63ca630658861181ba43a798a1f967b86f04e9 Mon Sep 17 00:00:00 2001 From: Petros Papapanagiotou Date: Wed, 26 Jun 2019 19:13:13 +0100 Subject: [PATCH 10/55] Adds scaladoc documentation to simulation/metrics/Measure.scala --- src/com/workflowfm/pew/metrics/Measure.scala | 2 +- .../pew/simulation/metrics/Measure.scala | 182 ++++++++++++++---- 2 files changed, 147 insertions(+), 37 deletions(-) diff --git a/src/com/workflowfm/pew/metrics/Measure.scala b/src/com/workflowfm/pew/metrics/Measure.scala index 183922de..ddeccd76 100644 --- a/src/com/workflowfm/pew/metrics/Measure.scala +++ b/src/com/workflowfm/pew/metrics/Measure.scala @@ -177,7 +177,7 @@ class MetricsAggregator[KeyT] { def processMetrics = processMap.values.flatMap(_.values).toSeq.sortBy(_.start) /** Returns all the tracked instances of [[ProcessMetrics]] associated with a particular workflow, sorted by starting time. * @param id the ID of the workflow - * */ + */ def processMetricsOf(id:KeyT) = processMap.getOrElse(id,Map[Int,ProcessMetrics[KeyT]]()).values.toSeq.sortBy(_.start) /** Returns a [[scala.collection.immutable.Set]] of all process names being tracked. * This is useful when using process names as a category, for example to colour code tasks in the timeline. 
diff --git a/src/com/workflowfm/pew/simulation/metrics/Measure.scala b/src/com/workflowfm/pew/simulation/metrics/Measure.scala index 7f2a6e3f..a9588029 100644 --- a/src/com/workflowfm/pew/simulation/metrics/Measure.scala +++ b/src/com/workflowfm/pew/simulation/metrics/Measure.scala @@ -2,20 +2,17 @@ package com.workflowfm.pew.simulation.metrics import com.workflowfm.pew.simulation._ - -//trait Metrics { -// def stringValues :List[String] -// def values(sep:String=",") = stringValues.mkString(sep) -//} -// -//class MetricTracker[T <: Metrics](init :T) { -// var metrics :T = init -// def <~(u:T=>T) :this.type = { synchronized { metrics = u(metrics) } ; this } -//} -// -//object TaskMetrics { -// def header(sep:String) = List("Task","Start","Delay","Duration","Cost","Workflow","Resources").mkString(sep) -//} +/** Metrics for a simulated [[Task]] that consumed virtual time. + * + * @param id the unique ID of the [[Task]] + * @param task the name of the [[Task]] + * @param simulation the name of the simulation the [[Task]] belongs to + * @param created the virtual timestamp when the [[Task]] was created and entered the [[Coordinator]] + * @param started the virtual timestamp when the [[Task]] started executing, or [[scala.None]] if it has not started yet + * @param duration the virtual duration of the [[Task]] + * @param cost the cost associated with the [[Task]] + * @param resources the list of names of the [[TaskResource]]s this [[Task]] used + */ case class TaskMetrics ( id:Long, task:String, @@ -26,28 +23,34 @@ case class TaskMetrics ( cost:Long, resources:Seq[String] ) { - //override def stringValues = List(task,start,delay,duration,cost,workflow,"\"" + resources.mkString(",") + "\"") map (_.toString) -// def addDelay(d :Long) = copy(delay = delay + d) -// def addDuration(d :Long) = copy(duration = duration + d) -// def addCost(c:Long) = copy(cost = cost + c) + /** Sets the starting time for the [[Task]]. 
*/ def start(st:Long) = copy(started=Some(st)) + + /** Calculates the task delay as the difference of the creation and starting times. */ def delay = started match { case None => 0L case Some(s) => s - created } + + /** Returns the full task and simulation name. */ def fullName = s"$task($simulation)" -// def done(t:Task, time:Long, cost:Long, costPerTick:Long) = { -// val st = started match { -// case None => time -// case Some(t) => t -// } -// copy( duration = duration + time - st, cost = cost + costPerTick * (time-st), resources = t.resources) -// } } object TaskMetrics { + /** Generates [[TaskMetrics]] from a given [[Task]] assuming it has not started yet. */ def apply(task:Task):TaskMetrics = TaskMetrics(task.id, task.name, task.simulation, task.created, None, task.duration, task.cost, task.resources) } + +/** Metrics for a [[Simulation]] that has already started. + * + * @param name the unique name of the [[Simulation]] + * @param started the virtual timestamp when the [[Simulation]] started executing + * @param duration the virtual duration of the [[Simulation]] + * @param delay the sum of all delays for all involved [[Task]]s + * @param tasks the number of [[Task]]s associated with the [[Simulation]] so far + * @param cost the total cost associated with the [[Simulation]] so far + * @param result a `String` representation of the returned result from the [[Simulation]], or [[scala.None]] if it still running. In case of failure, the field is populated with the localized message of the exception thrown + */ case class SimulationMetrics( name:String, started:Long, @@ -57,16 +60,34 @@ case class SimulationMetrics( cost:Long, result:Option[String] ) { + /** Adds some time to the total duration. */ def addDuration(d:Long) = copy(duration = duration + d) + /** Adds some cost to the total cost. */ def addCost(c:Long) = copy(cost = cost + c) + /** Adds some delay to the total delay. 
*/ def addDelay(d:Long) = copy(delay = delay + d) - def task(task:Task) = copy(tasks = tasks + 1, cost = cost + task.cost) - def done(res:String, time:Long) = copy( result = Some(res), duration = duration + time - started ) // TODO should this and result be one? + /** Updates the metrics given a new [[Task]] that is created as part of the [[Simulation]]. */ + def task(task:Task) = copy(tasks = tasks + 1, cost = cost + task.cost) + /** Updates the metrics given that the [[Simulation]] has completed with a certain result. + * @param res the result of the [[Simulation]] or localized message of the exception in case of failure + * @param time the virtual timestamp when the [[Simulation]] finished + */ + def done(res:String, time:Long) = copy( result = Some(res), duration = duration + time - started ) } object SimulationMetrics { + /** Initialize metrics for a named [[Simulation]] starting at the given virtual time. */ def apply(name:String, t:Long):SimulationMetrics = SimulationMetrics(name,t,0L,0L,0,0L,None) } + +/** Metrics for a [[TaskResource]]. + * + * @param name the unique name of the [[TaskResource]] + * @param busyTime the total amount of virtual time that the [[TaskResource]] has been busy, i.e. attached to a [[Task]] + * @param idleTime the total amount of virtual time that the [[TaskResource]] has been idle, i.e. not attached to any [[Task]] + * @param tasks the number of different [[Task]]s that have been attached to this [[TaskResource]] + * @param cost the total cost associated with this [[TaskResource]] + */ case class ResourceMetrics ( name:String, busyTime:Long, @@ -74,7 +95,9 @@ case class ResourceMetrics ( tasks:Int, cost:Long ) { + /** Adds some idle time to the total. */ def idle(i:Long) = copy(idleTime = idleTime + i) + /** Updates the metrics given a new [[Task]] has been attached to the [[TaskResource]]. 
*/ def task(task:Task, costPerTick:Long) = copy( tasks = tasks + 1, cost = cost + task.duration * costPerTick, @@ -82,63 +105,150 @@ case class ResourceMetrics ( ) } object ResourceMetrics { - def apply(name:String):ResourceMetrics = ResourceMetrics(name,0L,0L,0,0L) + /** Initialize metrics given the name of a [[TaskResource]]. */ + def apply(name:String):ResourceMetrics = ResourceMetrics(name,0L,0L,0,0L) + /** Initialize metrics given a [[TaskResource]]. */ def apply(r:TaskResource):ResourceMetrics = ResourceMetrics(r.name,0L,0L,0,0L) } + +/** Collects/aggregates metrics across multiple tasks, resources, and simulations. */ class SimMetricsAggregator { import scala.collection.immutable.Map + /** The '''real''' (system) time that measurement started, or [[scala.None]] if it has not started yet. */ var start:Option[Long] = None + /** The '''real''' (system) time that measurement finished, or [[scala.None]] if it has not finished yet. */ var end:Option[Long] = None + /** Marks the start of metrics measurement with the current system time. */ def started = start match { case None => start = Some(System.currentTimeMillis()) case _ => () } + /** Marks the end of metrics measurement with the current system time. */ def ended = end = Some(System.currentTimeMillis()) + /** Task metrics indexed by task ID. */ val taskMap = scala.collection.mutable.Map[Long,TaskMetrics]() + /** Simulation metrics indexed by name. */ val simMap = scala.collection.mutable.Map[String,SimulationMetrics]() + /** Resource metrics indexed by name. 
*/ val resourceMap = scala.collection.mutable.Map[String,ResourceMetrics]() // Set - + + /** Adds a new [[TaskMetrics]] instance, taking care of indexing automatically + * Overwrites a previous instance with the same IDs + */ def +=(m:TaskMetrics):TaskMetrics = { taskMap += (m.id->m) ; m } + /** Adds a new [[SimulationMetrics]] instance, taking care of indexing automatically + * Overwrites a previous instance with the same IDs + */ def +=(m:SimulationMetrics):SimulationMetrics = { simMap += (m.name->m) ; m } + /** Adds a new [[ResourceMetrics]] instance, taking care of indexing automatically + * Overwrites a previous instance with the same IDs + */ def +=(m:ResourceMetrics):ResourceMetrics = { resourceMap += (m.name->m) ; m } - + + /** Initializes and adds a new [[TaskMetrics]] instance given a new [[Task]]. */ def +=(task:Task):TaskMetrics = this += TaskMetrics(task) + /** Initializes and adds a new [[SimulationMetrics]] instance given the name of the simulation starting now + * and the current virtual time. + */ def +=(s:Simulation,t:Long):SimulationMetrics = this += SimulationMetrics(s.name,t) + /** Initializes and adds a new [[ResourceMetrics]] instance given a new [[TaskResource]]. */ def +=(r:TaskResource):ResourceMetrics = this += ResourceMetrics(r) // Update - + /** Updates a [[TaskMetrics]] instance. + * + * @return the updated [[taskMap]] or [[scala.None]] if the identified instance does not exist + * + * @param taskID the task ID for the involved [[Task]] + * @param u a function to update the [[TaskMetrics]] instance + * + * @see [[com.workflowfm.pew.metrics.MetricsAggregator]] for examples in a similar context + */ def ^(taskID:Long)(u:TaskMetrics=>TaskMetrics):Option[TaskMetrics] = taskMap.get(taskID).map { m => this += u(m) } + + /** Updates a [[TaskMetrics]] instance. 
+ * + * @return the updated [[taskMap]] or [[scala.None]] if the identified instance does not exist + * + * @param task the involved [[Task]] + * @param u a function to update the [[TaskMetrics]] instance + * + * @see [[com.workflowfm.pew.metrics.MetricsAggregator]] for examples in a similar context + * + */ def ^(task:Task)(u:TaskMetrics=>TaskMetrics):Option[TaskMetrics] = taskMap.get(task.id).map { m => this += u(m) } - def ^(simulation:Simulation)(u:SimulationMetrics=>SimulationMetrics):Option[SimulationMetrics] = + + /** Updates a [[SimulationMetrics]] instance. + * + * @return the updated [[simMap]] or [[scala.None]] if the identified instance does not exist + * + * @param simulation the involved [[Simulation]] + * @param u a function to update the [[SimulationMetrics]] instance + * + * @see [[com.workflowfm.pew.metrics.MetricsAggregator]] for examples in a similar context + * + */ + def ^(simulation:Simulation)(u:SimulationMetrics=>SimulationMetrics):Option[SimulationMetrics] = simMap.get(simulation.name).map { m => this += u(m) } + + /** Updates a [[SimulationMetrics]] instance. + * + * @return the updated [[simMap]] or [[scala.None]] if the identified instance does not exist + * + * @param simulationName the name of the involved [[Simulation]] + * @param u a function to update the [[SimulationMetrics]] instance + * + * @see [[com.workflowfm.pew.metrics.MetricsAggregator]] for examples in a similar context + * + */ def ^(simulationName:String)(u:SimulationMetrics=>SimulationMetrics):Option[SimulationMetrics] = simMap.get(simulationName).map { m => this += u(m) } -// def ^(resource:String)(u:ResourceMetrics=>ResourceMetrics):Option[ResourceMetrics] = -// resourceMap.get(resource).map { m => this += u(m) } + + /** Updates a [[ResourceMetrics]] instance. 
+ * + * @return the updated [[resourceMap]] or [[scala.None]] if the identified instance does not exist + * + * @param resource the involved [[TaskResource]] + * @param u a function to update the [[ResourceMetrics]] instance + * + * @see [[com.workflowfm.pew.metrics.MetricsAggregator]] for examples in a similar context + * + */ def ^(resource:TaskResource)(u:ResourceMetrics=>ResourceMetrics):Option[ResourceMetrics] = resourceMap.get(resource.name).map { m => this += u(m) } // Getters - + + /** Returns all the tracked instances of [[TaskMetrics]] sorted by starting time. */ def taskMetrics = taskMap.values.toSeq.sortBy(_.started) + /** Returns all the tracked instances of [[SimulationMetrics]] sorted by simulation name. */ def simulationMetrics = simMap.values.toSeq.sortBy(_.name) + /** Returns all the tracked instances of [[ResourceMetrics]] sorted by resource name. */ def resourceMetrics = resourceMap.values.toSeq.sortBy(_.name) + /** Returns a [[scala.collection.immutable.Set]] of all task names being tracked. + * This is useful when using task names as a category, for example to colour code tasks in the timeline. + */ def taskSet = taskMap.values.map(_.task).toSet[String] - + + /** Returns all the tracked instances of [[TaskMetrics]] associated with a particular [[TaskResource]], sorted by starting time. + * @param r the tracked [[ResourceMetrics]] of the resource + */ // TODO: we used to have 2 levels of sorting! def taskMetricsOf(r:ResourceMetrics) = taskMap.values.toSeq.filter(_.resources.contains(r.name)).sortBy(_.started) + /** Returns all the tracked instances of [[TaskMetrics]] associated with a particular [[Simulation]], sorted by starting time. 
+ * @param r the tracked [[SimulationMetrics]] of the resource + */ def taskMetricsOf(s:SimulationMetrics) = taskMap.values.toSeq.filter(_.simulation.equals(s.name)).sortBy(_.started) } From 960b8901eb48c7f2829dd37fd1ffd13dd865a1c9 Mon Sep 17 00:00:00 2001 From: Petros Papapanagiotou Date: Thu, 27 Jun 2019 00:06:09 +0100 Subject: [PATCH 11/55] Removes mutable reference to `Coordinator` in `SimMetricsActor` This makes for a cleaner, more flexible implementation, allowing multiple simulations across multiple `Coordinator`s. The downside is that simulations can be run asynchronously, making it hard to disambiguate which results came from which `Coordinator`. We leave that problem to the user for now. --- CHANGELOG.md | 1 + .../pew/simulation/metrics/Actor.scala | 31 ++++++++----------- 2 files changed, 14 insertions(+), 18 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 45735a19..90dcccf1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -21,6 +21,7 @@ Includes feature updates, bug fixes, and open issues. * Implemented `PiStream` using Akka's `BroadcastHub` to enable more flexible event handling (see also [#34](https://github.com/PetrosPapapa/WorkflowFM-PEW/issues/34)). Executors can now be mixed in with (at least) either of the two default observables, namely `SimplePiObservable` and `PiStream`. * Fixed some codec issues (see also [#31](https://github.com/PetrosPapapa/WorkflowFM-PEW/pull/31)). * The simulator now measures the simulation's real (system) duration. +* `SimMetricsActor` no longer keeps a reference to the `Coordinator`. This makes for a cleaner, more flexible implementation, allowing multiple simulations across multiple `Coordinator`s. The downside is that simulations can be run asynchronously, making it hard to disambiguate which results came from which `Coordinator`. We leave that problem to the user for now. * Various improvements in Executor unit tests. 
diff --git a/src/com/workflowfm/pew/simulation/metrics/Actor.scala b/src/com/workflowfm/pew/simulation/metrics/Actor.scala index cba860f4..e84863af 100644 --- a/src/com/workflowfm/pew/simulation/metrics/Actor.scala +++ b/src/com/workflowfm/pew/simulation/metrics/Actor.scala @@ -9,39 +9,26 @@ import com.workflowfm.pew.simulation.Simulation import com.workflowfm.pew.simulation.Coordinator -object SimMetricsActor { - case class Start(coordinator:ActorRef) - case class StartSims(coordinator:ActorRef,sims:Seq[(Long,Simulation)],executor:SimulatorExecutor[_]) - case class StartSimsNow(coordinator:ActorRef,sims:Seq[Simulation],executor:SimulatorExecutor[_]) - - def props(m:SimMetricsOutput, callbackActor:Option[ActorRef]=None)(implicit system: ActorSystem): Props = Props(new SimMetricsActor(m,callbackActor)(system)) -} - // Provide a callbackActor to get a response when we are done. Otherwise we'll shutdown the ActorSystem class SimMetricsActor(m:SimMetricsOutput, callbackActor:Option[ActorRef])(implicit system: ActorSystem) extends Actor { - var coordinator:Option[ActorRef] = None - + def receive = { - case SimMetricsActor.Start(coordinator) if this.coordinator.isEmpty => { - this.coordinator = Some(coordinator) + case SimMetricsActor.Start(coordinator) => { coordinator ! Coordinator.Start } - case SimMetricsActor.StartSims(coordinator,sims,executor) if this.coordinator.isEmpty => { - this.coordinator = Some(coordinator) + case SimMetricsActor.StartSims(coordinator,sims,executor) => { coordinator ! Coordinator.AddSims(sims,executor) coordinator ! Coordinator.Start } - case SimMetricsActor.StartSimsNow(coordinator,sims,executor) if this.coordinator.isEmpty => { - this.coordinator = Some(coordinator) + case SimMetricsActor.StartSimsNow(coordinator,sims,executor) => { coordinator ! Coordinator.AddSimsNow(sims,executor) coordinator ! 
Coordinator.Start } - case Coordinator.Done(t:Long,ma:SimMetricsAggregator) if this.coordinator == Some(sender) => { - this.coordinator = None + case Coordinator.Done(t:Long,ma:SimMetricsAggregator) => { m(t,ma) callbackActor match { case None => system.terminate() @@ -50,3 +37,11 @@ class SimMetricsActor(m:SimMetricsOutput, callbackActor:Option[ActorRef])(implic } } } + +object SimMetricsActor { + case class Start(coordinator:ActorRef) + case class StartSims(coordinator:ActorRef,sims:Seq[(Long,Simulation)],executor:SimulatorExecutor[_]) + case class StartSimsNow(coordinator:ActorRef,sims:Seq[Simulation],executor:SimulatorExecutor[_]) + + def props(m:SimMetricsOutput, callbackActor:Option[ActorRef]=None)(implicit system: ActorSystem): Props = Props(new SimMetricsActor(m,callbackActor)(system)) +} From b5463528bb6a1955c428f43a886d5741662d7a19 Mon Sep 17 00:00:00 2001 From: Petros Papapanagiotou Date: Thu, 27 Jun 2019 00:08:12 +0100 Subject: [PATCH 12/55] Removes white space from CHANGELOG --- CHANGELOG.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 90dcccf1..ae96a86b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,9 +16,9 @@ Includes feature updates, bug fixes, and open issues. ### Features -* All `PiEvents` now carry an array of `PiMetadata`. The default value contains the system time of the event. Atomic processes can expose additional metadata for `PiEventReturn` (see also [#21](https://github.com/PetrosPapapa/WorkflowFM-PEW/issues/21)). +* All `PiEvents` now carry an array of `PiMetadata`. The default value contains the system time of the event. Atomic processes can expose additional metadata for `PiEventReturn` (see also [#21](https://github.com/PetrosPapapa/WorkflowFM-PEW/issues/21)). * Improved `PiEventHandlers`. The `PromiseHandler` is now generalized to return a single object at the end of the workflow. 
The old `PromiseHandler` is an instance called `ResultHandler` (see also [#26](https://github.com/PetrosPapapa/WorkflowFM-PEW/issues/26)). -* Implemented `PiStream` using Akka's `BroadcastHub` to enable more flexible event handling (see also [#34](https://github.com/PetrosPapapa/WorkflowFM-PEW/issues/34)). Executors can now be mixed in with (at least) either of the two default observables, namely `SimplePiObservable` and `PiStream`. +* Implemented `PiStream` using Akka's `BroadcastHub` to enable more flexible event handling (see also [#34](https://github.com/PetrosPapapa/WorkflowFM-PEW/issues/34)). Executors can now be mixed in with (at least) either of the two default observables, namely `SimplePiObservable` and `PiStream`. * Fixed some codec issues (see also [#31](https://github.com/PetrosPapapa/WorkflowFM-PEW/pull/31)). * The simulator now measures the simulation's real (system) duration. * `SimMetricsActor` no longer keeps a reference to the `Coordinator`. This makes for a cleaner, more flexible implementation, allowing multiple simulations across multiple `Coordinator`s. The downside is that simulations can be run asynchronously, making it hard to disambiguate which results came from which `Coordinator`. We leave that problem to the user for now. @@ -29,9 +29,9 @@ Includes feature updates, bug fixes, and open issues. ### Features -* Fixed `AkkaPiObservable` registering handlers globally (see also [#7](https://github.com/PetrosPapapa/WorkflowFM-PEW/issues/7)). -* Improved simulation `Coordinator`. Processes can now interact with the `Coordinator` and its clock cycle is a bit more robust (see also [#28](https://github.com/PetrosPapapa/WorkflowFM-PEW/issues/28)). -* Fixed/improved Codecs for custom data types, which includes `AnyCodec` and associated parts (see also [#29](https://github.com/PetrosPapapa/WorkflowFM-PEW/issues/29)). +* Fixed `AkkaPiObservable` registering handlers globally (see also [#7](https://github.com/PetrosPapapa/WorkflowFM-PEW/issues/7)). 
+* Improved simulation `Coordinator`. Processes can now interact with the `Coordinator` and its clock cycle is a bit more robust (see also [#28](https://github.com/PetrosPapapa/WorkflowFM-PEW/issues/28)). +* Fixed/improved Codecs for custom data types, which includes `AnyCodec` and associated parts (see also [#29](https://github.com/PetrosPapapa/WorkflowFM-PEW/issues/29)). * Fixed issues with the `ClassLoader` crashing in Kafka (see also [#30](https://github.com/PetrosPapapa/WorkflowFM-PEW/pull/30)). From d443e68c6fc20e7c3307d5a811afaa7fb09959f8 Mon Sep 17 00:00:00 2001 From: Petros Papapanagiotou Date: Thu, 27 Jun 2019 00:42:58 +0100 Subject: [PATCH 13/55] Fixes compilation issues in `KafkaExecutorTests` due to merge --- .../pew/stateless/KafkaExecutorTests.scala | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/test/com/workflowfm/pew/stateless/KafkaExecutorTests.scala b/test/com/workflowfm/pew/stateless/KafkaExecutorTests.scala index c69e56ce..fec14643 100644 --- a/test/com/workflowfm/pew/stateless/KafkaExecutorTests.scala +++ b/test/com/workflowfm/pew/stateless/KafkaExecutorTests.scala @@ -10,7 +10,7 @@ import com.workflowfm.pew.stateless.instances.kafka.components.KafkaConnectors import com.workflowfm.pew.stateless.instances.kafka.components.KafkaConnectors.{DrainControl, sendMessages} import com.workflowfm.pew.stateless.instances.kafka.settings.{KafkaExecutorEnvironment, KafkaExecutorSettings} import com.workflowfm.pew.{PiEventFinish, _} -import com.workflowfm.pew.stream.PromiseHandler +import com.workflowfm.pew.stream.ResultHandler import org.apache.kafka.common.utils.Utils import org.bson.types.ObjectId import org.junit.runner.RunWith @@ -239,11 +239,11 @@ class KafkaExecutorTests def call( p: PiProcess, args: PiObject* ): Future[Any] = { val pii = PiInstance(ObjectId.get, p, args: _*) - val handler = new PromiseHandler("test", pii.id) + val handler = new ResultHandler("test", pii.id) listener.subscribe(handler) 
sendMessages(ReduceRequest(pii, Seq()), PiiLog(PiEventStart(pii))) - handler.promise.future + handler.future } def shutdown(): Unit = { @@ -290,11 +290,11 @@ class KafkaExecutorTests def baremetalCall( ex: CustomKafkaExecutor, p: PiProcess, args: PiObject* ): Future[Any] = { val piiId = await( ex.init( p, args.toSeq ) ) - val handler = new PromiseHandler("test", piiId) + val handler = new ResultHandler("test", piiId) ex.subscribe(handler) ex.start(piiId) - handler.promise.future + handler.future } it should "call an atomic PbI (baremetal interface)" in { @@ -682,7 +682,7 @@ class KafkaExecutorTests val onCompletion: MessageMap = { - val handler = new PromiseHandler[ObjectId]("testhandler", ourPiiId) + val handler = new ResultHandler[ObjectId]("testhandler", ourPiiId) val ex2 = makeExecutor(completeProcess.settings) tryBut { From d566082611473098cafef98d6ebb5ca615750f72 Mon Sep 17 00:00:00 2001 From: Petros Papapanagiotou Date: Thu, 27 Jun 2019 00:47:04 +0100 Subject: [PATCH 14/55] Updates CHANGELOG --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index ae96a86b..6641f9f6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -22,6 +22,8 @@ Includes feature updates, bug fixes, and open issues. * Fixed some codec issues (see also [#31](https://github.com/PetrosPapapa/WorkflowFM-PEW/pull/31)). * The simulator now measures the simulation's real (system) duration. * `SimMetricsActor` no longer keeps a reference to the `Coordinator`. This makes for a cleaner, more flexible implementation, allowing multiple simulations across multiple `Coordinator`s. The downside is that simulations can be run asynchronously, making it hard to disambiguate which results came from which `Coordinator`. We leave that problem to the user for now. +* Some `PiEvents` got rearranged or renamed (see also [#45](https://github.com/PetrosPapapa/WorkflowFM-PEW/pull/45)). 
+* Bugfixes and improvements for `KafkaExecutor` and `KafkaExecutorTests` (see also [#45](https://github.com/PetrosPapapa/WorkflowFM-PEW/pull/45)). * Various improvements in Executor unit tests. From 92de26b9f5541e1bf79972b86bf5b1005991202d Mon Sep 17 00:00:00 2001 From: Petros Papapanagiotou Date: Thu, 27 Jun 2019 00:59:51 +0100 Subject: [PATCH 15/55] Updates CHANGELOG and version --- CHANGELOG.md | 14 +++++++++++--- build.sbt | 2 +- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6641f9f6..242bafb3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,16 +12,24 @@ Includes feature updates, bug fixes, and open issues. -## [v1.3.0](https://github.com/PetrosPapapa/WorkflowFM-PEW/releases/tag/v1.3.0) - 2018-12-14 +## [v1.4.0](https://github.com/PetrosPapapa/WorkflowFM-PEW/releases/tag/v1.4.0) - 2019-07-01 ### Features -* All `PiEvents` now carry an array of `PiMetadata`. The default value contains the system time of the event. Atomic processes can expose additional metadata for `PiEventReturn` (see also [#21](https://github.com/PetrosPapapa/WorkflowFM-PEW/issues/21)). * Improved `PiEventHandlers`. The `PromiseHandler` is now generalized to return a single object at the end of the workflow. The old `PromiseHandler` is an instance called `ResultHandler` (see also [#26](https://github.com/PetrosPapapa/WorkflowFM-PEW/issues/26)). * Implemented `PiStream` using Akka's `BroadcastHub` to enable more flexible event handling (see also [#34](https://github.com/PetrosPapapa/WorkflowFM-PEW/issues/34)). Executors can now be mixed in with (at least) either of the two default observables, namely `SimplePiObservable` and `PiStream`. +* `SimMetricsActor` no longer keeps a reference to the `Coordinator`. This makes for a cleaner, more flexible implementation, allowing multiple simulations across multiple `Coordinator`s. 
The downside is that simulations can be run asynchronously, making it hard to disambiguate which results came from which `Coordinator`. We leave that problem to the user for now.
+
+
+## [v1.3.0](https://github.com/PetrosPapapa/WorkflowFM-PEW/releases/tag/v1.3.0) - 2019-06-19
+
+For some unknown reason, the version number was increased in `build.sbt` back in December without actually merging the intended changes or creating a new tag. In the meantime, [#45](https://github.com/PetrosPapapa/WorkflowFM-PEW/pull/45) was merged with various bug fixes and minor changes, the Ski example was updated and some documentation was added. I decided to create the tag now and push the stream changes to 1.4.0.
+
+### Features
+
+* All `PiEvents` now carry an array of `PiMetadata`. The default value contains the system time of the event. Atomic processes can expose additional metadata for `PiEventReturn` (see also [#21](https://github.com/PetrosPapapa/WorkflowFM-PEW/issues/21)).
* Fixed some codec issues (see also [#31](https://github.com/PetrosPapapa/WorkflowFM-PEW/pull/31)).
* The simulator now measures the simulation's real (system) duration.
-* `SimMetricsActor` no longer keeps a reference to the `Coordinator`. This makes for a cleaner, more flexible implementation, allowing multiple simulations across multiple `Coordinator`s. The downside is that simulations can be run asynchronously, making it hard to disambiguate which results came from which `Coordinator`. We leave that problem to the user for now.
* Some `PiEvents` got rearranged or renamed (see also [#45](https://github.com/PetrosPapapa/WorkflowFM-PEW/pull/45)).
* Bugfixes and improvements for `KafkaExecutor` and `KafkaExecutorTests` (see also [#45](https://github.com/PetrosPapapa/WorkflowFM-PEW/pull/45)).
* Various improvements in Executor unit tests.
diff --git a/build.sbt b/build.sbt index 8dc9f7e5..b8adc84c 100644 --- a/build.sbt +++ b/build.sbt @@ -3,7 +3,7 @@ name := "PEW" sbtVersion := "1.2.6" lazy val commonSettings = Seq ( - version := "1.3.0-SNAPSHOT", + version := "1.4.0-SNAPSHOT", organization := "com.workflowfm", scalaVersion := "2.12.6" ) From 94686d0732f0cb785953949fae119971de0b4807 Mon Sep 17 00:00:00 2001 From: Petros Papapanagiotou Date: Thu, 27 Jun 2019 02:10:51 +0100 Subject: [PATCH 16/55] Adds Scaladoc to simulation/metrics/Actor.scala --- src/com/workflowfm/pew/metrics/Measure.scala | 4 ++-- src/com/workflowfm/pew/simulation/metrics/Actor.scala | 6 +++++- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/src/com/workflowfm/pew/metrics/Measure.scala b/src/com/workflowfm/pew/metrics/Measure.scala index eca9b847..9931a0c7 100644 --- a/src/com/workflowfm/pew/metrics/Measure.scala +++ b/src/com/workflowfm/pew/metrics/Measure.scala @@ -186,10 +186,10 @@ class MetricsAggregator[KeyT] { def processSet = processMap.values.flatMap(_.values.map(_.process)).toSet[String] } -/** A [[MetricsAggregator]] that is also a [[PiEventHandler]]. +/** A [[MetricsAggregator]] that is also a [[com.workflowfm.pew.stream.PiEventHandler]]. * Aggregates metrics automatically based on [[PiEvent]]s and system time. 
* - * @param name a unique name to identify the instance of [[PiEventHandler]] + * @param name a unique name to identify the instance of [[com.workflowfm.pew.stream.PiEventHandler]] * @param timeFn the [[PiMetadata]] key to retrieve timing information (the recorder system time by default) */ class MetricsHandler[KeyT](override val name: String, timeFn: PiMetadata.Key[Long] = PiMetadata.SystemTime ) diff --git a/src/com/workflowfm/pew/simulation/metrics/Actor.scala b/src/com/workflowfm/pew/simulation/metrics/Actor.scala index e84863af..695c403b 100644 --- a/src/com/workflowfm/pew/simulation/metrics/Actor.scala +++ b/src/com/workflowfm/pew/simulation/metrics/Actor.scala @@ -10,7 +10,10 @@ import com.workflowfm.pew.simulation.Coordinator // Provide a callbackActor to get a response when we are done. Otherwise we'll shutdown the ActorSystem - +/** Interacts with a [[Coordinator]] to run simulations and apply a [[SimMetricsOutput]] when they are done. + * @param m the [[SimMetricsOutput]] that will be applied to the metrics from every simulation [[Coordinator]] when it is done + * @param callbackActor optional argument for a callback actor that we can forward the [[Coordinator.Done]] message. If this is [[scala.None]], we do not forward the message, but we shutdown the actor system, as we assume all simulations are completed. + */ class SimMetricsActor(m:SimMetricsOutput, callbackActor:Option[ActorRef])(implicit system: ActorSystem) extends Actor { def receive = { @@ -38,6 +41,7 @@ class SimMetricsActor(m:SimMetricsOutput, callbackActor:Option[ActorRef])(implic } } +/** Contains the messages involved in [[SimMetricsActor]] and the [[akka.actor.Props]] initializer. 
*/ object SimMetricsActor { case class Start(coordinator:ActorRef) case class StartSims(coordinator:ActorRef,sims:Seq[(Long,Simulation)],executor:SimulatorExecutor[_]) From 66874646669689978b1fb95811df9a7b1de0dc03 Mon Sep 17 00:00:00 2001 From: Petros Papapanagiotou Date: Thu, 27 Jun 2019 16:22:36 +0100 Subject: [PATCH 17/55] Adds Scaladoc to simulation/metrics/Output.scala --- src/com/workflowfm/pew/metrics/Output.scala | 10 +- .../pew/simulation/metrics/Output.scala | 106 +++++++++++++++++- 2 files changed, 105 insertions(+), 11 deletions(-) diff --git a/src/com/workflowfm/pew/metrics/Output.scala b/src/com/workflowfm/pew/metrics/Output.scala index 7770bc34..7541f471 100644 --- a/src/com/workflowfm/pew/metrics/Output.scala +++ b/src/com/workflowfm/pew/metrics/Output.scala @@ -146,10 +146,10 @@ trait FileOutput { } } -/** Outputs metrics to a file using a standard CSV format. - * Generates 2 CSV files, - * one for processes with a "-tasks.csv" suffix, - * and one for workflows with a "-workflows.csv" suffix. +/** Outputs metrics to files using a standard CSV format. + * Generates 2 CSV files: + * 1. one for processes with a "-tasks.csv" suffix, + * 2. and one for workflows with a "-workflows.csv" suffix. * * @tparam KeyT the type used for workflow IDs * @param path path to directory where the files will be placed @@ -168,7 +168,7 @@ class MetricsCSVFileOutput[KeyT](path:String,name:String) extends MetricsStringO } /** Outputs metrics to a file using the d3-timeline format. - * Generates 1 file with a ""-data.js" suffix. + * Generates 1 file with a "-data.js" suffix. * This can then be combined with the resources at * [[https://github.com/PetrosPapapa/WorkflowFM-PEW/tree/master/resources/d3-timeline]] * to render the timeline in a browser. 
diff --git a/src/com/workflowfm/pew/simulation/metrics/Output.scala b/src/com/workflowfm/pew/simulation/metrics/Output.scala index 07b28ed6..587a0ed9 100644 --- a/src/com/workflowfm/pew/simulation/metrics/Output.scala +++ b/src/com/workflowfm/pew/simulation/metrics/Output.scala @@ -3,57 +3,126 @@ package com.workflowfm.pew.simulation.metrics import scala.collection.immutable.Queue import com.workflowfm.pew.metrics.{ FileOutput, MetricsOutput } +/** Manipulates a [[SimMetricsAggregator]] to produce some output via side-effects. + * + * As a function, takes 2 arguments: + * - a [[Long]] representing the total virtual time elapsed + * - the [[SimMetricsAggregator]] to act upon + */ trait SimMetricsOutput extends ((Long,SimMetricsAggregator) => Unit) { - def and(h:SimMetricsOutput) = SimMetricsOutputs(this,h) + /** Compose with another [[SimMetricsOutput]] in sequence. */ + def and(h:SimMetricsOutput) = SimMetricsOutputs(this,h) } +/** A [[SimMetricsOutput]] consisting of a [[scala.collection.immutable.Queue]] of [[SimMetricsOutput]]s + * to be run sequentially. + */ case class SimMetricsOutputs(handlers:Queue[SimMetricsOutput]) extends SimMetricsOutput { + /** Call all included [[MetricsOutput]]s. */ override def apply(time:Long,aggregator:SimMetricsAggregator) = handlers map (_.apply(time,aggregator)) + /** Add another [[SimMetricsOutput]] in sequence. */ override def and(h:SimMetricsOutput) = SimMetricsOutputs(handlers :+ h) } object SimMetricsOutputs { + /** Shorthand constructor for a [[SimMetricsOutputs]] from a list of [[MetricsOutput]]s. */ def apply(handlers:SimMetricsOutput*):SimMetricsOutputs = SimMetricsOutputs(Queue[SimMetricsOutput]() ++ handlers) - def none = SimMetricsOutputs() } +/** Generates a string representation of the metrics using a generalized CSV format. */ trait SimMetricsStringOutput extends SimMetricsOutput { + /** A string representing null values. */ val nullValue = "NULL" - + + /** The field names for [[TaskMetrics]]. 
+ * @param separator a string (such as a space or comma) to separate the names + */ def taskHeader(separator:String) = Seq("ID","Task","Simulation","Created","Start","Delay","Duration","Cost","Resources").mkString(separator) + + /** String representation of a [[TaskMetrics]] instance. + * + * @param separator a string (such as a space or comma) to separate the values + * @param resSeparator a string (such as a space or comma) to separate the list of names of [[TaskResources]] in the [[TaskMetrics]] + * @param m the [[TaskMetrics]] instance to be handled + */ def taskCSV(separator:String, resSeparator:String)(m:TaskMetrics) = m match { case TaskMetrics(id,task,sim,ct,st,dur,cost,res) => Seq(id,task,sim,ct,MetricsOutput.formatOption(st,nullValue),m.delay,dur,cost,res.mkString(resSeparator)).mkString(separator) } + /** The field names for [[SimulationMetrics]]. + * @param separator a string (such as a space or comma) to separate the names + */ def simHeader(separator:String) = Seq("Name","Start","Duration","Delay","Tasks","Cost","Result").mkString(separator) + + /** String representation of a [[SimulationMetrics]] instance. + * + * @param separator a string (such as a space or comma) to separate the values + * @param m the [[SimulationMetrics]] instance to be handled + */ def simCSV(separator:String)(m:SimulationMetrics) = m match { case SimulationMetrics(name,st,dur,delay,ts,c,res) => Seq(name,st,dur,delay,ts,c,res).mkString(separator) } + /** The field names for [[ResourceMetrics]]. + * @param separator a string (such as a space or comma) to separate the names + */ def resHeader(separator:String) = Seq("Name","Busy","Idle","Tasks","Cost").mkString(separator) + + /** String representation of a [[ResourceMetrics]] instance. 
+ * + * @param separator a string (such as a space or comma) to separate the values + * @param m the [[ResourceMetrics]] instance to be handled + */ def resCSV(separator:String)(m:ResourceMetrics) = m match { case ResourceMetrics(name,b,i,ts,c) => Seq(name,b,i,ts,c).mkString(separator) } - + + /** Formats all [[TaskMetrics]] in a [[SimMetricsAggregator]] in a single string. + * + * @param aggregator the [[SimMetricsAggregator]] to retrieve the metrics to be formatted + * @param separator a string (such as a space or comma) to separate values + * @param lineSep a string (such as a new line) to separate tasks + * @param resSeparator a string (such as a space or comma) to separate the list of names of [[TaskResources]] in the [[TaskMetrics]] + */ def tasks(aggregator:SimMetricsAggregator,separator:String,lineSep:String="\n",resSeparator:String=";") = aggregator.taskMetrics.map(taskCSV(separator,resSeparator)).mkString(lineSep) - def simulations(aggregator:SimMetricsAggregator,separator:String, lineSep:String="\n") = + + /** Formats all [[SimulationMetrics]] in a [[SimMetricsAggregator]] in a single string. + * + * @param aggregator the [[SimMetricsAggregator]] to retrieve the metrics to be formatted + * @param separator a string (such as a space or comma) to separate values + * @param lineSep a string (such as a new line) to separate simulations + */ + def simulations(aggregator:SimMetricsAggregator,separator:String, lineSep:String="\n") = aggregator.simulationMetrics.map(simCSV(separator)).mkString(lineSep) + + /** Formats all [[ResourceMetrics]] in a [[SimMetricsAggregator]] in a single string. 
+ * + * @param aggregator the [[SimMetricsAggregator]] to retrieve the metrics to be formatted + * @param separator a string (such as a space or comma) to separate values + * @param lineSep a string (such as a new line) to separate resources + */ def resources(aggregator:SimMetricsAggregator,separator:String, lineSep:String="\n") = aggregator.resourceMetrics.map(resCSV(separator)).mkString(lineSep) } +/** Prints all simulation metrics to standard output. */ class SimMetricsPrinter extends SimMetricsStringOutput { def apply(totalTicks:Long,aggregator:SimMetricsAggregator) = { + /** Separates the values. */ val sep = "\t| " + /** Separates metrics instances. */ val lineSep = "\n" + /** Default time format using `java.text.SimpleDateFormat`. */ val timeFormat = "YYYY-MM-dd HH:mm:ss.SSS" + /** Default duration format using [[org.apache.commons.lang3.time.DurationFormatUtils.formatDuration]]. */ val durFormat = "HH:mm:ss.SSS" + /** A string representing null time values. */ val nullTime = "NONE" println( s""" @@ -81,6 +150,15 @@ Duration: ${MetricsOutput.formatDuration(aggregator.start, aggregator.end, durFo } } +/** Outputs simulation metrics to files using a standard CSV format. + * Generates 3 CSV files, + * 1. One for tasks with a "-tasks.csv" suffix, + * 2. One for simulations with a "-simulations.csv" suffix. + * 3. One for resources with a "-resources.csv" suffix. + * + * @param path path to directory where the files will be placed + * @param name file name prefix + */ class SimCSVFileOutput(path:String,name:String) extends SimMetricsStringOutput with FileOutput { import java.io._ @@ -98,6 +176,21 @@ class SimCSVFileOutput(path:String,name:String) extends SimMetricsStringOutput w } +/** Outputs simulation metrics to a file using the d3-timeline format. + * Generates 1 file with a "-simdata.js" suffix. 
+ * This can then be combined with the resources at + * [[https://github.com/PetrosPapapa/WorkflowFM-PEW/tree/master/resources/d3-timeline]] + * to render the timeline in a browser. + * + * The timeline can display either virtual units of time or millisecond durations. + * If we choose the latter, we can provide the size of the virtual unit of time in milliseconds + * and all the durations will be scaled to that. For example, if our virtual unit of time is minutes, + * we need to provide a `tick` value of 60000. + * + * @param path path to directory where the files will be placed + * @param file file name prefix + * @param tick the size of 1 unit of virtual time + */ class SimD3Timeline(path:String,file:String,tick:Int=1) extends SimMetricsOutput with FileOutput { import java.io._ @@ -107,7 +200,8 @@ class SimD3Timeline(path:String,file:String,tick:Int=1) extends SimMetricsOutput val dataFile = s"$path$file-simdata.js" writeToFile(dataFile, result) } - + + /** Helps build the output with a static system time. */ def build(aggregator:SimMetricsAggregator, now:Long) = { var buf:StringBuilder = StringBuilder.newBuilder buf.append("var tasks = [\n") From 070c18a89119e3d2aadc7f3ec699f422f0c4af92 Mon Sep 17 00:00:00 2001 From: Petros Papapanagiotou Date: Thu, 27 Jun 2019 18:13:13 +0100 Subject: [PATCH 18/55] Updates docs/metrics.org --- docs/metrics.org | 178 ++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 170 insertions(+), 8 deletions(-) diff --git a/docs/metrics.org b/docs/metrics.org index 7ad4aee1..9913112e 100644 --- a/docs/metrics.org +++ b/docs/metrics.org @@ -14,25 +14,187 @@ Metrics can be tracked in 2 settings: We describe the general setting and the individual metrics next. + * Setting + The general idea for PEW, is that the engine will automatically collect all the available metrics at runtime. The user can then implement an output function to generate some analytics from the collected metrics. 
The following key concepts are used by PEW for capturing and managing metrics:
-** `Metrics`
+** ~Metrics~
Metrics are captured around individual concepts, such as an atomic process or task, a persistent resource, or a workflow. Each set of metrics is captured in an immutable case class. This includes the different features being measured and the methods to update them based on what is happening in the engine.
-** `Aggregator`
-An `Aggregator` is a mutable class that collects all the `Metrics` collected across multiple workflows in one place. It contains methods to update the different metrics, indexed by some id, and based on different events that may take place.
+** ~Aggregator~
+An ~Aggregator~ is a mutable class that collects all the ~Metrics~ collected across multiple workflows in one place. It contains methods to update the different metrics, indexed by some id, and based on different events that may take place.
+
+** ~Output~
+An ~Output~ is essentially a function that can generate any outputs from the ~Metrics~ within an ~Aggregator~. Outputs may include analytics, visualizations, reports, or anything else.
-** `Output`
-An `Output` is essentially a function that can generate any outputs from the `Metrics` within an `Aggregator`. Outputs may include analytics, visualizations, reports, or anything else.
+
* <>Real-time execution metrics
-Real-time metrics are aimed to be generic and domain-independent. We describe each of the general setting concepts above in this particular context.
+[[[https://github.com/PetrosPapapa/WorkflowFM-PEW/tree/master/src/com/workflowfm/pew/metrics][Source]]]
+
+Real-time metrics are minimal, as they are aimed to be generic and domain-independent. We capture metrics about calls to atomic processes and metrics about executions of entire workflows.
+
+The metrics are gathered in a ~MetricsAggregator~ and can be processed through a ~MetricsOutput~. 
+
+A ~MetricsHandler~ is the most convenient way of gathering metrics directly from a ~ProcessExecutor~. It is a ~PiEventHandler~, which means you can register it directly to the executor and process the results afterwards.
+
+Here's an example pattern:
+#+BEGIN_SRC scala
+// Instantiate your handler. Call it "metrics".
+val handler = new MetricsHandler[Int]("metrics")
+
+// Instantiate your executor (assuming a list of processes).
+val executor = new AkkaExecutor(processes :_*)
+
+// Subscribe the handler and obtain a kill switch to unsubscribe it when done.
+val killSwitch = executor.subscribe(handler)
+
+///////////////////////////////////
+// Execute all your workflows here.
+// Wait for them to finish.
+///////////////////////////////////
+
+// Stop/unsubscribe the handler.
+killSwitch.map(_.stop)
+
+// Instantiate your output, in this case a simple MetricsPrinter.
+val output = new MetricsPrinter[Int]()
+
+// Run it on the results.
+output(handler)
+#+END_SRC
+
+The 2 types of metrics available are described next.
+
+** ~ProcessMetrics~
+
+This captures metrics about a particular call of an ~AtomicProcess~.
+- ~piID~: The ID of the workflow that executed the atomic process.
+- ~ref~: A unique call ID for this process call within the particular workflow.
+- ~process~: The name of the process.
+- ~start~: The system time in milliseconds when the process call started.
+- ~finish~: The system time in milliseconds that the process call finished, or ~None~ if it is still running.
+- ~result~: A ~String~ representation of the returned result from the process call, or ~None~ if it is still running. In case of failure, the field is populated with the localized message of the exception thrown.
+
+** ~WorkflowMetrics~
+
+This captures metrics for a particular execution of a workflow (~CompositeProcess~).
+- ~piID~: The unique ID of the workflow.
+- ~start~: The system time in milliseconds when the workflow started executing. 
+- ~calls~: The number of individual calls performed to atomic processes.
+- ~finish~: The system time in milliseconds that the workflow finished, or ~None~ if it is still running.
+- ~result~: a ~String~ representation of the returned result from the workflow, or ~None~ if it is still running. In case of failure, the field is populated with the localized message of the exception thrown.
+
+
+* <>Simulation metrics
+
+[[[https://github.com/PetrosPapapa/WorkflowFM-PEW/tree/master/src/com/workflowfm/pew/simulation/metrics][Source]]]
+
+Simulation metrics are somewhat richer than the real-time ones. We capture metrics about each ~Task~, ~Simulation~ and ~TaskResource~ used. More details about these concepts can be found [[https://github.com/PetrosPapapa/WorkflowFM-PEW/wiki/Simulation][here]].
+
+The metrics are gathered in a ~SimMetricsAggregator~ and can be processed through a ~SimMetricsOutput~.
+
+The general assumption is that simulations run on ~AkkaExecutor~. Under this assumption, we can expand the implementation to ensure asynchronous gathering of the metrics and automatic execution of the output in the end. For this reason we have introduced the [[https://github.com/PetrosPapapa/WorkflowFM-PEW/blob/master/src/com/workflowfm/pew/simulation/metrics/Actor.scala][~SimMetricsActor~]] that takes care of all of this for us.
+
+Here is an example setup to manage simulation metrics, assuming an active ~ActorSystem~:
+#+BEGIN_SRC scala
+// Instantiate the Coordinator.
+val coordinator = system.actorOf(Coordinator.props(DefaultScheduler))
+
+// Load a list of available TaskResources to the Coordinator.
+coordinator ! Coordinator.AddResources(machines)
+
+// Instantiate your output, in this case a simple SimMetricsPrinter.
+val output = new SimMetricsPrinter()
+
+// Create the SimMetricsActor.
+val metricsActor = system.actorOf(SimMetricsActor.props(output))
+
+// Set up a list of simulations, paired with their starting times. 
+val simulations:Seq[(Long,Simulation)] = ... + +// Instantiate the executor. +val executor = new AkkaExecutor(simulations flatMap (_._2.getProcesses()) :_*) + +// Start the simulations through the SimMetricsActor. +metricsActor ! SimMetricsActor.StartSims(coordinator,simulations,executor) +#+END_SRC + +The ~metricsActor~ will automatically run the output function (the printer in this case) on the results. + +Note that, in this scenario, the ~metricsActor~ will also shutdown the ~ActorSystem~. If you want to avoid that, e.g. when you need to run multiple independent simulations, you need to set up your own actor that will be given the opportunity to act when the simulation and metrics output have finished. Assuming ~a:ActorRef~ is that actor, you can pass it to the ~metricsActor~ at construction as follows: +#+BEGIN_SRC scala +val metricsActor = system.actorOf(SimMetricsActor.props(output, Some(a))) +#+END_SRC + +Your actor will receive a ~Coordinator.Done~ message when everything is done and the ~ActorSystem~ will remain active. + +The 3 types of gathered metrics are described next. + +** ~TaskMetrics~ + +This captures metrics for a simulated ~Task~. +- ~id~: The unique ID of the ~Task~. +- ~task~: The name of the ~Task~. +- ~simulation~: The name of the simulation the ~Task~ belongs to. +- ~created~: The virtual timestamp when the ~Task~ was created and entered the ~Coordinator~. +- ~started~: The virtual timestamp when the ~Task~ started executing, or ~None~ if it has not started yet. +- ~duration~: The virtual duration of the ~Task~. +- ~cost~: The cost associated with the ~Task~. +- ~resources~: The list of names of the ~TaskResource~ this ~Task~ used. + +** ~SimulationMetrics~ + +This captures metrics for a particular ~Simulation~. +- ~name~: The unique name of the ~Simulation~. +- ~started~: The virtual timestamp when the ~Simulation~ started executing. +- ~duration~: The virtual duration of the ~Simulation~. 
+- ~delay~: The sum of all delays for all involved ~Task~.
+- ~tasks~: The number of ~Task~ associated with the ~Simulation~ so far.
+- ~cost~: The total cost associated with the ~Simulation~ so far.
+- ~result~: a ~String~ representation of the returned result from the ~Simulation~, or ~None~ if it is still running. In case of failure, the field is populated with the localized message of the exception thrown.
+
+** ~ResourceMetrics~
+
+This captures metrics for a particular ~TaskResource~.
+- ~name~: The unique name of the ~TaskResource~.
+- ~busyTime~: The total amount of virtual time that the ~TaskResource~ has been busy, i.e. attached to a ~Task~.
+- ~idleTime~: The total amount of virtual time that the ~TaskResource~ has been idle, i.e. not attached to any ~Task~.
+- ~tasks~: The number of different ~Task~ that have been attached to this ~TaskResource~.
+- ~cost~: The total cost associated with this ~TaskResource~.
+
+
+* Extending the metrics
+
+Analytics that can be derived from the current metrics can be calculated by a custom output function.
+
+Implementation of new types of metrics in the current setup requires an extension of each of the 3 main concepts and, more importantly, a computational way to generate these metrics at runtime.
+
+The former can be easily achieved by:
+1) Implementing your own custom case classes for your metrics.
+2) Extending one of the existing aggregators to hold your new metrics.
+3) Extending the output classes to deal with your custom metrics.
+
+The latter is harder, as the current metrics are measured directly in the ~PiEvent~s generated by the executor or by the simulation ~Coordinator~.
+
+Metrics that can be calculated by atomic processes (or tasks), can be given as metadata output in the process implementation. Instead of implementing a standard ~AtomicProcess~, switch its inheritance to a ~MetadataAtomicProcess~. 
You can then implement the ~run~ function so that it returns calculated metrics as one or more ~PiMetadataElem~. -** `Metrics` +Here's an example pattern: +#+BEGIN_SRC scala +override def runMeta( args: Seq[PiObject] )( implicit ec: ExecutionContext ): Future[MetadataAtomicResult] = { + // run this as a regular AtomicProcess + run( args ).map { result => + // calculate your metrics + val metrics :Future[Seq[PiMetadataElem]] = ... + // return the combined result (assuming metrics is a Future here) + metrics.map { m => MetadataAtomicProcess.result(result, m :_*) } + } +} +#+END_SRC -We collect the [[https://github.com/PetrosPapapa/WorkflowFM-PEW/blob/master/src/com/workflowfm/pew/metrics/Measure.scala][following real-time metrics]]: +The generated metadata will be attached to the corresponding ~PiEventReturn~, so you can use a ~PiEventHandler~ to grab it and pass it to your aggregator. +Calculating the metrics at the same time as the result requires refactoring of the automatically generated code. From 133ec7c0962f120a5aa9a0507eaefb4a6b7263a1 Mon Sep 17 00:00:00 2001 From: Petros Papapanagiotou Date: Fri, 28 Jun 2019 01:54:31 +0100 Subject: [PATCH 19/55] Adds some scaladoc documentation --- build.sbt | 4 ++- .../pew/stream/PiEventHandler.scala | 9 +++++-- .../workflowfm/pew/stream/PiObservable.scala | 18 +++++++++++-- src/com/workflowfm/pew/stream/PiStream.scala | 26 ++++++++++--------- 4 files changed, 40 insertions(+), 17 deletions(-) diff --git a/build.sbt b/build.sbt index b8adc84c..bdf95c38 100644 --- a/build.sbt +++ b/build.sbt @@ -8,7 +8,9 @@ lazy val commonSettings = Seq ( scalaVersion := "2.12.6" ) -// The dependencies are in Maven format, with % separating the parts. +autoAPIMappings := true + +// The dependencies are in Maven format, with % separating the parts. // Notice the extra bit "test" on the end of JUnit and ScalaTest, which will // mean it is only a test dependency. 
// diff --git a/src/com/workflowfm/pew/stream/PiEventHandler.scala b/src/com/workflowfm/pew/stream/PiEventHandler.scala index 96736547..ab2ce6ee 100644 --- a/src/com/workflowfm/pew/stream/PiEventHandler.scala +++ b/src/com/workflowfm/pew/stream/PiEventHandler.scala @@ -7,16 +7,21 @@ import java.text.SimpleDateFormat import scala.collection.immutable.Queue // Return true if the handler is done and needs to be unsubscribed. - +/** A listener for [[PiEvent]]s. */ trait PiEventHandler[KeyT] extends (PiEvent[KeyT]=>Boolean) { def name:String + /** Compose with another handler. */ def and(h:PiEventHandler[KeyT]) = MultiPiEventHandler(this,h) } +/** A factory that generates a handler that is related to a particular workflow ID. + * The ID is generated by the [[com.workflowfm.pew.execution.ProcessExecutor]] so we do not know it in advance. + */ trait PiEventHandlerFactory[T,H <: PiEventHandler[T]] { def build(id:T):H } +/** Example of a [[PiEventHandler]] that simply prints a string representation of the event to `System.err`. */ class PrintEventHandler[T](override val name:String) extends PiEventHandler[T] { val formatter = new SimpleDateFormat("YYYY-MM-dd HH:mm:ss.SSS") override def apply(e:PiEvent[T]) = { @@ -26,7 +31,7 @@ class PrintEventHandler[T](override val name:String) extends PiEventHandler[T] { } } - +/** A [[PiEventHandler]] consisting of a queue of multiple handlers. 
*/ case class MultiPiEventHandler[T](handlers:Queue[PiEventHandler[T]]) extends PiEventHandler[T] { override def name = handlers map (_.name) mkString(",") override def apply(e:PiEvent[T]) = handlers map (_(e)) forall (_ == true) diff --git a/src/com/workflowfm/pew/stream/PiObservable.scala b/src/com/workflowfm/pew/stream/PiObservable.scala index 3bb31c43..d34dbf43 100644 --- a/src/com/workflowfm/pew/stream/PiObservable.scala +++ b/src/com/workflowfm/pew/stream/PiObservable.scala @@ -4,19 +4,34 @@ import com.workflowfm.pew.PiEvent import scala.concurrent.{ Promise, Future, ExecutionContext } +/** Has the ability to publish [[PiEvent]]s. + * This is separate from [[PiObservable]] as in some cases publishing events + * and handling listeners happens in 2 different places. + */ trait PiPublisher[T] { protected def publish(evt:PiEvent[T]):Unit } +/** A kill switch allowing us to stop a [[PiEventHandler]]. */ trait PiSwitch { def stop:Unit } +/** Anything that can be observed by a [[PiEventHandler]]. + * This is separate from [[PiPublisher]] as in some cases publishing events + * and handling listeners happens in 2 different places. + */ trait PiObservable[T] { + /** Subscribes a [[com.workflowfm.pew.stream.PiEventHandler]] to observe. + * @param handler the handler to subscribe + * @return the [[com.workflowfm.pew.stream.PiSwitch]] that allows us to stop/unsubscribe the subscribed handler + */ def subscribe(handler:PiEventHandler[T]):Future[PiSwitch] } - +/** A simple [[PiObservable]] and [[PiPublisher]] with a mutable map of handlers. + * @note Assumes each handler has a unique name. 
+ */ trait SimplePiObservable[T] extends PiObservable[T] with PiPublisher[T] { import collection.mutable.Map @@ -25,7 +40,6 @@ trait SimplePiObservable[T] extends PiObservable[T] with PiPublisher[T] { protected val handlers:Map[String,PiEventHandler[T]] = Map[String,PiEventHandler[T]]() override def subscribe(handler:PiEventHandler[T]):Future[PiSwitch] = Future { - //System.err.println("Subscribed: " + handler.name) handlers += (handler.name -> handler) Switch(handler.name) } diff --git a/src/com/workflowfm/pew/stream/PiStream.scala b/src/com/workflowfm/pew/stream/PiStream.scala index 3cab4d36..bfe97af1 100644 --- a/src/com/workflowfm/pew/stream/PiStream.scala +++ b/src/com/workflowfm/pew/stream/PiStream.scala @@ -9,6 +9,7 @@ import akka.{ NotUsed, Done } import scala.concurrent.{ Future, ExecutionContext } +/** A [[PiObservable]] implemented using an Akka Stream Source. */ trait PiSource[T] extends PiObservable[T] { implicit val materializer:Materializer @@ -26,11 +27,15 @@ trait PiSource[T] extends PiObservable[T] { } +/** A [[PiSource]] that also implements [[PiPublisher]] with Akka Streams. + * Uses [[akka.stream.scaladsl.BroadcastHub]], which allows us to clone new sources and attach the + * handlers as sinks. 
+ */ trait PiStream[T] extends PiSource[T] with PiPublisher[T] { implicit val system:ActorSystem override implicit val materializer = ActorMaterializer() - private val sourceQueue = Source.queue[PiEvent[T]](PiSource.bufferSize, PiSource.overflowStrategy) + private val sourceQueue = Source.queue[PiEvent[T]](PiStream.bufferSize, PiStream.overflowStrategy) private val ( queue: SourceQueueWithComplete[PiEvent[T]], @@ -38,7 +43,6 @@ trait PiStream[T] extends PiSource[T] with PiPublisher[T] { ) = { val (q,s) = sourceQueue.toMat(BroadcastHub.sink(bufferSize = 256))(Keep.both).run() s.runWith(Sink.ignore) - //s.runForeach(e => println(s">>>>>>>>>> ${e.id} ${e.time - 1542976910000L}")) (q,s) } @@ -46,30 +50,28 @@ trait PiStream[T] extends PiSource[T] with PiPublisher[T] { override def getSource = source //def subscribe(sink:PiEvent[T]=>Unit):Future[Done] = source.runForeach(sink(_)) - + override def subscribe(handler:PiEventHandler[T]):Future[PiSwitch] = { // Using a shared kill switch even though we only want to shutdown one stream, because // the shared switch is available immediately (pre-materialization) so we can use it // as a sink. val killSwitch = KillSwitches.shared(s"kill:${handler.name}") val x = source - .via(killSwitch.flow) - //.map { e => println(s">>> ${handler.name}: ${e.id} ${e.time- 1542976910000L}") ; e } - .map(handler(_)) - .runForeach { r => if (r) { - //println(s"===KILLING IN THE NAME OF: ${handler.name}") - killSwitch.shutdown() } - } + .via(killSwitch.flow) // Use the killSwitch as a flow so we can kill the entire stream. + .map(handler(_)) // Run the events through the handler. 
+ .runForeach { r => if (r) { // if the returned value is true, we need to "unsubscribe" + killSwitch.shutdown() // we could not do this with a regular killSwitch, only with a shared one + } } Future.successful(PiKillSwitch(killSwitch)) } } -object PiSource { +object PiStream { val bufferSize = 5 val overflowStrategy = OverflowStrategy.backpressure } - +/** A wrapper of [[akka.stream.KillSwitch]] to stop [[PiEventHandler]]s. */ case class PiKillSwitch(switch:KillSwitch) extends PiSwitch { override def stop = switch.shutdown() } From 3e2706e37dc35b1d364befa1251b5559cea3082b Mon Sep 17 00:00:00 2001 From: Petros Papapanagiotou Date: Fri, 28 Jun 2019 02:07:50 +0100 Subject: [PATCH 20/55] `PiEventHandler` no longer needs a name. --- .../pew/execution/ProcessExecutor.scala | 4 ++-- .../workflowfm/pew/stream/PiEventHandler.scala | 4 +--- src/com/workflowfm/pew/stream/PiObservable.scala | 5 +++-- src/com/workflowfm/pew/stream/PiStream.scala | 2 +- .../workflowfm/pew/stream/PromiseHandler.scala | 14 ++++++-------- .../pew/execution/AkkaExecutorTests.scala | 16 ++++++++-------- .../pew/execution/SingleStateExecutorTests.scala | 2 +- .../pew/mongodb/MongoExecutorTests.scala | 2 +- .../pew/stateless/KafkaExecutorTests.scala | 6 +++--- 9 files changed, 26 insertions(+), 29 deletions(-) diff --git a/src/com/workflowfm/pew/execution/ProcessExecutor.scala b/src/com/workflowfm/pew/execution/ProcessExecutor.scala index b115c22e..2aeab721 100644 --- a/src/com/workflowfm/pew/execution/ProcessExecutor.scala +++ b/src/com/workflowfm/pew/execution/ProcessExecutor.scala @@ -84,7 +84,7 @@ trait ProcessExecutor[KeyT] { this:PiObservable[KeyT] => * @return A Future with the result of the executed process */ def execute(process:PiProcess,args:Seq[Any]):Future[Any] = - call(process,args,new ResultHandlerFactory[KeyT]({ id => s"[$id]"})) flatMap (_.future) + call(process,args,new ResultHandlerFactory[KeyT]) flatMap (_.future) } object ProcessExecutor { @@ -110,7 +110,7 @@ trait 
SimulatorExecutor[KeyT] extends ProcessExecutor[KeyT] { this:PiObservable[ * @return A Future with the result of the executed process */ def simulate(process:PiProcess,args:Seq[Any],timeout:FiniteDuration=10.seconds):Future[Any] = { - val f = call(process,args,new ResultHandlerFactory[KeyT]({ id => s"[$id]"})) + val f = call(process,args,new ResultHandlerFactory[KeyT]) val handler = Await.result(f, timeout) handler.future } diff --git a/src/com/workflowfm/pew/stream/PiEventHandler.scala b/src/com/workflowfm/pew/stream/PiEventHandler.scala index ab2ce6ee..05d5d79f 100644 --- a/src/com/workflowfm/pew/stream/PiEventHandler.scala +++ b/src/com/workflowfm/pew/stream/PiEventHandler.scala @@ -9,7 +9,6 @@ import scala.collection.immutable.Queue // Return true if the handler is done and needs to be unsubscribed. /** A listener for [[PiEvent]]s. */ trait PiEventHandler[KeyT] extends (PiEvent[KeyT]=>Boolean) { - def name:String /** Compose with another handler. */ def and(h:PiEventHandler[KeyT]) = MultiPiEventHandler(this,h) } @@ -22,7 +21,7 @@ trait PiEventHandlerFactory[T,H <: PiEventHandler[T]] { } /** Example of a [[PiEventHandler]] that simply prints a string representation of the event to `System.err`. */ -class PrintEventHandler[T](override val name:String) extends PiEventHandler[T] { +class PrintEventHandler[T] extends PiEventHandler[T] { val formatter = new SimpleDateFormat("YYYY-MM-dd HH:mm:ss.SSS") override def apply(e:PiEvent[T]) = { val time = formatter.format(e.rawTime) @@ -33,7 +32,6 @@ class PrintEventHandler[T](override val name:String) extends PiEventHandler[T] { /** A [[PiEventHandler]] consisting of a queue of multiple handlers. 
*/ case class MultiPiEventHandler[T](handlers:Queue[PiEventHandler[T]]) extends PiEventHandler[T] { - override def name = handlers map (_.name) mkString(",") override def apply(e:PiEvent[T]) = handlers map (_(e)) forall (_ == true) override def and(h:PiEventHandler[T]) = MultiPiEventHandler(handlers :+ h) } diff --git a/src/com/workflowfm/pew/stream/PiObservable.scala b/src/com/workflowfm/pew/stream/PiObservable.scala index d34dbf43..fa5a9025 100644 --- a/src/com/workflowfm/pew/stream/PiObservable.scala +++ b/src/com/workflowfm/pew/stream/PiObservable.scala @@ -40,8 +40,9 @@ trait SimplePiObservable[T] extends PiObservable[T] with PiPublisher[T] { protected val handlers:Map[String,PiEventHandler[T]] = Map[String,PiEventHandler[T]]() override def subscribe(handler:PiEventHandler[T]):Future[PiSwitch] = Future { - handlers += (handler.name -> handler) - Switch(handler.name) + val name = java.util.UUID.randomUUID.toString + handlers += (name -> handler) + Switch(name) } def unsubscribe(handlerName:String):Future[Boolean] = Future { diff --git a/src/com/workflowfm/pew/stream/PiStream.scala b/src/com/workflowfm/pew/stream/PiStream.scala index bfe97af1..c04e87e8 100644 --- a/src/com/workflowfm/pew/stream/PiStream.scala +++ b/src/com/workflowfm/pew/stream/PiStream.scala @@ -55,7 +55,7 @@ trait PiStream[T] extends PiSource[T] with PiPublisher[T] { // Using a shared kill switch even though we only want to shutdown one stream, because // the shared switch is available immediately (pre-materialization) so we can use it // as a sink. - val killSwitch = KillSwitches.shared(s"kill:${handler.name}") + val killSwitch = KillSwitches.shared("PiEventHandlerKillSwitch") val x = source .via(killSwitch.flow) // Use the killSwitch as a flow so we can kill the entire stream. .map(handler(_)) // Run the events through the handler. 
diff --git a/src/com/workflowfm/pew/stream/PromiseHandler.scala b/src/com/workflowfm/pew/stream/PromiseHandler.scala index f3f303a4..a1c19cf5 100644 --- a/src/com/workflowfm/pew/stream/PromiseHandler.scala +++ b/src/com/workflowfm/pew/stream/PromiseHandler.scala @@ -43,20 +43,19 @@ trait PromiseHandler[T,R] extends PiEventHandler[T] { def fail(exception:PiException[T]) :Either[R,Exception] } -class ResultHandler[T](override val name:String, override val id:T) extends PromiseHandler[T,Any] { +class ResultHandler[T](override val id:T) extends PromiseHandler[T,Any] { override def succeed(result:Any) = result override def fail(exception:PiException[T]) = Right(exception) } -class ResultHandlerFactory[T](name:T=>String) extends PiEventHandlerFactory[T,ResultHandler[T]] { - def this(name:String) = this { _:T => name } - override def build(id:T) = new ResultHandler[T](name(id),id) +class ResultHandlerFactory[T] extends PiEventHandlerFactory[T,ResultHandler[T]] { + override def build(id:T) = new ResultHandler[T](id) } -class CounterHandler[T](override val name:String, override val id:T) extends PromiseHandler[T,Int] { +class CounterHandler[T](override val id:T) extends PromiseHandler[T,Int] { private var counter:Int = 0 def count = counter @@ -70,9 +69,8 @@ class CounterHandler[T](override val name:String, override val id:T) extends Pro override def fail(exception:PiException[T]) = Left(counter) } -class CounterHandlerFactory[T](name:T=>String) extends PiEventHandlerFactory[T,CounterHandler[T]] { - def this(name:String) = this { _:T => name } - override def build(id:T) = new CounterHandler[T](name(id),id) +class CounterHandlerFactory[T] extends PiEventHandlerFactory[T,CounterHandler[T]] { + override def build(id:T) = new CounterHandler[T](id) } diff --git a/test/com/workflowfm/pew/execution/AkkaExecutorTests.scala b/test/com/workflowfm/pew/execution/AkkaExecutorTests.scala index 1b05344f..20e4cc11 100644 --- a/test/com/workflowfm/pew/execution/AkkaExecutorTests.scala +++ 
b/test/com/workflowfm/pew/execution/AkkaExecutorTests.scala @@ -145,8 +145,8 @@ class AkkaExecutorTests extends FlatSpec with Matchers with BeforeAndAfterAll wi it should "execute Rexample with a CounterHandler" in { val ex = new AkkaExecutor(pai,pbi,pci,ri) - val factory = new CounterHandlerFactory[Int]("counter") - val kill = ex.subscribe(new PrintEventHandler("printer")) + val factory = new CounterHandlerFactory[Int] + val kill = ex.subscribe(new PrintEventHandler) val f1 = ex.call(ri,Seq(21),factory) flatMap(_.future) @@ -158,9 +158,9 @@ class AkkaExecutorTests extends FlatSpec with Matchers with BeforeAndAfterAll wi it should "allow separate handlers per executor" in { val ex = new AkkaExecutor(pai,pbi,pci,ri) val ex2 = new AkkaExecutor(pai,pbi,pci,ri) - val factory = new CounterHandlerFactory[Int]("counter") - val k1 = ex.subscribe(new PrintEventHandler("printer")) - val k2 = ex2.subscribe(new PrintEventHandler("printer")) + val factory = new CounterHandlerFactory[Int] + val k1 = ex.subscribe(new PrintEventHandler) + val k2 = ex2.subscribe(new PrintEventHandler) val f1 = ex.call(ri,Seq(99),factory) flatMap(_.future) val f2 = ex2.execute(ri,Seq(11)) @@ -177,7 +177,7 @@ class AkkaExecutorTests extends FlatSpec with Matchers with BeforeAndAfterAll wi it should "allow separate handlers for separate workflows" in { val ex = new AkkaExecutor(pai,pbi,pci,ri) - val factory = new CounterHandlerFactory[Int]("counter" + _) + val factory = new CounterHandlerFactory[Int] val f1 = ex.call(ri,Seq(55),factory) flatMap(_.future) val f2 = ex.call(ri,Seq(11),factory) flatMap(_.future) @@ -194,8 +194,8 @@ class AkkaExecutorTests extends FlatSpec with Matchers with BeforeAndAfterAll wi it should "unsubscribe handlers successfully" in { val ex = new AkkaExecutor(pai,pbi,pci,ri) - val factory = new CounterHandlerFactory[Int]("counter" + _) - val kill = ex.subscribe(new PrintEventHandler("printerX")) + val factory = new CounterHandlerFactory[Int] + val kill = ex.subscribe(new 
PrintEventHandler) kill.map(_.stop) val f1 = ex.call(ri,Seq(55),factory) flatMap(_.future) diff --git a/test/com/workflowfm/pew/execution/SingleStateExecutorTests.scala b/test/com/workflowfm/pew/execution/SingleStateExecutorTests.scala index dba17aca..966f9dba 100644 --- a/test/com/workflowfm/pew/execution/SingleStateExecutorTests.scala +++ b/test/com/workflowfm/pew/execution/SingleStateExecutorTests.scala @@ -29,7 +29,7 @@ class SingleStateExecutorTests extends FlatSpec with Matchers with ProcessExecut "SingleStateExecutor" should "execute Rexample concurrently" in { val executor = new SingleStateExecutor(pai,pbi,pci,ri) - executor.subscribe(new PrintEventHandler("printer")) + executor.subscribe(new PrintEventHandler) exe(executor,ri,13)//.isEmpty should be( false ) //exe(new SingleStateExecutor(pai,pbi,pci,ri),ri,31)//.isEmpty should be( false ) } diff --git a/test/com/workflowfm/pew/mongodb/MongoExecutorTests.scala b/test/com/workflowfm/pew/mongodb/MongoExecutorTests.scala index e0f2de50..661477b5 100644 --- a/test/com/workflowfm/pew/mongodb/MongoExecutorTests.scala +++ b/test/com/workflowfm/pew/mongodb/MongoExecutorTests.scala @@ -116,7 +116,7 @@ class MongoExecutorTests extends FlatSpec with Matchers with BeforeAndAfterAll w it should "fail properly when a workflow doesn't exist" in { val ex = new MongoExecutor(client, "pew", "test_exec_insts",pai,pbi,pci,pci2,ri,ri2) val id = new ObjectId() - val handler = new ResultHandler("unitHandler",id) + val handler = new ResultHandler(id) ex.subscribe(handler) ex.postResult(id, 0, MetadataAtomicProcess.result(PiItem(0))) diff --git a/test/com/workflowfm/pew/stateless/KafkaExecutorTests.scala b/test/com/workflowfm/pew/stateless/KafkaExecutorTests.scala index fec14643..d90fc129 100644 --- a/test/com/workflowfm/pew/stateless/KafkaExecutorTests.scala +++ b/test/com/workflowfm/pew/stateless/KafkaExecutorTests.scala @@ -239,7 +239,7 @@ class KafkaExecutorTests def call( p: PiProcess, args: PiObject* ): Future[Any] = { val 
pii = PiInstance(ObjectId.get, p, args: _*) - val handler = new ResultHandler("test", pii.id) + val handler = new ResultHandler(pii.id) listener.subscribe(handler) sendMessages(ReduceRequest(pii, Seq()), PiiLog(PiEventStart(pii))) @@ -290,7 +290,7 @@ class KafkaExecutorTests def baremetalCall( ex: CustomKafkaExecutor, p: PiProcess, args: PiObject* ): Future[Any] = { val piiId = await( ex.init( p, args.toSeq ) ) - val handler = new ResultHandler("test", piiId) + val handler = new ResultHandler(piiId) ex.subscribe(handler) ex.start(piiId) @@ -682,7 +682,7 @@ class KafkaExecutorTests val onCompletion: MessageMap = { - val handler = new ResultHandler[ObjectId]("testhandler", ourPiiId) + val handler = new ResultHandler[ObjectId](ourPiiId) val ex2 = makeExecutor(completeProcess.settings) tryBut { From 00e8805eb98f6dadf93be08a3e5d120829115513 Mon Sep 17 00:00:00 2001 From: Petros Papapanagiotou Date: Fri, 28 Jun 2019 02:27:31 +0100 Subject: [PATCH 21/55] `AkkaExecutor` now uses `java.util.UUID` for workflow IDs No longer needs the internal counter as well. Also fixed `MetricsHandler` to not need a name. 
--- .../pew/execution/AkkaExecutor.scala | 197 +++++++++--------- src/com/workflowfm/pew/metrics/Measure.scala | 2 +- .../workflowfm/pew/stream/PiObservable.scala | 4 +- .../pew/execution/AkkaExecutorTests.scala | 9 +- .../workflowfm/pew/metrics/MetricsTests.scala | 11 +- 5 files changed, 111 insertions(+), 112 deletions(-) diff --git a/src/com/workflowfm/pew/execution/AkkaExecutor.scala b/src/com/workflowfm/pew/execution/AkkaExecutor.scala index bd536fd4..c306fac6 100644 --- a/src/com/workflowfm/pew/execution/AkkaExecutor.scala +++ b/src/com/workflowfm/pew/execution/AkkaExecutor.scala @@ -10,48 +10,49 @@ import scala.concurrent._ import scala.concurrent.duration._ import scala.util.{Failure, Success} +import java.util.UUID class AkkaExecutor ( - store:PiInstanceStore[Int], + store:PiInstanceStore[UUID], processes:PiProcessStore )( implicit val system: ActorSystem, override implicit val executionContext: ExecutionContext = ExecutionContext.global, implicit val timeout:FiniteDuration = 10.seconds -) extends SimulatorExecutor[Int] with PiObservable[Int] { +) extends SimulatorExecutor[UUID] with PiObservable[UUID] { - def this(store:PiInstanceStore[Int], l:PiProcess*) + def this(store:PiInstanceStore[UUID], l:PiProcess*) (implicit system: ActorSystem, context: ExecutionContext, timeout:FiniteDuration) = this(store,SimpleProcessStore(l :_*)) def this(system: ActorSystem, context: ExecutionContext, timeout:FiniteDuration,l:PiProcess*) = - this(SimpleInstanceStore[Int](),SimpleProcessStore(l :_*))(system,context,timeout) + this(SimpleInstanceStore[UUID](),SimpleProcessStore(l :_*))(system,context,timeout) def this(l:PiProcess*)(implicit system: ActorSystem) = - this(SimpleInstanceStore[Int](),SimpleProcessStore(l :_*))(system,ExecutionContext.global,10.seconds) + this(SimpleInstanceStore[UUID](),SimpleProcessStore(l :_*))(system,ExecutionContext.global,10.seconds) val execActor = system.actorOf(AkkaExecutor.execprops(store,processes)) - implicit val tOut = 
Timeout(timeout) + implicit val tOut = Timeout(timeout) override def simulationReady = Await.result(execActor ? AkkaExecutor.SimReady,timeout).asInstanceOf[Boolean] - override protected def init(process:PiProcess,args:Seq[PiObject]):Future[Int] = - execActor ? AkkaExecutor.Init(process,args) map (_.asInstanceOf[Int]) - - override protected def start(id:Int) = execActor ! AkkaExecutor.Start(id) + override protected def init(process:PiProcess,args:Seq[PiObject]):Future[UUID] = + execActor ? AkkaExecutor.Init(process,args) map (_.asInstanceOf[UUID]) + + override protected def start(id:UUID) = execActor ! AkkaExecutor.Start(id) - override def subscribe(handler:PiEventHandler[Int]):Future[PiSwitch] = (execActor ? AkkaExecutor.Subscribe(handler)).mapTo[PiSwitch] + override def subscribe(handler:PiEventHandler[UUID]):Future[PiSwitch] = (execActor ? AkkaExecutor.Subscribe(handler)).mapTo[PiSwitch] } object AkkaExecutor { case class Init(p:PiProcess,args:Seq[PiObject]) - case class Start(id:Int) - case class Result(id:Int,ref:Int,res:MetadataAtomicProcess.MetadataAtomicResult) - case class Error(id:Int,ref:Int,ex:Throwable) + case class Start(id:UUID) + case class Result(id:UUID,ref:Int,res:MetadataAtomicProcess.MetadataAtomicResult) + case class Error(id:UUID,ref:Int,ex:Throwable) - case class ACall(id:Int,ref:Int,p:MetadataAtomicProcess,args:Seq[PiObject],actor:ActorRef) + case class ACall(id:UUID,ref:Int,p:MetadataAtomicProcess,args:Seq[PiObject],actor:ActorRef) case object AckCall case class AFuture(f:Future[Any]) @@ -59,130 +60,128 @@ object AkkaExecutor { case object Ping case object SimReady - case class Subscribe(handler:PiEventHandler[Int]) + case class Subscribe(handler:PiEventHandler[UUID]) def atomicprops(implicit context: ExecutionContext = ExecutionContext.global): Props = Props(new AkkaAtomicProcessExecutor()) - def execprops(store:PiInstanceStore[Int], processes:PiProcessStore)(implicit system: ActorSystem, exc: ExecutionContext): Props = Props(new 
AkkaExecActor(store,processes)) + def execprops(store:PiInstanceStore[UUID], processes:PiProcessStore)(implicit system: ActorSystem, exc: ExecutionContext): Props = Props(new AkkaExecActor(store,processes)) } class AkkaExecActor( - var store:PiInstanceStore[Int], + var store:PiInstanceStore[UUID], processes:PiProcessStore )( override implicit val system: ActorSystem, implicit val executionContext: ExecutionContext = ExecutionContext.global -) extends Actor with PiStream[Int] { - - var ctr:Int = 0 +) extends Actor with PiStream[UUID] { - def init(p:PiProcess,args:Seq[PiObject]):Int = { - val inst = PiInstance(ctr,p,args:_*) - store = store.put(inst) - ctr = ctr + 1 - ctr-1 + def init(p:PiProcess,args:Seq[PiObject]):UUID = { + val id = java.util.UUID.randomUUID + val inst = PiInstance(id,p,args:_*) + store = store.put(inst) + id } - def start(id:Int):Unit = store.get(id) match { + def start(id:UUID):Unit = store.get(id) match { case None => publish(PiFailureNoSuchInstance(id)) case Some(inst) => { publish(PiEventStart(inst)) val ni = inst.reduce if (ni.completed) ni.result match { - case None => { - publish(PiFailureNoResult(ni)) - store = store.del(id) - } - case Some(res) => { - publish(PiEventResult(ni, res)) - store = store.del(id) - } + case None => { + publish(PiFailureNoResult(ni)) + store = store.del(id) + } + case Some(res) => { + publish(PiEventResult(ni, res)) + store = store.del(id) + } } else { - val (toCall,resi) = ni.handleThreads(handleThread(ni)) - val futureCalls = toCall flatMap (resi.piFutureOf) - //System.err.println("*** [" + ctr + "] Updating state after init") - store = store.put(resi) - (toCall zip futureCalls) map runThread(resi) + val (toCall,resi) = ni.handleThreads(handleThread(ni)) + val futureCalls = toCall flatMap (resi.piFutureOf) + //System.err.println("*** [" + ctr + "] Updating state after init") + store = store.put(resi) + (toCall zip futureCalls) map runThread(resi) } } } - final def postResult(id:Int,ref:Int, 
res:MetadataAtomicProcess.MetadataAtomicResult):Unit = { + final def postResult(id:UUID, ref:Int, res:MetadataAtomicProcess.MetadataAtomicResult):Unit = { publish(PiEventReturn(id,ref,PiObject.get(res._1),res._2)) store.get(id) match { case None => publish(PiFailureNoSuchInstance(id)) - case Some(i) => + case Some(i) => if (i.id != id) System.err.println("*** [" + id + "] Different instance ID encountered: " + i.id) // This should never happen. We trust the Instance Store! else { //System.err.println("*** [" + id + "] Running!") val ni = i.postResult(ref, res._1).reduce - if (ni.completed) ni.result match { - case None => { - publish(PiFailureNoResult(ni)) - store = store.del(ni.id) - } - case Some(res) => { - publish(PiEventResult(ni, res)) - store = store.del(ni.id) - } - } else { - val (toCall,resi) = ni.handleThreads(handleThread(ni)) - val futureCalls = toCall flatMap (resi.piFutureOf) - //System.err.println("*** [" + i.id + "] Updating state after: " + ref) - store = store.put(resi) - (toCall zip futureCalls) map runThread(resi) - } + if (ni.completed) ni.result match { + case None => { + publish(PiFailureNoResult(ni)) + store = store.del(ni.id) + } + case Some(res) => { + publish(PiEventResult(ni, res)) + store = store.del(ni.id) + } + } else { + val (toCall,resi) = ni.handleThreads(handleThread(ni)) + val futureCalls = toCall flatMap (resi.piFutureOf) + //System.err.println("*** [" + i.id + "] Updating state after: " + ref) + store = store.put(resi) + (toCall zip futureCalls) map runThread(resi) + } } - } + } } - - def handleThread(i:PiInstance[Int])(ref:Int,f:PiFuture):Boolean = { + + def handleThread(i:PiInstance[UUID])(ref:Int,f:PiFuture):Boolean = { //System.err.println("*** [" + id + "] Checking thread: " + ref + " (" + f.fun + ")") f match { - case PiFuture(name, outChan, args) => i.getProc(name) match { - case None => { - publish(PiFailureUnknownProcess(i, name)) - false - } - case Some(p:MetadataAtomicProcess) => true - case Some(p:CompositeProcess) 
=> { // TODO this should never happen! - publish(PiFailureAtomicProcessIsComposite(i, name)) - false + case PiFuture(name, outChan, args) => i.getProc(name) match { + case None => { + publish(PiFailureUnknownProcess(i, name)) + false + } + case Some(p:MetadataAtomicProcess) => true + case Some(p:CompositeProcess) => { // TODO this should never happen! + publish(PiFailureAtomicProcessIsComposite(i, name)) + false + } } - } - } } - // + } } + // - def runThread(i:PiInstance[Int])(t:(Int,PiFuture)):Unit = { + def runThread(i:PiInstance[UUID])(t:(Int,PiFuture)):Unit = { //System.err.println("*** [" + id + "] Running thread: " + t._1 + " (" + t._2.fun + ")") t match { - case (ref,PiFuture(name, outChan, args)) => i.getProc(name) match { - case None => { - // This should never happen! We already checked! - System.err.println("*** [" + i.id + "] ERROR *** Unable to find process: " + name + " even though we checked already") - } - case Some(p:MetadataAtomicProcess) => { - implicit val tOut = Timeout(1.second) - val objs = args map (_.obj) - try { - publish(PiEventCall(i.id,ref,p,objs)) - // TODO Change from ! to ? to require an acknowledgement - system.actorOf(AkkaExecutor.atomicprops()) ! AkkaExecutor.ACall(i.id,ref,p,objs,self) - } catch { - case _:Throwable => Unit //TODO specify timeout exception here! - also print a warning + case (ref,PiFuture(name, outChan, args)) => i.getProc(name) match { + case None => { + // This should never happen! We already checked! + System.err.println("*** [" + i.id + "] ERROR *** Unable to find process: " + name + " even though we checked already") + } + case Some(p:MetadataAtomicProcess) => { + implicit val tOut = Timeout(1.second) + val objs = args map (_.obj) + try { + publish(PiEventCall(i.id,ref,p,objs)) + // TODO Change from ! to ? to require an acknowledgement + system.actorOf(AkkaExecutor.atomicprops()) ! AkkaExecutor.ACall(i.id,ref,p,objs,self) + } catch { + case _:Throwable => Unit //TODO specify timeout exception here! 
- also print a warning + } + } + case Some(p:CompositeProcess) => {// This should never happen! We already checked! + publish(PiFailureAtomicProcessIsComposite(i, name)) } } - case Some(p:CompositeProcess) => {// This should never happen! We already checked! - publish(PiFailureAtomicProcessIsComposite(i, name)) - } - } - } } - + } } + def simulationReady():Boolean = store.simulationReady def receive = { case AkkaExecutor.Init(p,args) => sender() ! init(p,args) case AkkaExecutor.Start(id) => start(id) - case AkkaExecutor.Result(id,ref,res) => postResult(id,ref,res) + case AkkaExecutor.Result(id,ref,res) => postResult(id,ref,res) case AkkaExecutor.Error(id,ref,ex) => { publish( PiFailureAtomicProcessException(id,ref,ex) ) store = store.del(id) @@ -197,12 +196,12 @@ class AkkaExecActor( } class AkkaAtomicProcessExecutor(implicit val exc: ExecutionContext = ExecutionContext.global) extends Actor { //(executor:ActorRef,p:PiProcess,args:Seq[PiObject]) - def receive = { + def receive = { case AkkaExecutor.ACall(id,ref,p,args,actor) => { //System.err.println("*** [" + id + "] Calling atomic process: " + p.name + " ref:" + ref) - p.runMeta(args).onComplete{ - case Success(res) => actor ! AkkaExecutor.Result(id,ref,res) - case Failure(ex) => actor ! AkkaExecutor.Error(id,ref,ex) + p.runMeta(args).onComplete{ + case Success(res) => actor ! AkkaExecutor.Result(id,ref,res) + case Failure(ex) => actor ! AkkaExecutor.Error(id,ref,ex) } actor ! 
AkkaExecutor.AckCall } diff --git a/src/com/workflowfm/pew/metrics/Measure.scala b/src/com/workflowfm/pew/metrics/Measure.scala index 31a96f29..da1cce8f 100644 --- a/src/com/workflowfm/pew/metrics/Measure.scala +++ b/src/com/workflowfm/pew/metrics/Measure.scala @@ -66,7 +66,7 @@ class MetricsAggregator[KeyT] { def processSet = processMap.values.flatMap(_.values.map(_.process)).toSet[String] } -class MetricsHandler[KeyT](override val name: String, timeFn: PiMetadata.Key[Long] = PiMetadata.SystemTime ) +class MetricsHandler[KeyT](timeFn: PiMetadata.Key[Long] = PiMetadata.SystemTime) extends MetricsAggregator[KeyT] with PiEventHandler[KeyT] { override def apply( e: PiEvent[KeyT] ): Boolean = { diff --git a/src/com/workflowfm/pew/stream/PiObservable.scala b/src/com/workflowfm/pew/stream/PiObservable.scala index fa5a9025..b9b328ce 100644 --- a/src/com/workflowfm/pew/stream/PiObservable.scala +++ b/src/com/workflowfm/pew/stream/PiObservable.scala @@ -29,9 +29,7 @@ trait PiObservable[T] { def subscribe(handler:PiEventHandler[T]):Future[PiSwitch] } -/** A simple [[PiObservable]] and [[PiPublisher]] with a mutable map of handlers. - * @note Assumes each handler has a unique name. - */ +/** A simple [[PiObservable]] and [[PiPublisher]] with a mutable map of handlers. 
*/ trait SimplePiObservable[T] extends PiObservable[T] with PiPublisher[T] { import collection.mutable.Map diff --git a/test/com/workflowfm/pew/execution/AkkaExecutorTests.scala b/test/com/workflowfm/pew/execution/AkkaExecutorTests.scala index 20e4cc11..aec554df 100644 --- a/test/com/workflowfm/pew/execution/AkkaExecutorTests.scala +++ b/test/com/workflowfm/pew/execution/AkkaExecutorTests.scala @@ -10,6 +10,7 @@ import com.workflowfm.pew._ import com.workflowfm.pew.stream._ import com.workflowfm.pew.execution._ import RexampleTypes._ +import java.util.UUID @RunWith(classOf[JUnitRunner]) class AkkaExecutorTests extends FlatSpec with Matchers with BeforeAndAfterAll with ProcessExecutorTester { @@ -145,7 +146,7 @@ class AkkaExecutorTests extends FlatSpec with Matchers with BeforeAndAfterAll wi it should "execute Rexample with a CounterHandler" in { val ex = new AkkaExecutor(pai,pbi,pci,ri) - val factory = new CounterHandlerFactory[Int] + val factory = new CounterHandlerFactory[UUID] val kill = ex.subscribe(new PrintEventHandler) val f1 = ex.call(ri,Seq(21),factory) flatMap(_.future) @@ -158,7 +159,7 @@ class AkkaExecutorTests extends FlatSpec with Matchers with BeforeAndAfterAll wi it should "allow separate handlers per executor" in { val ex = new AkkaExecutor(pai,pbi,pci,ri) val ex2 = new AkkaExecutor(pai,pbi,pci,ri) - val factory = new CounterHandlerFactory[Int] + val factory = new CounterHandlerFactory[UUID] val k1 = ex.subscribe(new PrintEventHandler) val k2 = ex2.subscribe(new PrintEventHandler) @@ -177,7 +178,7 @@ class AkkaExecutorTests extends FlatSpec with Matchers with BeforeAndAfterAll wi it should "allow separate handlers for separate workflows" in { val ex = new AkkaExecutor(pai,pbi,pci,ri) - val factory = new CounterHandlerFactory[Int] + val factory = new CounterHandlerFactory[UUID] val f1 = ex.call(ri,Seq(55),factory) flatMap(_.future) val f2 = ex.call(ri,Seq(11),factory) flatMap(_.future) @@ -194,7 +195,7 @@ class AkkaExecutorTests extends FlatSpec 
with Matchers with BeforeAndAfterAll wi it should "unsubscribe handlers successfully" in { val ex = new AkkaExecutor(pai,pbi,pci,ri) - val factory = new CounterHandlerFactory[Int] + val factory = new CounterHandlerFactory[UUID] val kill = ex.subscribe(new PrintEventHandler) kill.map(_.stop) diff --git a/test/com/workflowfm/pew/metrics/MetricsTests.scala b/test/com/workflowfm/pew/metrics/MetricsTests.scala index bbcc86a5..9f7d1720 100644 --- a/test/com/workflowfm/pew/metrics/MetricsTests.scala +++ b/test/com/workflowfm/pew/metrics/MetricsTests.scala @@ -13,6 +13,7 @@ import com.workflowfm.pew._ import com.workflowfm.pew.stream._ import com.workflowfm.pew.execution._ import RexampleTypes._ +import java.util.UUID @RunWith(classOf[JUnitRunner]) class MetricsTests extends FlatSpec with Matchers with BeforeAndAfterAll with ProcessExecutorTester { @@ -34,7 +35,7 @@ class MetricsTests extends FlatSpec with Matchers with BeforeAndAfterAll with Pr } it should "measure things" in { - val handler = new MetricsHandler[Int]("metrics") + val handler = new MetricsHandler[UUID] val ex = new AkkaExecutor(pai,pbi,pci,ri) val k1 = ex.subscribe(handler) @@ -47,11 +48,11 @@ class MetricsTests extends FlatSpec with Matchers with BeforeAndAfterAll with Pr handler.keys.size shouldBe 1 handler.processMetrics.size shouldBe 3 handler.workflowMetrics.size shouldBe 1 - handler.processMetricsOf(0).size shouldBe 3 + //handler.processMetricsOf(0).size shouldBe 3 // TODO need to find a way to test this } it should "output a D3 timeline of 3 Rexample workflows" in { - val handler = new MetricsHandler[Int]("metrics") + val handler = new MetricsHandler[UUID] val ex = new AkkaExecutor(pai,pbi,pci,ri) val k1 = ex.subscribe(handler) @@ -69,8 +70,8 @@ class MetricsTests extends FlatSpec with Matchers with BeforeAndAfterAll with Pr k1.map(_.stop) - new MetricsPrinter[Int]()(handler) - new MetricsD3Timeline[Int]("resources/d3-timeline","Rexample3")(handler) + new MetricsPrinter[UUID]()(handler) + new 
MetricsD3Timeline[UUID]("resources/d3-timeline","Rexample3")(handler) } } From d519408939effd5cbceedf39636d01512c956a6f Mon Sep 17 00:00:00 2001 From: Petros Papapanagiotou Date: Fri, 28 Jun 2019 02:40:50 +0100 Subject: [PATCH 22/55] Fixes broken `SingleStateExecutorTests` --- .../com/workflowfm/pew/execution/SingleStateExecutorTests.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/com/workflowfm/pew/execution/SingleStateExecutorTests.scala b/test/com/workflowfm/pew/execution/SingleStateExecutorTests.scala index 966f9dba..c212fde3 100644 --- a/test/com/workflowfm/pew/execution/SingleStateExecutorTests.scala +++ b/test/com/workflowfm/pew/execution/SingleStateExecutorTests.scala @@ -41,7 +41,7 @@ class SingleStateExecutorTests extends FlatSpec with Matchers with ProcessExecut try { await(f1) } catch { - case (e:Exception) => e.getMessage.contains("Exception: Fail") should be (true) + case (e:Exception) => e.getMessage.contains("Fail") should be (true) } } From 00148890cac838d34413b84c2cac76fa906b115e Mon Sep 17 00:00:00 2001 From: Petros Papapanagiotou Date: Fri, 28 Jun 2019 09:49:31 +0100 Subject: [PATCH 23/55] Removes legacy pom.xml --- pom.xml | 50 -------------------------------------------------- 1 file changed, 50 deletions(-) delete mode 100644 pom.xml diff --git a/pom.xml b/pom.xml deleted file mode 100644 index b8725dd4..00000000 --- a/pom.xml +++ /dev/null @@ -1,50 +0,0 @@ - - 4.0.0 - workflowfm-pew - pew - 0.0.1-SNAPSHOT - WorkflowFM PEW - Persistent Execution of Workflows - - src - - - maven-compiler-plugin - 3.7.0 - - 1.8 - 1.8 - - - - - - - org.scalatest - scalatest-maven-plugin - 1.0 - maven-plugin - - - junit - junit - 4.8.2 - - - org.scalatest - scalatest_2.12 - 3.2.0-SNAP10 - test - - - com.typesafe.akka - akka-actor_2.12.0-RC2 - 2.4.12 - - - org.mongodb.scala - mongo-scala-driver_2.12 - 2.2.1 - - - \ No newline at end of file From def134f933c034cbd13e1bbff6f2313641eebc3e Mon Sep 17 00:00:00 2001 From: Petros 
Papapanagiotou Date: Fri, 12 Jul 2019 16:24:15 +0100 Subject: [PATCH 24/55] Cleans up simulator stuff and adds subproject for pew-simulator --- build.sbt | 38 ++- .../d3-timeline/data-example-simulation.js | 103 ------- .../d3-timeline/pew-timeline-simulation.html | 19 -- .../d3-timeline/pew-timeline-simulation.js | 121 -------- simulator/.gitignore | 17 ++ .../pew/simulator/PiSimulation.scala | 4 + skiexample/.project | 13 - .../pew/simulation/Coordinator.scala | 282 ------------------ .../pew/simulation/Generators.scala | 16 - .../workflowfm/pew/simulation/Scheduler.scala | 167 ----------- .../pew/simulation/Simulation.scala | 40 --- src/com/workflowfm/pew/simulation/Task.scala | 101 ------- .../pew/simulation/TaskResource.scala | 62 ---- .../pew/simulation/metrics/Actor.scala | 51 ---- .../pew/simulation/metrics/Measure.scala | 254 ---------------- .../pew/simulation/metrics/Output.scala | 241 --------------- .../pew/simulation/CoordinatorTests.scala | 204 ------------- .../workflowfm/pew/simulation/Scheduler.scala | 191 ------------ test/com/workflowfm/pew/simulation/Task.scala | 52 ---- 19 files changed, 43 insertions(+), 1933 deletions(-) delete mode 100644 resources/d3-timeline/data-example-simulation.js delete mode 100644 resources/d3-timeline/pew-timeline-simulation.html delete mode 100644 resources/d3-timeline/pew-timeline-simulation.js create mode 100644 simulator/.gitignore create mode 100644 simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimulation.scala delete mode 100644 skiexample/.project delete mode 100644 src/com/workflowfm/pew/simulation/Coordinator.scala delete mode 100644 src/com/workflowfm/pew/simulation/Generators.scala delete mode 100644 src/com/workflowfm/pew/simulation/Scheduler.scala delete mode 100644 src/com/workflowfm/pew/simulation/Simulation.scala delete mode 100644 src/com/workflowfm/pew/simulation/Task.scala delete mode 100644 src/com/workflowfm/pew/simulation/TaskResource.scala delete mode 100644 
src/com/workflowfm/pew/simulation/metrics/Actor.scala delete mode 100644 src/com/workflowfm/pew/simulation/metrics/Measure.scala delete mode 100644 src/com/workflowfm/pew/simulation/metrics/Output.scala delete mode 100644 test/com/workflowfm/pew/simulation/CoordinatorTests.scala delete mode 100644 test/com/workflowfm/pew/simulation/Scheduler.scala delete mode 100644 test/com/workflowfm/pew/simulation/Task.scala diff --git a/build.sbt b/build.sbt index 6b4a8713..8cccbd20 100644 --- a/build.sbt +++ b/build.sbt @@ -1,5 +1,3 @@ -name := "PEW" - sbtVersion := "1.2.6" lazy val commonSettings = Seq ( @@ -37,21 +35,29 @@ libraryDependencies += "org.mongodb.scala" %% "mongo-scala-driver" % "2.2.1" libraryDependencies += "junit" % "junit" % "4.8.2" -EclipseKeys.preTasks := Seq(compile in Compile, compile in Test) +lazy val skiexample = project + .in(file("skiexample")) + .settings( + commonSettings, + scalaSource in Compile := baseDirectory.value / "src", + scalaSource in Test := baseDirectory.value / "test" + ).dependsOn(rootRef) + +lazy val simulator = project + .in(file("simulator")) + .settings( + commonSettings, + name := "pew-simulator", + libraryDependencies += "com.workflowfm" %% "workflowfm-simulator" % "0.1" + ).dependsOn(rootRef) -lazy val skiexample = project - .in(file("skiexample")) - .settings( - commonSettings, - scalaSource in Compile := baseDirectory.value / "src", - scalaSource in Test := baseDirectory.value / "test" - ).dependsOn(rootRef) lazy val root = project - .in(file(".")) - .settings( - commonSettings, - scalaSource in Compile := baseDirectory.value / "src", - scalaSource in Test := baseDirectory.value / "test" - ) + .in(file(".")) + .settings( + commonSettings, + name := "pew", + scalaSource in Compile := baseDirectory.value / "src", + scalaSource in Test := baseDirectory.value / "test" + ) lazy val rootRef = LocalProject("root") diff --git a/resources/d3-timeline/data-example-simulation.js b/resources/d3-timeline/data-example-simulation.js 
deleted file mode 100644 index c031ce20..00000000 --- a/resources/d3-timeline/data-example-simulation.js +++ /dev/null @@ -1,103 +0,0 @@ -var tasks = [ - "AwardContract", - "ProvideService", - "TaskSimTask", - "CheckOutcome", -]; - -var resourceData = [ -{label: "Petros", times: [ - {"label":"AwardContract(D2)", task: "AwardContract", "starting_time": 0, "ending_time": 1, delay: 0, cost: 2}, - {"label":"TaskSimTask(TaskSim)", task: "TaskSimTask", "starting_time": 6, "ending_time": 11, delay: 0, cost: 15}, - {"label":"CheckOutcome(D2)", task: "CheckOutcome", "starting_time": 11, "ending_time": 13, delay: 5, cost: 3}, - {"label":"AwardContract(A1)", task: "AwardContract", "starting_time": 13, "ending_time": 14, delay: 4, cost: 2}, - {"label":"AwardContract(A3)", task: "AwardContract", "starting_time": 14, "ending_time": 15, delay: 3, cost: 2}, - {"label":"AwardContract(A7)", task: "AwardContract", "starting_time": 22, "ending_time": 23, delay: 0, cost: 2}, - {"label":"ProvideService(D4)", task: "ProvideService", "starting_time": 23, "ending_time": 28, delay: 1, cost: 6}, - {"label":"AwardContract(D8)", task: "AwardContract", "starting_time": 28, "ending_time": 29, delay: 6, cost: 2}, -]}, -{label: "Orphen", times: [ - {"label":"ProvideService(D2)", task: "ProvideService", "starting_time": 1, "ending_time": 6, delay: 0, cost: 6}, - {"label":"TaskSimTask(TaskSim)", task: "TaskSimTask", "starting_time": 6, "ending_time": 11, delay: 0, cost: 15}, - {"label":"ProvideService(A5)", task: "ProvideService", "starting_time": 11, "ending_time": 16, delay: 1, cost: 6}, - {"label":"ProvideService(D6)", task: "ProvideService", "starting_time": 16, "ending_time": 21, delay: 5, cost: 6}, - {"label":"AwardContract(D4)", task: "AwardContract", "starting_time": 21, "ending_time": 22, delay: 9, cost: 2}, - {"label":"ProvideService(A1)", task: "ProvideService", "starting_time": 22, "ending_time": 27, delay: 8, cost: 6}, - {"label":"ProvideService(A3)", task: "ProvideService", 
"starting_time": 27, "ending_time": 32, delay: 12, cost: 6}, - {"label":"CheckOutcome(A5)", task: "CheckOutcome", "starting_time": 32, "ending_time": 34, delay: 16, cost: 3}, - {"label":"CheckOutcome(D4)", task: "CheckOutcome", "starting_time": 34, "ending_time": 36, delay: 6, cost: 3}, - {"label":"ProvideService(A7)", task: "ProvideService", "starting_time": 37, "ending_time": 42, delay: 14, cost: 6}, - {"label":"CheckOutcome(A1)", task: "CheckOutcome", "starting_time": 42, "ending_time": 44, delay: 15, cost: 3}, - {"label":"CheckOutcome(A3)", task: "CheckOutcome", "starting_time": 44, "ending_time": 46, delay: 12, cost: 3}, - {"label":"CheckOutcome(A7)", task: "CheckOutcome", "starting_time": 46, "ending_time": 48, delay: 4, cost: 3}, -]}, -{label: "Patient1", times: [ - {"label":"ProvideService(D2)", task: "ProvideService", "starting_time": 1, "ending_time": 6, delay: 0, cost: 6}, - {"label":"CheckOutcome(D2)", task: "CheckOutcome", "starting_time": 11, "ending_time": 13, delay: 5, cost: 3}, - {"label":"ProvideService(A1)", task: "ProvideService", "starting_time": 22, "ending_time": 27, delay: 8, cost: 6}, - {"label":"ProvideService(A3)", task: "ProvideService", "starting_time": 27, "ending_time": 32, delay: 12, cost: 6}, - {"label":"ProvideService(D8)", task: "ProvideService", "starting_time": 32, "ending_time": 37, delay: 3, cost: 6}, - {"label":"ProvideService(A7)", task: "ProvideService", "starting_time": 37, "ending_time": 42, delay: 14, cost: 6}, - {"label":"CheckOutcome(A1)", task: "CheckOutcome", "starting_time": 42, "ending_time": 44, delay: 15, cost: 3}, - {"label":"CheckOutcome(A3)", task: "CheckOutcome", "starting_time": 44, "ending_time": 46, delay: 12, cost: 3}, - {"label":"CheckOutcome(A7)", task: "CheckOutcome", "starting_time": 46, "ending_time": 48, delay: 4, cost: 3}, -]}, -{label: "Blah", times: [ - {"label":"AwardContract(A5)", task: "AwardContract", "starting_time": 9, "ending_time": 10, delay: 0, cost: 2}, - {"label":"AwardContract(D6)", 
task: "AwardContract", "starting_time": 10, "ending_time": 11, delay: 1, cost: 2}, - {"label":"ProvideService(D8)", task: "ProvideService", "starting_time": 32, "ending_time": 37, delay: 3, cost: 6}, -]}, -{label: "Patient3", times: [ - {"label":"ProvideService(A5)", task: "ProvideService", "starting_time": 11, "ending_time": 16, delay: 1, cost: 6}, - {"label":"ProvideService(D6)", task: "ProvideService", "starting_time": 16, "ending_time": 21, delay: 5, cost: 6}, - {"label":"CheckOutcome(A5)", task: "CheckOutcome", "starting_time": 32, "ending_time": 34, delay: 16, cost: 3}, -]}, -{label: "Patient2", times: [ - {"label":"ProvideService(D4)", task: "ProvideService", "starting_time": 23, "ending_time": 28, delay: 1, cost: 6}, - {"label":"CheckOutcome(D4)", task: "CheckOutcome", "starting_time": 34, "ending_time": 36, delay: 6, cost: 3}, -]}, -]; - -var workflowData = [ -{label: "D2", times: [ - {"label":"AwardContract(D2)", task: "AwardContract", "starting_time": 0, "ending_time": 1, delay: 0, cost: 2}, - {"label":"ProvideService(D2)", task: "ProvideService", "starting_time": 1, "ending_time": 6, delay: 0, cost: 6}, - {"label":"CheckOutcome(D2)", task: "CheckOutcome", "starting_time": 11, "ending_time": 13, delay: 5, cost: 3}, -]}, -{label: "TaskSim", times: [ - {"label":"TaskSimTask(TaskSim)", task: "TaskSimTask", "starting_time": 6, "ending_time": 11, delay: 0, cost: 15}, -]}, -{label: "A1", times: [ - {"label":"AwardContract(A1)", task: "AwardContract", "starting_time": 13, "ending_time": 14, delay: 4, cost: 2}, - {"label":"ProvideService(A1)", task: "ProvideService", "starting_time": 22, "ending_time": 27, delay: 8, cost: 6}, - {"label":"CheckOutcome(A1)", task: "CheckOutcome", "starting_time": 42, "ending_time": 44, delay: 15, cost: 3}, -]}, -{label: "A5", times: [ - {"label":"AwardContract(A5)", task: "AwardContract", "starting_time": 9, "ending_time": 10, delay: 0, cost: 2}, - {"label":"ProvideService(A5)", task: "ProvideService", "starting_time": 11, 
"ending_time": 16, delay: 1, cost: 6}, - {"label":"CheckOutcome(A5)", task: "CheckOutcome", "starting_time": 32, "ending_time": 34, delay: 16, cost: 3}, -]}, -{label: "D6", times: [ - {"label":"AwardContract(D6)", task: "AwardContract", "starting_time": 10, "ending_time": 11, delay: 1, cost: 2}, - {"label":"ProvideService(D6)", task: "ProvideService", "starting_time": 16, "ending_time": 21, delay: 5, cost: 6}, -]}, -{label: "A3", times: [ - {"label":"AwardContract(A3)", task: "AwardContract", "starting_time": 14, "ending_time": 15, delay: 3, cost: 2}, - {"label":"ProvideService(A3)", task: "ProvideService", "starting_time": 27, "ending_time": 32, delay: 12, cost: 6}, - {"label":"CheckOutcome(A3)", task: "CheckOutcome", "starting_time": 44, "ending_time": 46, delay: 12, cost: 3}, -]}, -{label: "D4", times: [ - {"label":"AwardContract(D4)", task: "AwardContract", "starting_time": 21, "ending_time": 22, delay: 9, cost: 2}, - {"label":"ProvideService(D4)", task: "ProvideService", "starting_time": 23, "ending_time": 28, delay: 1, cost: 6}, - {"label":"CheckOutcome(D4)", task: "CheckOutcome", "starting_time": 34, "ending_time": 36, delay: 6, cost: 3}, -]}, -{label: "A7", times: [ - {"label":"AwardContract(A7)", task: "AwardContract", "starting_time": 22, "ending_time": 23, delay: 0, cost: 2}, - {"label":"ProvideService(A7)", task: "ProvideService", "starting_time": 37, "ending_time": 42, delay: 14, cost: 6}, - {"label":"CheckOutcome(A7)", task: "CheckOutcome", "starting_time": 46, "ending_time": 48, delay: 4, cost: 3}, -]}, -{label: "D8", times: [ - {"label":"AwardContract(D8)", task: "AwardContract", "starting_time": 28, "ending_time": 29, delay: 6, cost: 2}, - {"label":"ProvideService(D8)", task: "ProvideService", "starting_time": 32, "ending_time": 37, delay: 3, cost: 6}, -]}, -]; diff --git a/resources/d3-timeline/pew-timeline-simulation.html b/resources/d3-timeline/pew-timeline-simulation.html deleted file mode 100644 index eb0634b9..00000000 --- 
a/resources/d3-timeline/pew-timeline-simulation.html +++ /dev/null @@ -1,19 +0,0 @@ - - - - - - - WorkflowFM - Results Timeline - - - - -

Resources:

-
-

Simulations:

-
- - - - diff --git a/resources/d3-timeline/pew-timeline-simulation.js b/resources/d3-timeline/pew-timeline-simulation.js deleted file mode 100644 index 061324ce..00000000 --- a/resources/d3-timeline/pew-timeline-simulation.js +++ /dev/null @@ -1,121 +0,0 @@ -function displayResults(selection,data) { - var widthPerTick = 60 - var leftMargin = 100 - var rightMargin = 30 - - var colorScale = d3.scale.category20().domain(tasks); - //var ticks = Array(totalTicks).fill().map((v,i)=>i); - - var tRange = timeRange(data) - var startTime = tRange[0] - var endTime = tRange[1] - - var totalTicks = endTime - startTime - - //var tickTime = d3.time.minutes - //var totalTicks = tickTime(startTime,endTime).length - - console.log("Total Ticks: " + totalTicks) - - var chart = d3.timeline() - .tickFormat( // - {format: d3.format.utc("03d"), - tickInterval: 1, // This forces us to start from 001 - numTicks: totalTicks, - //tickValues: ticks, // Use this to start from 000 - tickSize: 10, - }) - /*.tickFormat( // - {format: d3.time.format.utc("%H"), - tickTime: tickTime, - tickInterval: 1, - tickSize: 10, - })*/ - .stack() - .margin({left:100, right:30, top:0, bottom:0}) - .colors( colorScale ) - .colorProperty('task') - .width(totalTicks*widthPerTick+leftMargin+rightMargin); - - chart.showTimeAxisTick(); - chart.relativeTime(); - //chart.rowSeparators("#555555"); - - var backgroundColor = "#eeeeee"; - var altBackgroundColor = "white"; - chart.background(function (datum, i) { - var odd = (i % 2) === 0; - return odd ? 
altBackgroundColor : backgroundColor; - }); - chart.fullLengthBackgrounds(); - - var div = selection.append("div") - .attr("class", "tooltip") - .style("opacity", 0); - - chart.mouseover(function (d, i, datum) { - // d is the current rendering object - // i is the index during d3 rendering - // datum is the data object - div.style("left", (d3.event.pageX) + "px") - .style("top", (d3.event.pageY - 28) + "px"); - div.text(d.task + "\n" + - chart.tickFormat().format(new Date(d.starting_time)) + "-" + chart.tickFormat().format(new Date(d.ending_time)) + "\n" + - "Delay: " + chart.tickFormat().format(new Date(d.delay)) + "\n" + - "Cost: " + d.cost - ); - div.transition() - .duration(200) - .style("opacity", .9); - }); - chart.mouseout(function (d, i, datum) { - div.transition() - .duration(500) - .style("opacity", 0); - }); - - selection.select("svg").selectAll("g").remove(); - var svg = selection.select("svg") - .datum(data) - //.attr("width", '100%') - .attr("width", totalTicks*widthPerTick+leftMargin+rightMargin) - .call(chart); -} - -function timeRange(data) { - var start = new Date().getTime(); - var finish = 0; - for (var i = 0; i < data.length; i++) { - for (var j = 0; j < data[i].times.length; j++) { - if (data[i].times[j].starting_time < start) - start = data[i].times[j].starting_time; - if (data[i].times[j].ending_time > finish) - finish = data[i].times[j].ending_time; - } - } - return [start,finish] -} - -function workflow(datum) { - var selection = d3.select(this); - displayResults(selection,datum.data); -} - -function newWorkflow(datum) { - var selection = d3.select(this); - selection.append("p").text(datum.id); - selection.append("svg") - //.attr("width", '100%') - //.attr("width", totalTicks*widthPerTick); - displayResults(selection,datum.data); -} - -function displayOne(tag,workflowData) { - var div = d3.select(tag)//.data(workflowData) - div.selectAll("svg").remove() - div.append("svg") - displayResults(div,workflowData) -} - 
-displayOne("#resources",resourceData); -displayOne("#simulations",simulationData); diff --git a/simulator/.gitignore b/simulator/.gitignore new file mode 100644 index 00000000..95e1782a --- /dev/null +++ b/simulator/.gitignore @@ -0,0 +1,17 @@ +*.class +*.log +/bin/ +.cache* +/target/ +/project/target/ +/project/project/target/ +/.settings/ +/.classpath +/.idea +/resources/data +.ensime +.ensime_cache/ +*~ +\#*\# +.\#* +.projectile \ No newline at end of file diff --git a/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimulation.scala b/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimulation.scala new file mode 100644 index 00000000..ca87d077 --- /dev/null +++ b/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimulation.scala @@ -0,0 +1,4 @@ +package com.workflowfm.pew.simulator + +class PiSimulation { +} diff --git a/skiexample/.project b/skiexample/.project deleted file mode 100644 index 50a850c1..00000000 --- a/skiexample/.project +++ /dev/null @@ -1,13 +0,0 @@ - - skiexample - - - org.scala-ide.sdt.core.scalabuilder - - - - org.scala-ide.sdt.core.scalanature - org.eclipse.jdt.core.javanature - - - \ No newline at end of file diff --git a/src/com/workflowfm/pew/simulation/Coordinator.scala b/src/com/workflowfm/pew/simulation/Coordinator.scala deleted file mode 100644 index 198065a9..00000000 --- a/src/com/workflowfm/pew/simulation/Coordinator.scala +++ /dev/null @@ -1,282 +0,0 @@ -package com.workflowfm.pew.simulation - -import akka.actor._ -import akka.util.Timeout -import akka.pattern.ask -import scala.concurrent.duration._ -import com.workflowfm.pew.simulation.metrics._ -import com.workflowfm.pew.execution._ -import scala.collection.mutable.PriorityQueue -import scala.concurrent.{ Promise, Await, ExecutionContext } -import scala.util.{ Success, Failure } - - - -object Coordinator { - case object Start - //TODO case object Stop - case class Done(time:Long,metrics:SimMetricsAggregator) - case object Ping - case class Time(time:Long) 
- - case class AddSim(t:Long,sim:Simulation,exe:SimulatorExecutor[_]) - case class AddSims(l:Seq[(Long,Simulation)],exe:SimulatorExecutor[_]) - case class AddSimNow(sim:Simulation,exe:SimulatorExecutor[_]) - case class AddSimsNow(l:Seq[Simulation],exe:SimulatorExecutor[_]) - - case class AddResource(r:TaskResource) - case class AddResources(l:Seq[TaskResource]) - - case class SimDone(name:String,result:String) - - case class AddTask(t:TaskGenerator, promise:Promise[TaskMetrics], resources:Seq[String]) - case object AckTask - - private case object Tick - private case object Tack - private case object Tock - - def props( - scheduler: Scheduler, - startingTime: Long = 0L, - timeoutMillis: Int = 50 - )(implicit system: ActorSystem, executionContext:ExecutionContext - ): Props = Props(new Coordinator(scheduler,startingTime,timeoutMillis)(system,executionContext)) -} - -class Coordinator( - scheduler :Scheduler, - startingTime:Long, - timeoutMillis:Int -)( - implicit system: ActorSystem, - implicit val executionContext:ExecutionContext -) extends Actor { - - import scala.collection.mutable.Queue - - sealed trait Event extends Ordered[Event] { - def time:Long - def compare(that:Event) = { - that.time.compare(time) - } - } - case class FinishingTask(override val time:Long,task:Task) extends Event - case class StartingSim(override val time:Long,simulation:Simulation,executor:SimulatorExecutor[_]) extends Event - - var resourceMap :Map[String,TaskResource] = Map[String,TaskResource]() ///: resources){ case (m,r) => m + (r.name -> r)} - var simulations :Map[String,SimulatorExecutor[_]] = Map[String,SimulatorExecutor[_]]() - val tasks :Queue[Task] = Queue() - - val events = new PriorityQueue[Event]() - - var starter:Option[ActorRef] = None - var time = startingTime - var taskID = 0L - - val metrics = new SimMetricsAggregator() - - //implicit val timeout = Timeout(timeoutMillis.millis) - - def addResource(r:TaskResource) = if (!resourceMap.contains(r.name)) { - 
println(s"[$time] Adding resource ${r.name}") - resourceMap += r.name -> r - metrics += r - } - - protected def handleEvent(event:Event) = event match { - //println("["+time+"] ========= Event! ========= ") - // A task is finished - case FinishingTask(t,task) if (t == time) => { - // Unbind the resources - //resourceMap map { case (n,r) => (n,resourceUpdate(r)) } TODO why was that better originally? - task.taskResources(resourceMap).foreach(_.finishTask(time)) - // Mark the task as completed - // This will cause workflows to reduce and maybe produce more tasks - task.complete(metrics.taskMap.getOrElse(task.id, TaskMetrics(task).start(time - task.duration))) - } - // A simulation (workflow) is starting now - case StartingSim(t,sim,exec) if (t == time)=> startSimulation(time,sim,exec) - case _ => println(s"[$time] <*> <*> <*> Failed to handle event: $event") - } - - protected def startSimulation(t:Long, s:Simulation, e:SimulatorExecutor[_]) :Boolean = { - if (t == time) { - println("["+time+"] Starting simulation: \"" + s.name +"\".") - metrics += (s,t) - s.run(e).onComplete({ - case Success(res) => { - stopSimulation(s.name, res.toString) - println("*** Result of " + s.name + ": " + res) - } - case Failure(ex) => { - stopSimulation(s.name, ex.getLocalizedMessage) - println("*** Exception in " + s.name + ": ") - ex.printStackTrace() - } - }) - - simulations += (s.name -> e) - true - } else false - } - - protected def stopSimulation(name:String, result:String) = { - simulations -= name - (metrics^name) (_.done(result,time)) - println(s"[$time] Simulation $name reported done.") - } - - // protected def updateSimulation(s:String, f:WorkflowMetricTracker=>WorkflowMetricTracker) = simulations = simulations map { - // case (n,m,e) if n.equals(s) => (n,f(m),e) - // case x => x - // } - - protected def resourceIdle(r:TaskResource) = if (r.isIdle) { - (metrics^r)(_.idle(time-r.lastUpdate)) - r.update(time) - } - - protected def startTask(task:Task) { - tasks.dequeueFirst 
(_.id == task.id) - // Mark the start of the task in the metrics - (metrics^task.id)(_.start(time)) - task.taskResources(resourceMap) map { r => - // Update idle time if resource has been idle - if (r.isIdle) (metrics^r)(_.idle(time-r.lastUpdate)) - // Bind each resource to this task - r.startTask(task, time) - // Add the task and resource cost to the resource metrics - (metrics^r)(_.task(task, r.costPerTick)) - } - // Add the task to the simulation metrics - (metrics^task.simulation)(_.task(task).addDelay(time - task.created)) - // Generate a FinishTask event to be triggered at the end of the event - events += FinishingTask(time+task.duration,task) - } - - /** - * Runs all tasks that require no resources - */ - protected def runNoResourceTasks() = tasks.dequeueAll(_.resources.isEmpty).map(startTask) - - protected def workflowsReady = simulations forall (_._2.simulationReady) - - /** - * First half of a clock tick - * We need two halves because we want to give the workflows a chance to reduce - * and register new tasks to the Coordinator. The Coordinator must receive those - * through messages between now and the Tack message. - */ - protected def tick :Unit = { - - // Are events pending? - if (!events.isEmpty) { - // Grab the first event - val event = events.dequeue() - // Did we somehow go past the event time? This should never happen. - if (event.time < time) { - println(s"[$time] *** Unable to handle past event for time: [${event.time}]") - } else { - // Jump ahead to the event time. This is a priority queue so we shouldn't skip any events - time = event.time - handleEvent(event) - - // Handle all events that are supposed to happen now, so that we free resources for the Scheduler - while (events.headOption.map(_.time == time).getOrElse(false)) - handleEvent(events.dequeue) - } - } - // val startedActors = res filter (_._1 == true) map (_._2.executor) - // for (a <- startedActors) - // a ? AkkaExecutor.Ping - - self ! 
Coordinator.Tack - } - - - protected def tack :Unit = { - println(s"[$time] Waiting for workflow progress...") - Thread.sleep(timeoutMillis) - - if (!workflowsReady) { - self ! Coordinator.Tack - // val simActors = simulations map (_._2.executor) - // for (a <- simActors) - // a ? AkkaExecutor.Ping} - - } else { - //println("################### pass!!! #######################") - self ! Coordinator.Tock - } - } - - - protected def tock :Unit = { - // Make sure we run tasks that need no resources - runNoResourceTasks() - - // Assign the next tasks - scheduler.getNextTasks(tasks, time, resourceMap).foreach(startTask) - // Update idle resources - resourceMap.values.foreach(resourceIdle) - - - // We finish if there are no events, no tasks, and all workflows have reduced - // Actually all workflows must have reduced at this stage, but we check anyway - //println(s"!TOCK! Events:${events.size} Tasks:${tasks.size} Workflows:$workflowsReady Simulations:${simulations.size} ") - if (events.isEmpty && tasks.isEmpty && workflowsReady) { // && simulations.isEmpty) { //&& resources.forall(_.isIdle) - println("["+time+"] All events done. All tasks done. All workflows idle.") // All simulations done..") - metrics.ended - // Tell whoever started us that we are done - starter map { a => a ! Coordinator.Done(time,metrics) } - - } else { - self ! 
Coordinator.Tick - } - } - - def start(a:ActorRef) = if (starter.isEmpty) { - starter = Some(a) - metrics.started - tick - } - - def receive = { - case Coordinator.AddSim(t,s,e) => events += StartingSim(t,s,e) - case Coordinator.AddSims(l,e) => events ++= l map { case (t,s) => StartingSim(t,s,e) } - case Coordinator.AddSimNow(s,e) => events += StartingSim(time,s,e) - case Coordinator.AddSimsNow(l,e) => events ++= l map { s => StartingSim(time,s,e) } - - case Coordinator.AddResource(r) => addResource(r) - case Coordinator.AddResources(r) => r foreach addResource - - case Coordinator.AddTask(gen,promise,resources) => { - val creation = if (gen.createTime >= 0) gen.createTime else time - // Create the task - val t = gen.create(taskID,creation,resources:_*) - // This is ok only if the simulation is running in memory - // Promises cannot be sent over messages otherwise as they are not serializable - promise.completeWith(t.promise.future) - // Make sure the next taskID will be fresh - taskID = taskID + 1L - println(s"[$time] Adding task [$taskID] created at [$creation]: ${t.name} (${t.simulation}).") - - // Calculate the cost of all resource usage. We only know this now! - val resourceCost = (0L /: t.taskResources(resourceMap)) { case (c,r) => c + r.costPerTick * t.duration } - t.addCost(resourceCost) - - metrics += t - tasks += t - - //sender() ! Coordinator.AckTask(t) //uncomment this to acknowledge AddTask - } - - case Coordinator.Start => start(sender) - case Coordinator.Tick => tick - case Coordinator.Tack => tack - case Coordinator.Tock => tock - case Coordinator.Ping => sender() ! 
Coordinator.Time(time) - - } -} diff --git a/src/com/workflowfm/pew/simulation/Generators.scala b/src/com/workflowfm/pew/simulation/Generators.scala deleted file mode 100644 index a52b4bde..00000000 --- a/src/com/workflowfm/pew/simulation/Generators.scala +++ /dev/null @@ -1,16 +0,0 @@ -package com.workflowfm.pew.simulation - -trait ValueGenerator[T] { - def get :T - def estimate :T -} - -case class ConstantGenerator[T](value :T) extends ValueGenerator[T] { - def get = value - def estimate = value -} - -case class UniformGenerator(min :Int, max:Int) extends ValueGenerator[Int] { - def get = new util.Random().nextInt(max-min) + min - def estimate = (max + min) / 2 -} \ No newline at end of file diff --git a/src/com/workflowfm/pew/simulation/Scheduler.scala b/src/com/workflowfm/pew/simulation/Scheduler.scala deleted file mode 100644 index 1c808186..00000000 --- a/src/com/workflowfm/pew/simulation/Scheduler.scala +++ /dev/null @@ -1,167 +0,0 @@ -package com.workflowfm.pew.simulation - -import scala.annotation.tailrec - -trait Scheduler { - def getNextTasks(tasks:Seq[Task], currentTime:Long, resourceMap:Map[String,TaskResource]) :Seq[Task] - - def isIdleResource(r:String, resourceMap:Map[String,TaskResource]) = resourceMap.get(r) match { - case None => false - case Some(s) => s.isIdle - } - -} - -object DefaultScheduler extends Scheduler { - import scala.collection.immutable.Queue - - override def getNextTasks(tasks:Seq[Task], currentTime:Long, resourceMap:Map[String,TaskResource]) :Seq[Task] = - findNextTasks(currentTime, resourceMap, resourceMap.mapValues(Schedule(_)), tasks.toList, Queue()) - - @tailrec - def findNextTasks( - currentTime:Long, - resourceMap:Map[String,TaskResource], - schedules:Map[String,Schedule], - tasks:List[Task], - result:Queue[Task] - ):Seq[Task] = tasks match { - case Nil => result - case t :: rest => { - val start = Schedule.merge(t.resources.flatMap(schedules.get(_))) ? 
(currentTime,t) - val schedules2 = (schedules /: t.resources) { - case (s,r) => s + (r -> (s.getOrElse(r,Schedule(Nil)) +> (start,t))) - } - val result2 = if (start == currentTime && t.taskResources(resourceMap).forall(_.isIdle)) result :+ t else result - findNextTasks(currentTime, resourceMap, schedules2, rest, result2) - } - } -} - -case class Schedule(tasks:List[(Long,Long)]) { - - def +(start:Long,end:Long):Option[Schedule] = Schedule.add(start,end,tasks) match { - case None => None - case Some(l) => Some(copy(tasks=l)) - } - - def +>(start:Long,end:Long):Schedule = Schedule.add(start,end,tasks) match { - case None => { - System.err.println(s"*** Unable to add ($start,$end) to Schedule: $tasks") - this - } - case Some(l) => copy(tasks=l) - } - - def +>(startTime:Long, t:Task):Schedule = this +> (startTime,startTime+t.estimatedDuration) - - def ?(currentTime:Long, t:Task):Long = Schedule.fit(currentTime,t.estimatedDuration,tasks) - - - def ++(s:Schedule):Schedule = Schedule(Schedule.merge(tasks,s.tasks)) - - def isValid = Schedule.isValid(tasks) -} - -object Schedule { - import scala.collection.immutable.Queue - - def apply(r:TaskResource):Schedule = r.currentTask match { - case None => Schedule(List()) - case Some((s,t)) => Schedule((s,s + t.estimatedDuration) :: Nil) - } - - @tailrec - def add ( - start:Long, end:Long, - tasks:List[(Long,Long)], - result:Queue[(Long,Long)] = Queue[(Long,Long)]() - ):Option[List[(Long,Long)]] = tasks match { - case Nil => Some(result :+ (start,end) toList) - case (l:Long,r:Long) :: t => - if (l > end) Some(result ++ ((start,end) :: (l,r) :: t) toList) - else if (l == end) Some(result ++ ((start,r) :: t) toList) - else if (r < start) add(start,end,t,result :+ ((l,r))) - else if (r == start) add(l,end,t,result) - else /* if (r >= end) */ None - //else None - } - - @tailrec - def fit ( - start:Long, - duration:Long, - tasks:List[(Long,Long)] - ):Long = tasks match { - case Nil => start - case (l,_) :: _ if (l >= start + duration) 
=> start - case (_,r) :: t => fit(r,duration,t) - } - - @tailrec - def merge( - g1:List[(Long,Long)], - g2:List[(Long,Long)], - result:Queue[(Long,Long)] = Queue[(Long,Long)]() - ):List[(Long,Long)] = g1 match { - case Nil => result ++ g2 toList - case (l1,r1) :: t1 => g2 match { - case Nil => result ++ g1 toList - case (l2,r2) :: t2 => { - if (r2 < l1) merge(g1,t2,result :+ (l2,r2)) - else if (r1 < l2) merge(t1,g2,result :+ (l1,r1)) - else if (r1 == r2) merge(t1,t2,result :+ (math.min(l1,l2),r1)) - else if (r2 == l1) merge((l2,r1)::t1,t2,result) - else if (r1 == l2) merge(t1,(l1,r2)::t2,result) - else if (r1 < r2) merge(t1,(math.min(l1,l2),r2)::t2,result) - else /* if (r1 > r2)*/ merge((math.min(l1,l2),r1)::t1,t2,result) - } - } - } - - def merge(schedules:Seq[Schedule]):Schedule = { - (Schedule(List()) /: schedules)(_ ++ _) - } - - @deprecated("No longer using gaps in Schedule","1.2.0") - def fitInGaps ( - start:Long, end:Long, - gaps:List[(Long,Long)], - result:Queue[(Long,Long)] = Queue[(Long,Long)]() - ):Option[List[(Long,Long)]] = gaps match { - case Nil => Some(result.toList) - case (l:Long,r:Long) :: t => - if (l == start && end == r) fitInGaps(start,end,t,result) // event fits exactly - else if (l == start && end <= r) fitInGaps(start,end,t,result :+ ((end,r)) )// add an event at the beginning of the gap - else if (l <= start && end == r) fitInGaps(start,end,t,result :+ ((l,start)) ) // add an event at the end of the gaps - else if (l < start && end < r) fitInGaps(start,end,t,result :+ ((l,start)) :+ ((end,r)) ) // add an event within a gap - else if (start > r || end < l) fitInGaps(start,end,t,result :+ ((l,r)) ) - else None - } - - @deprecated("No longer using gaps in Schedule","1.2.0") - // we assume all gap lists finish with a (t,Long.MaxValue) gap - def mergeGaps ( - g1:List[(Long,Long)], - g2:List[(Long,Long)], - result:Queue[(Long,Long)] = Queue[(Long,Long)]() - ):List[(Long,Long)] = g1 match { - case Nil => result toList - case (l1,r1) :: t1 => g2 
match { - case Nil => result toList - case (l2,r2) :: t2 => { - if (r2 <= l1) mergeGaps(g1,t2,result) - else if (r1 <= l2) mergeGaps (t1,g2,result) - else if (r1 == Long.MaxValue && r1 == r2) result :+ (math.max(l1,l2),r1) toList - else if (r2 <= r1) mergeGaps(g1,t2,result :+ (math.max(l1,l2),r2)) - else /* if (r1 < r2) */ mergeGaps(t1,g2,result :+ (math.max(l1,l2),r1)) - } - } - } - - def isValid(gaps:List[(Long,Long)], end:Long = Long.MinValue):Boolean = gaps match { - case Nil => true - case (l,r) :: t if end < l && l < r => isValid(t, r) - case _ => false - } -} diff --git a/src/com/workflowfm/pew/simulation/Simulation.scala b/src/com/workflowfm/pew/simulation/Simulation.scala deleted file mode 100644 index a939f4be..00000000 --- a/src/com/workflowfm/pew/simulation/Simulation.scala +++ /dev/null @@ -1,40 +0,0 @@ -package com.workflowfm.pew.simulation - -import akka.actor._ -import scala.concurrent.duration._ -import scala.concurrent.Await -import scala.concurrent.Future -import com.workflowfm.pew.execution.AkkaExecutor -import scala.util.Success -import scala.util.Failure -import scala.concurrent.ExecutionContext -import com.workflowfm.pew.PiProcess -import com.workflowfm.pew.execution.SimulatorExecutor - - -abstract class Simulation(val name:String) { //extends SimulationMetricTracker - def run(executor:SimulatorExecutor[_]):Future[Any] - def getProcesses():Seq[PiProcess] -} - -class TaskSimulation(simulationName:String, coordinator:ActorRef, resources:Seq[String], duration:ValueGenerator[Long], val cost:ValueGenerator[Long]=new ConstantGenerator(0L), interrupt:Int=(-1), priority:Task.Priority=Task.Medium)(implicit system: ActorSystem) extends Simulation(simulationName) { - def run(executor:SimulatorExecutor[_]) = { - TaskGenerator(simulationName + "Task", simulationName, duration, cost, interrupt, priority).addTo(coordinator,resources :_*) - } - override def getProcesses() = Seq() -} - -class MockExecutor extends Actor { - def receive = { - case 
AkkaExecutor.Ping => sender() ! AkkaExecutor.Ping - } -} - -trait SimulatedProcess { this:PiProcess => - def simulationName:String - override def isSimulatedProcess = true - - def simulate[T](gen:TaskGenerator, coordinator:ActorRef, result:T, resources:String*)(implicit system: ActorSystem, context: ExecutionContext = ExecutionContext.global):Future[T] ={ - gen.addTo(coordinator, resources:_*).map(_ => result) - } -} diff --git a/src/com/workflowfm/pew/simulation/Task.scala b/src/com/workflowfm/pew/simulation/Task.scala deleted file mode 100644 index 8d64909a..00000000 --- a/src/com/workflowfm/pew/simulation/Task.scala +++ /dev/null @@ -1,101 +0,0 @@ -package com.workflowfm.pew.simulation - -import akka.actor._ -import akka.pattern.ask -import akka.util.Timeout -import scala.concurrent.Promise -import scala.concurrent.duration._ -import com.workflowfm.pew.simulation.metrics._ - -object Task { - sealed trait Priority extends Ordered[Priority] { - def value:Int - def compare(that:Priority) = this.value - that.value - } - case object Highest extends Priority { val value = 5 } - case object High extends Priority { val value = 4 } - case object Medium extends Priority { val value = 3 } - case object Low extends Priority { val value = 2 } - case object VeryLow extends Priority { val value = 1 } -} - -class Task ( - val id:Long, - val name:String, - val simulation:String, - val created:Long, - val resources:Seq[String], - val duration:Long, - val estimatedDuration:Long, - val initialCost:Long, - val interrupt:Int=Int.MaxValue, - val priority:Task.Priority=Task.Medium - ) extends Ordered[Task] { - - val promise:Promise[TaskMetrics] = Promise() - - var cost:Long = initialCost - - // execute will be called once by each associated TaskResource - def complete(metrics:TaskMetrics) = if (!promise.isCompleted) promise.success(metrics) - - def addCost(extra:Long) = cost += extra - - def nextPossibleStart(currentTime:Long, resourceMap:Map[String,TaskResource]) = { - (currentTime /: 
resources){ case (i,rN) => resourceMap.get(rN) match { - case None => throw new RuntimeException(s"Resource $rN not found!") - case Some(r) => Math.max(i,r.nextAvailableTimestamp(currentTime)) - }} - } - - def taskResources(resourceMap:Map[String,TaskResource]) = resources flatMap (resourceMap.get(_)) - - - def compare(that:Task) = { - lazy val cPriority = that.priority.compare(this.priority) - lazy val cResources = that.resources.size.compare(this.resources.size) - lazy val cAge = this.created.compare(that.created) - lazy val cDuration = that.estimatedDuration.compare(this.estimatedDuration) - lazy val cInterrupt = this.interrupt.compare(that.interrupt) - lazy val cID = this.id.compare(that.id) - - if (cPriority != 0) cPriority - else if (cAge != 0) cAge - else if (cResources != 0) cResources - else if (cDuration != 0) cDuration - else if (cInterrupt != 0) cInterrupt - else cID - } - - override def toString = { - val res = resources.mkString(",") - s"Task($name)($res)" - } -} - -case class TaskGenerator ( - name :String, - simulation:String, - duration:ValueGenerator[Long], - cost:ValueGenerator[Long], - interrupt:Int=(-1), - priority:Task.Priority=Task.Medium, - createTime:Long=(-1) -) { - def create(id:Long, time:Long, resources:String*) = new Task(id,name,simulation,time,resources,duration.get,duration.estimate,cost.get,interrupt,priority) - def withPriority(p:Task.Priority) = copy(priority = p) - def withInterrupt(int:Int) = copy(interrupt = int) - def withDuration(dur:ValueGenerator[Long]) = copy(duration = dur) - def withName(n:String) = copy(name = n) - def withSimulation(s:String) = copy(simulation=s) - def withCreationTime(t:Long) = copy(createTime=t) - - def addTo(coordinator:ActorRef, resources:String*)(implicit system: ActorSystem) = { - //implicit val timeout = Timeout(1.second) - // change this to ? to require an acknowledgement - val promise = Promise[TaskMetrics]() - coordinator ! 
Coordinator.AddTask(this,promise,resources) - promise.future - } - -} diff --git a/src/com/workflowfm/pew/simulation/TaskResource.scala b/src/com/workflowfm/pew/simulation/TaskResource.scala deleted file mode 100644 index 5ea38c56..00000000 --- a/src/com/workflowfm/pew/simulation/TaskResource.scala +++ /dev/null @@ -1,62 +0,0 @@ -package com.workflowfm.pew.simulation - -import scala.collection.mutable.Queue -import com.workflowfm.pew.simulation.metrics._ - -object TaskResource { - sealed trait State - case object Busy extends State - case class Finished(t:Task) extends State - case object Idle extends State -} - -class TaskResource(val name:String,val costPerTick:Int) { - var currentTask :Option[(Long,Task)] = None - var lastUpdate :Long = 1 - - def isIdle :Boolean = currentTask == None - - def finishTask(currentTime:Long) :Option[Task] = currentTask match { - case None => { - //println("["+currentTime+"] \"" + name + "\" is idle.") - None - } - case Some((startTime,task)) => - if (currentTime >= startTime + task.duration) { - println("["+currentTime+"] \"" + name + "\" detached from task \"" + task.name + " (" + task.simulation +")\".") - currentTask = None - lastUpdate = currentTime - Some(task) - } - else { - //println("["+currentTime+"] \"" + name + "\" is attached to task \"" + task.name + " (" + task.simulation +")\" - " + (startTime + duration - currentTime) + " ticks remaining.") - None - } - } - - def startTask(task:Task,currentTime:Long) = { - currentTask match { - case None => { - println("["+currentTime+"] \"" + name + "\" is NOW attached to task \"" + task.name + " (" + task.simulation +")\" - " + task.duration + " ticks remaining.") - currentTask = Some(currentTime,task) - lastUpdate = currentTime - true - } - case Some((_,currentTask)) => { - println("["+currentTime+"] <*> <*> <*> ERROR <*> <*> <*> \"" + name + "\" tried to attach to \"" + task.name + " (" + task.simulation +")\" but is already attached to \"" + currentTask.name + "\"!") - false - } 
- } - } - - - def nextAvailableTimestamp(currentTime:Long) :Long = currentTask match { - case None => currentTime - case Some((startTime,t)) => { - startTime + t.estimatedDuration - } - } - - - def update(time:Long) = lastUpdate = time -} \ No newline at end of file diff --git a/src/com/workflowfm/pew/simulation/metrics/Actor.scala b/src/com/workflowfm/pew/simulation/metrics/Actor.scala deleted file mode 100644 index 695c403b..00000000 --- a/src/com/workflowfm/pew/simulation/metrics/Actor.scala +++ /dev/null @@ -1,51 +0,0 @@ -package com.workflowfm.pew.simulation.metrics - -import akka.actor.Props -import akka.actor.Actor -import akka.actor.ActorRef -import akka.actor.ActorSystem -import com.workflowfm.pew.execution.SimulatorExecutor -import com.workflowfm.pew.simulation.Simulation -import com.workflowfm.pew.simulation.Coordinator - - -// Provide a callbackActor to get a response when we are done. Otherwise we'll shutdown the ActorSystem -/** Interacts with a [[Coordinator]] to run simulations and apply a [[SimMetricsOutput]] when they are done. - * @param m the [[SimMetricsOutput]] that will be applied to the metrics from every simulation [[Coordinator]] when it is done - * @param callbackActor optional argument for a callback actor that we can forward the [[Coordinator.Done]] message. If this is [[scala.None]], we do not forward the message, but we shutdown the actor system, as we assume all simulations are completed. - */ -class SimMetricsActor(m:SimMetricsOutput, callbackActor:Option[ActorRef])(implicit system: ActorSystem) extends Actor { - - def receive = { - case SimMetricsActor.Start(coordinator) => { - coordinator ! Coordinator.Start - } - - case SimMetricsActor.StartSims(coordinator,sims,executor) => { - coordinator ! Coordinator.AddSims(sims,executor) - coordinator ! Coordinator.Start - } - - case SimMetricsActor.StartSimsNow(coordinator,sims,executor) => { - coordinator ! Coordinator.AddSimsNow(sims,executor) - coordinator ! 
Coordinator.Start - } - - case Coordinator.Done(t:Long,ma:SimMetricsAggregator) => { - m(t,ma) - callbackActor match { - case None => system.terminate() - case Some(actor) => actor ! Coordinator.Done(t,ma) - } - } - } -} - -/** Contains the messages involved in [[SimMetricsActor]] and the [[akka.actor.Props]] initializer. */ -object SimMetricsActor { - case class Start(coordinator:ActorRef) - case class StartSims(coordinator:ActorRef,sims:Seq[(Long,Simulation)],executor:SimulatorExecutor[_]) - case class StartSimsNow(coordinator:ActorRef,sims:Seq[Simulation],executor:SimulatorExecutor[_]) - - def props(m:SimMetricsOutput, callbackActor:Option[ActorRef]=None)(implicit system: ActorSystem): Props = Props(new SimMetricsActor(m,callbackActor)(system)) -} diff --git a/src/com/workflowfm/pew/simulation/metrics/Measure.scala b/src/com/workflowfm/pew/simulation/metrics/Measure.scala deleted file mode 100644 index a9588029..00000000 --- a/src/com/workflowfm/pew/simulation/metrics/Measure.scala +++ /dev/null @@ -1,254 +0,0 @@ -package com.workflowfm.pew.simulation.metrics - -import com.workflowfm.pew.simulation._ - -/** Metrics for a simulated [[Task]] that consumed virtual time. - * - * @param id the unique ID of the [[Task]] - * @param task the name of the [[Task]] - * @param simulation the name of the simulation the [[Task]] belongs to - * @param created the virtual timestamp when the [[Task]] was created and entered the [[Coordinator]] - * @param started the virtual timestamp when the [[Task]] started executing, or [[scala.None]] if it has not started yet - * @param duration the virtual duration of the [[Task]] - * @param cost the cost associated with the [[Task]] - * @param resources the list of names of the [[TaskResource]]s this [[Task]] used - */ -case class TaskMetrics ( - id:Long, - task:String, - simulation:String, - created:Long, - started:Option[Long], - duration:Long, - cost:Long, - resources:Seq[String] - ) { - /** Sets the starting time for the [[Task]]. 
*/ - def start(st:Long) = copy(started=Some(st)) - - /** Calculates the task delay as the difference of the creation and starting times. */ - def delay = started match { - case None => 0L - case Some(s) => s - created - } - - /** Returns the full task and simulation name. */ - def fullName = s"$task($simulation)" -} -object TaskMetrics { - /** Generates [[TaskMetrics]] from a given [[Task]] assuming it has not started yet. */ - def apply(task:Task):TaskMetrics = TaskMetrics(task.id, task.name, task.simulation, task.created, None, task.duration, task.cost, task.resources) -} - - -/** Metrics for a [[Simulation]] that has already started. - * - * @param name the unique name of the [[Simulation]] - * @param started the virtual timestamp when the [[Simulation]] started executing - * @param duration the virtual duration of the [[Simulation]] - * @param delay the sum of all delays for all involved [[Task]]s - * @param tasks the number of [[Task]]s associated with the [[Simulation]] so far - * @param cost the total cost associated with the [[Simulation]] so far - * @param result a `String` representation of the returned result from the [[Simulation]], or [[scala.None]] if it still running. In case of failure, the field is populated with the localized message of the exception thrown - */ -case class SimulationMetrics( - name:String, - started:Long, - duration:Long, - delay:Long, - tasks:Int, - cost:Long, - result:Option[String] - ) { - /** Adds some time to the total duration. */ - def addDuration(d:Long) = copy(duration = duration + d) - /** Adds some cost to the total cost. */ - def addCost(c:Long) = copy(cost = cost + c) - /** Adds some delay to the total delay. */ - def addDelay(d:Long) = copy(delay = delay + d) - /** Updates the metrics given a new [[Task]] that is created as part of the [[Simulation]]. 
*/ - def task(task:Task) = copy(tasks = tasks + 1, cost = cost + task.cost) - /** Updates the metrics given that the [[Simulation]] has completed with a certain result. - * @param res the result of the [[Simulation]] or localized message of the exception in case of failure - * @param time the virtual timestamp when the [[Simulation]] finished - */ - def done(res:String, time:Long) = copy( result = Some(res), duration = duration + time - started ) -} -object SimulationMetrics { - /** Initialize metrics for a named [[Simulation]] starting at the given virtual time. */ - def apply(name:String, t:Long):SimulationMetrics = SimulationMetrics(name,t,0L,0L,0,0L,None) -} - - -/** Metrics for at [[TaskResource]]. - * - * @param name the unique name of the [[TaskResource]] - * @param busyTime the total amount of virtual time that the [[TaskResource]] has been busy, i.e. attached to a [[Task]] - * @param idleTime the total amount of virtual time that the [[TaskResource]] has been idle, i.e. not attached to any [[Task]] - * @param tasks the number of different [[Task]]s that have been attached to this [[TaskResource]] - * @param cost the total cost associated with this [[TaskResource]] - */ -case class ResourceMetrics ( - name:String, - busyTime:Long, - idleTime:Long, - tasks:Int, - cost:Long - ) { - /** Adds some idle time to the total. */ - def idle(i:Long) = copy(idleTime = idleTime + i) - /** Updates the metrics given a new [[Task]] has been attached to the [[TaskResource]]. */ - def task(task:Task, costPerTick:Long) = copy( - tasks = tasks + 1, - cost = cost + task.duration * costPerTick, - busyTime = busyTime + task.duration - ) -} -object ResourceMetrics { - /** Initialize metrics given the name of a [[TaskResource]]. */ - def apply(name:String):ResourceMetrics = ResourceMetrics(name,0L,0L,0,0L) - /** Ihitialize metrics given a [[TaskResource]]. 
*/ - def apply(r:TaskResource):ResourceMetrics = ResourceMetrics(r.name,0L,0L,0,0L) -} - - -/** Collects/aggregates metrics across multiple tasks, resources, and simulations. */ -class SimMetricsAggregator { - import scala.collection.immutable.Map - - /** The '''real''' (system) time that measurement started, or [[scala.None]] if it has not started yet. */ - var start:Option[Long] = None - /** The '''real''' (system) time that measurement finished, or [[scala.None]] if it has not finished yet. */ - var end:Option[Long] = None - - /** Marks the start of metrics measurement with the current system time. */ - def started = start match { - case None => start = Some(System.currentTimeMillis()) - case _ => () - } - - /** Marks the end of metrics measurement with the current system time. */ - def ended = end = Some(System.currentTimeMillis()) - - /** Task metrics indexed by task ID. */ - val taskMap = scala.collection.mutable.Map[Long,TaskMetrics]() - /** Simulation metrics indexed by name. */ - val simMap = scala.collection.mutable.Map[String,SimulationMetrics]() - /** Resource metrics indexed by name. */ - val resourceMap = scala.collection.mutable.Map[String,ResourceMetrics]() - - - // Set - - /** Adds a new [[TaskMetrics]] instance, taking care of indexing automatically - * Overwrites a previous instance with the same IDs - */ - def +=(m:TaskMetrics):TaskMetrics = { taskMap += (m.id->m) ; m } - /** Adds a new [[SimulationMetrics]] instance, taking care of indexing automatically - * Overwrites a previous instance with the same IDs - */ - def +=(m:SimulationMetrics):SimulationMetrics = { simMap += (m.name->m) ; m } - /** Adds a new [[ResourceMetrics]] instance, taking care of indexing automatically - * Overwrites a previous instance with the same IDs - */ - def +=(m:ResourceMetrics):ResourceMetrics = { resourceMap += (m.name->m) ; m } - - /** Initializes and adds a new [[TaskMetrics]] instance given a new [[Task]]. 
*/ - def +=(task:Task):TaskMetrics = this += TaskMetrics(task) - /** Initializes and adds a new [[SimulationMetrics]] instance given the name of the simulation starting now - * and the current virtual time. - */ - def +=(s:Simulation,t:Long):SimulationMetrics = this += SimulationMetrics(s.name,t) - /** Initializes and adds a new [[ResourceMetrics]] instance given a new [[TaskResource]]. */ - def +=(r:TaskResource):ResourceMetrics = this += ResourceMetrics(r) - - - // Update - /** Updates a [[TaskMetrics]] instance. - * - * @return the updated [[taskMap]] or [[scala.None]] if the identified instance does not exist - * - * @param taskID the task ID for the involved [[Task]] - * @param u a function to update the [[TaskMetrics]] instance - * - * @see [[com.workflowfm.pew.metrics.MetricsAggregator]] for examples in a similar context - */ - def ^(taskID:Long)(u:TaskMetrics=>TaskMetrics):Option[TaskMetrics] = - taskMap.get(taskID).map { m => this += u(m) } - - /** Updates a [[TaskMetrics]] instance. - * - * @return the updated [[taskMap]] or [[scala.None]] if the identified instance does not exist - * - * @param task the involved [[Task]] - * @param u a function to update the [[TaskMetrics]] instance - * - * @see [[com.workflowfm.pew.metrics.MetricsAggregator]] for examples in a similar context - * - */ - def ^(task:Task)(u:TaskMetrics=>TaskMetrics):Option[TaskMetrics] = - taskMap.get(task.id).map { m => this += u(m) } - - /** Updates a [[SimulationMetrics]] instance. 
- * - * @return the updated [[simMap]] or [[scala.None]] if the identified instance does not exist - * - * @param simulation the involved [[Simulation]] - * @param u a function to update the [[SimulationMetrics]] instance - * - * @see [[com.workflowfm.pew.metrics.MetricsAggregator]] for examples in a similar context - * - */ - def ^(simulation:Simulation)(u:SimulationMetrics=>SimulationMetrics):Option[SimulationMetrics] = - simMap.get(simulation.name).map { m => this += u(m) } - - /** Updates a [[SimulationMetrics]] instance. - * - * @return the updated [[simMap]] or [[scala.None]] if the identified instance does not exist - * - * @param simulationName the name of the involved [[Simulation]] - * @param u a function to update the [[SimulationMetrics]] instance - * - * @see [[com.workflowfm.pew.metrics.MetricsAggregator]] for examples in a similar context - * - */ - def ^(simulationName:String)(u:SimulationMetrics=>SimulationMetrics):Option[SimulationMetrics] = - simMap.get(simulationName).map { m => this += u(m) } - - /** Updates a [[ResourceMetrics]] instance. - * - * @return the updated [[resourceMap]] or [[scala.None]] if the identified instance does not exist - * - * @param resource the involved [[TaskResource]] - * @param u a function to update the [[ResourceMetrics]] instance - * - * @see [[com.workflowfm.pew.metrics.MetricsAggregator]] for examples in a similar context - * - */ - def ^(resource:TaskResource)(u:ResourceMetrics=>ResourceMetrics):Option[ResourceMetrics] = - resourceMap.get(resource.name).map { m => this += u(m) } - - - // Getters - - /** Returns all the tracked instances of [[TaskMetrics]] sorted by starting time. */ - def taskMetrics = taskMap.values.toSeq.sortBy(_.started) - /** Returns all the tracked instances of [[SimulationMetrics]] sorted by simulation name. */ - def simulationMetrics = simMap.values.toSeq.sortBy(_.name) - /** Returns all the tracked instances of [[ResourceMetrics]] sorted by resource time. 
*/ - def resourceMetrics = resourceMap.values.toSeq.sortBy(_.name) - /** Returns a [[scala.collection.immutable.Set]] of all task names being tracked. - * This is useful when using task names as a category, for example to colour code tasks in the timeline. - */ - def taskSet = taskMap.values.map(_.task).toSet[String] - - /** Returns all the tracked instances of [[TaskMetrics]] associated with a particular [[TaskResource]], sorted by starting time. - * @param r the tracked [[ResourceMetrics]] of the resource - */ - // TODO: we used to have 2 levels of sorting! - def taskMetricsOf(r:ResourceMetrics) = taskMap.values.toSeq.filter(_.resources.contains(r.name)).sortBy(_.started) - /** Returns all the tracked instances of [[TaskMetrics]] associated with a particular [[Simulation]], sorted by starting time. - * @param r the tracked [[SimulationMetrics]] of the resource - */ - def taskMetricsOf(s:SimulationMetrics) = taskMap.values.toSeq.filter(_.simulation.equals(s.name)).sortBy(_.started) -} diff --git a/src/com/workflowfm/pew/simulation/metrics/Output.scala b/src/com/workflowfm/pew/simulation/metrics/Output.scala deleted file mode 100644 index 587a0ed9..00000000 --- a/src/com/workflowfm/pew/simulation/metrics/Output.scala +++ /dev/null @@ -1,241 +0,0 @@ -package com.workflowfm.pew.simulation.metrics - -import scala.collection.immutable.Queue -import com.workflowfm.pew.metrics.{ FileOutput, MetricsOutput } - -/** Manipulates a [[SimMetricsAggregator]] to produce some output via side-effects. - * - * As a function, takes 2 arguments: - * - a [[Long]] representing the total virtual time elapsed - * - the [[SimMetricsAggregator]] to act upon - */ -trait SimMetricsOutput extends ((Long,SimMetricsAggregator) => Unit) { - /** Compose with another [[SimMetricsOutput]] in sequence. */ - def and(h:SimMetricsOutput) = SimMetricsOutputs(this,h) -} - -/** A [[SimMetricsOutput]] consisting of a [[scala.collection.immutable.Queue]] of [[SimMetricsOutput]]s - * to be run sequentially. 
- */ -case class SimMetricsOutputs(handlers:Queue[SimMetricsOutput]) extends SimMetricsOutput { - /** Call all included [[MetricsOutput]]s. */ - override def apply(time:Long,aggregator:SimMetricsAggregator) = handlers map (_.apply(time,aggregator)) - /** Add another [[SimMetricsOutput]] in sequence. */ - override def and(h:SimMetricsOutput) = SimMetricsOutputs(handlers :+ h) -} -object SimMetricsOutputs { - /** Shorthand constructor for a [[SimMetricsOutputs]] from a list of [[MetricsOutput]]s. */ - def apply(handlers:SimMetricsOutput*):SimMetricsOutputs = SimMetricsOutputs(Queue[SimMetricsOutput]() ++ handlers) -} - - -/** Generates a string representation of the metrics using a generalized CSV format. */ -trait SimMetricsStringOutput extends SimMetricsOutput { - /** A string representing null values. */ - val nullValue = "NULL" - - /** The field names for [[TaskMetrics]]. - * @param separator a string (such as a space or comma) to separate the names - */ - def taskHeader(separator:String) = Seq("ID","Task","Simulation","Created","Start","Delay","Duration","Cost","Resources").mkString(separator) - - /** String representation of a [[TaskMetrics]] instance. - * - * @param separator a string (such as a space or comma) to separate the values - * @param resSeparator a string (such as a space or comma) to separate the list of names of [[TaskResources]] in the [[TaskMetrics]] - * @param m the [[TaskMetrics]] instance to be handled - */ - def taskCSV(separator:String, resSeparator:String)(m:TaskMetrics) = m match { - case TaskMetrics(id,task,sim,ct,st,dur,cost,res) => - Seq(id,task,sim,ct,MetricsOutput.formatOption(st,nullValue),m.delay,dur,cost,res.mkString(resSeparator)).mkString(separator) - } - - /** The field names for [[SimulationMetrics]]. 
- * @param separator a string (such as a space or comma) to separate the names - */ - def simHeader(separator:String) = Seq("Name","Start","Duration","Delay","Tasks","Cost","Result").mkString(separator) - - /** String representation of a [[SimulationMetrics]] instance. - * - * @param separator a string (such as a space or comma) to separate the values - * @param m the [[SimulationMetrics]] instance to be handled - */ - def simCSV(separator:String)(m:SimulationMetrics) = m match { - case SimulationMetrics(name,st,dur,delay,ts,c,res) => - Seq(name,st,dur,delay,ts,c,res).mkString(separator) - } - - /** The field names for [[ResourceMetrics]]. - * @param separator a string (such as a space or comma) to separate the names - */ - def resHeader(separator:String) = Seq("Name","Busy","Idle","Tasks","Cost").mkString(separator) - - /** String representation of a [[ResourceMetrics]] instance. - * - * @param separator a string (such as a space or comma) to separate the values - * @param m the [[ResourceMetrics]] instance to be handled - */ - def resCSV(separator:String)(m:ResourceMetrics) = m match { - case ResourceMetrics(name,b,i,ts,c) => - Seq(name,b,i,ts,c).mkString(separator) - } - - - /** Formats all [[TaskMetrics]] in a [[SimMetricsAggregator]] in a single string. - * - * @param aggregator the [[SimMetricsAggregator]] to retrieve the metrics to be formatted - * @param separator a string (such as a space or comma) to separate values - * @param lineSep a string (such as a new line) to separate tasks - * @param resSeparator a string (such as a space or comma) to separate the list of names of [[TaskResources]] in the [[TaskMetrics]] - */ - def tasks(aggregator:SimMetricsAggregator,separator:String,lineSep:String="\n",resSeparator:String=";") = - aggregator.taskMetrics.map(taskCSV(separator,resSeparator)).mkString(lineSep) - - /** Formats all [[SimulationMetrics]] in a [[SimMetricsAggregator]] in a single string. 
- * - * @param aggregator the [[SimMetricsAggregator]] to retrieve the metrics to be formatted - * @param separator a string (such as a space or comma) to separate values - * @param lineSep a string (such as a new line) to separate simulations - */ - def simulations(aggregator:SimMetricsAggregator,separator:String, lineSep:String="\n") = - aggregator.simulationMetrics.map(simCSV(separator)).mkString(lineSep) - - /** Formats all [[ResourceMetrics]] in a [[SimMetricsAggregator]] in a single string. - * - * @param aggregator the [[SimMetricsAggregator]] to retrieve the metrics to be formatted - * @param separator a string (such as a space or comma) to separate values - * @param lineSep a string (such as a new line) to separate resources - */ - def resources(aggregator:SimMetricsAggregator,separator:String, lineSep:String="\n") = - aggregator.resourceMetrics.map(resCSV(separator)).mkString(lineSep) -} - - -/** Prints all simulation metrics to standard output. */ -class SimMetricsPrinter extends SimMetricsStringOutput { - def apply(totalTicks:Long,aggregator:SimMetricsAggregator) = { - /** Separates the values. */ - val sep = "\t| " - /** Separates metrics instances. */ - val lineSep = "\n" - /** Default time format using `java.text.SimpleDateFormat`. */ - val timeFormat = "YYYY-MM-dd HH:mm:ss.SSS" - /** Default duration format using [[org.apache.commons.lang3.time.DurationFormatUtils.formatDuration]]. */ - val durFormat = "HH:mm:ss.SSS" - /** A string representing null time values. 
*/ - val nullTime = "NONE" - println( -s""" -Tasks ------ -${taskHeader(sep)} -${tasks(aggregator,sep,lineSep)} - -Simulations ------------ -${simHeader(sep)} -${simulations(aggregator,sep,lineSep)} - -Resources ---------- -${resHeader(sep)} -${resources(aggregator,sep,lineSep)} ---------- - -Started: ${MetricsOutput.formatTimeOption(aggregator.start, timeFormat, nullTime)} -Ended: ${MetricsOutput.formatTimeOption(aggregator.end, timeFormat, nullTime)} -Duration: ${MetricsOutput.formatDuration(aggregator.start, aggregator.end, durFormat, nullTime)} -""" - ) - } -} - -/** Outputs simulation metrics to files using a standard CSV format. - * Generates 3 CSV files, - * 1. One for tasks with a "-tasks.csv" suffix, - * 2. One for simulations with a "-simulations.csv" suffix. - * 3. One for resources with a "-resources.csv" suffix. - * - * @param path path to directory where the files will be placed - * @param name file name prefix - */ -class SimCSVFileOutput(path:String,name:String) extends SimMetricsStringOutput with FileOutput { - import java.io._ - - val separator = "," - val lineSep = "\n" - - def apply(totalTicks:Long,aggregator:SimMetricsAggregator) = { - val taskFile = s"$path$name-tasks.csv" - val simulationFile = s"$path$name-simulations.csv" - val resourceFile = s"$path$name-resources.csv" - writeToFile(taskFile, taskHeader(separator) + "\n" + tasks(aggregator,separator,lineSep)) - writeToFile(simulationFile, simHeader(separator) + "\n" + simulations(aggregator,separator,lineSep)) - writeToFile(resourceFile, resHeader(separator) + "\n" + resources(aggregator,separator,lineSep)) - } -} - - -/** Outputs simulation metrics to a file using the d3-timeline format. - * Generates 1 file with a "-simdata.js" suffix. - * This can then be combined with the resources at - * [[https://github.com/PetrosPapapa/WorkflowFM-PEW/tree/master/resources/d3-timeline]] - * to render the timeline in a browser. 
- * - * The timeline can display either virtual units of time or millisecond durations. - * If we choose the latter, we can provide the size of the virtual unit of time in milliseconds - * and all the durations will be scaled to that. For example, if our virtual unit of time is minutes, - * we need to provide a `tick` value of 60000. - * - * @param path path to directory where the files will be placed - * @param file file name prefix - * @param tick the size of 1 unit of virtual time - */ -class SimD3Timeline(path:String,file:String,tick:Int=1) extends SimMetricsOutput with FileOutput { - import java.io._ - - override def apply(totalTicks:Long, aggregator:SimMetricsAggregator) = { - val result = build(aggregator,System.currentTimeMillis()) - println(result) - val dataFile = s"$path$file-simdata.js" - writeToFile(dataFile, result) - } - - /** Helps build the output with a static system time. */ - def build(aggregator:SimMetricsAggregator, now:Long) = { - var buf:StringBuilder = StringBuilder.newBuilder - buf.append("var tasks = [\n") - for (p <- aggregator.taskSet) buf.append(s"""\t"$p",\n""") - buf.append("];\n\n") - buf.append("var resourceData = [\n") - for (m <- aggregator.resourceMetrics) buf.append(s"""${resourceEntry(m, aggregator)}\n""") - buf.append("];\n\n") - buf.append("var simulationData = [\n") - for (m <- aggregator.simulationMetrics) buf.append(s"""${simulationEntry(m, aggregator)}\n""") - buf.append("];\n") - buf.toString - } - - def simulationEntry(s:SimulationMetrics,agg:SimMetricsAggregator) = { - val times = agg.taskMetricsOf(s).map(taskEntry).mkString(",\n") -s"""{label: "${s.name}", times: [ -$times -]},""" - } - - def resourceEntry(res:ResourceMetrics,agg:SimMetricsAggregator) = { - val times = agg.taskMetricsOf(res).map(taskEntry).mkString(",\n") -s"""{label: "${res.name}", times: [ -$times -]},""" - } - - - def taskEntry(m:TaskMetrics) = { - // we don't want it to be 0 because D3 doesn't deal with it well - val start = 
m.started.getOrElse(1L) * tick - val finish = (m.started.getOrElse(1L) + m.duration) * tick - val delay = m.delay * tick - s"""\t{"label":"${m.fullName}", task: "${m.task}", "id":${m.id}, "starting_time": $start, "ending_time": $finish, delay: $delay, cost: ${m.cost}}""" - } -} diff --git a/test/com/workflowfm/pew/simulation/CoordinatorTests.scala b/test/com/workflowfm/pew/simulation/CoordinatorTests.scala deleted file mode 100644 index 9925889c..00000000 --- a/test/com/workflowfm/pew/simulation/CoordinatorTests.scala +++ /dev/null @@ -1,204 +0,0 @@ -package com.workflowfm.pew.simulation - -import akka.actor.ActorSystem -import akka.testkit.{ ImplicitSender, TestActors, TestKit } -import org.scalatest.{ BeforeAndAfterAll, Matchers, WordSpecLike } -import org.junit.runner.RunWith -import org.scalatest.junit.JUnitRunner -import scala.concurrent._ -import scala.concurrent.Await -import scala.concurrent.duration._ -import com.workflowfm.pew._ -import com.workflowfm.pew.execution._ -import com.workflowfm.pew.simulation.metrics._ - - -@RunWith(classOf[JUnitRunner]) -class CoordinatorTests extends TestKit(ActorSystem("CoordinatorTests")) with WordSpecLike with Matchers with BeforeAndAfterAll with ImplicitSender { - implicit val executionContext = ExecutionContext.global //system.dispatchers.lookup("akka.my-dispatcher") - implicit val timeout:FiniteDuration = 10.seconds - - override def afterAll:Unit = { - TestKit.shutdownActorSystem(system) - } - - "The Coordinator" must { - - val executor = new AkkaExecutor() - val handler = SimMetricsOutputs(new SimMetricsPrinter()) - - //expectNoMessage(200.millis) - "execute a simple task" in { - val resA = new TaskResource("A",1) - val resB = new TaskResource("B",1) - - val coordinator = system.actorOf(Coordinator.props(DefaultScheduler)) - val s = new TaskSimulation("S", coordinator, Seq("A","B"), new ConstantGenerator(2), new ConstantGenerator(2), -1, Task.Highest) - - coordinator ! 
Coordinator.AddResources(Seq(resA,resB)) - coordinator ! Coordinator.AddSim(1,s,executor) - coordinator ! Coordinator.Start - - val done = expectMsgType[Coordinator.Done](20.seconds) - done.time should be (3L) - } - - "execute two independent tasks in parallel" in { - val resA = new TaskResource("A",1) - val resB = new TaskResource("B",1) - - val coordinator = system.actorOf(Coordinator.props(DefaultScheduler)) - val s1 = new TaskSimulation("S1", coordinator, Seq("A"), new ConstantGenerator(2), new ConstantGenerator(2), -1, Task.Highest) - val s2 = new TaskSimulation("S2", coordinator, Seq("B"), new ConstantGenerator(2), new ConstantGenerator(2), -1, Task.Highest) - - coordinator ! Coordinator.AddResources(Seq(resA,resB)) - coordinator ! Coordinator.AddSim(1,s1,executor) - coordinator ! Coordinator.AddSim(1,s2,executor) - coordinator ! Coordinator.Start - - val done = expectMsgType[Coordinator.Done](20.seconds) - done.time should be (3L) - } - - "queue two tasks with the same resource" in { - val resA = new TaskResource("A",1) - - val coordinator = system.actorOf(Coordinator.props(DefaultScheduler)) - val s1 = new TaskSimulation("S1", coordinator, Seq("A"), new ConstantGenerator(2), new ConstantGenerator(2), -1, Task.Highest) - val s2 = new TaskSimulation("S2", coordinator, Seq("A"), new ConstantGenerator(2), new ConstantGenerator(2), -1, Task.Highest) - - coordinator ! Coordinator.AddResources(Seq(resA)) - coordinator ! Coordinator.AddSim(1,s1,executor) - coordinator ! Coordinator.AddSim(1,s2,executor) - coordinator ! 
Coordinator.Start - - val done = expectMsgType[Coordinator.Done](20.seconds) - done.time should be (5L) - } - - "measure delays and idling appropriately" in { - val resA = new TaskResource("A",1) - val resB = new TaskResource("B",1) - - val coordinator = system.actorOf(Coordinator.props(DefaultScheduler)) - val s1 = new TaskSimulation("S1", coordinator, Seq("A"), new ConstantGenerator(2), new ConstantGenerator(2), -1, Task.Highest) - val s2 = new TaskSimulation("S2", coordinator, Seq("A","B"), new ConstantGenerator(2), new ConstantGenerator(2), -1, Task.Medium) - - coordinator ! Coordinator.AddResources(Seq(resA,resB)) - coordinator ! Coordinator.AddSim(1,s1,executor) - coordinator ! Coordinator.AddSim(1,s2,executor) - coordinator ! Coordinator.Start - - val done = expectMsgType[Coordinator.Done](20.seconds) - done.time should be (5L) - val metricB = done.metrics.resourceMetrics.find { x => x.name.equals("B") } - metricB should not be empty - metricB map { x => x.idleTime should be (2L) } - val metricS2T = done.metrics.taskMetrics.find { x => x.fullName.equals("S2Task(S2)") } - metricS2T should not be empty - metricS2T map { x => x.delay should be (2L) } - } - - "measure intermediate delays and idling appropriately" in { - val resA = new TaskResource("A",1) - val resB = new TaskResource("B",1) - - val coordinator = system.actorOf(Coordinator.props(DefaultScheduler)) - val s1 = new TaskSimulation("S1", coordinator, Seq("A"), new ConstantGenerator(2), new ConstantGenerator(2), -1, Task.Highest) - val s2 = new TaskSimulation("S2", coordinator, Seq("B"), new ConstantGenerator(3), new ConstantGenerator(2), -1, Task.Highest) - val s3 = new TaskSimulation("S3", coordinator, Seq("A","B"), new ConstantGenerator(2), new ConstantGenerator(2), -1, Task.Medium) - - coordinator ! Coordinator.AddResources(Seq(resA,resB)) - coordinator ! Coordinator.AddSim(1,s1,executor) - coordinator ! Coordinator.AddSim(1,s2,executor) - coordinator ! 
Coordinator.AddSim(2,s3,executor) - coordinator ! Coordinator.Start - - val done = expectMsgType[Coordinator.Done](20.seconds) - - done.time should be (6L) - val metricA = done.metrics.resourceMetrics.find { x => x.name.equals("A") } - metricA should not be empty - metricA map { x => x.idleTime should be (1L) } - val metricS3T = done.metrics.taskMetrics.find { x => x.fullName.equals("S3Task(S3)") } - metricS3T should not be empty - metricS3T map { x => x.delay should be (2L) } - } - - "run a task with no resources" in { - val coordinator = system.actorOf(Coordinator.props(DefaultScheduler)) - val s1 = new TaskSimulation("S1", coordinator, Seq(), new ConstantGenerator(2), new ConstantGenerator(2), -1, Task.Highest) - - coordinator ! Coordinator.AddSim(1,s1,executor) - coordinator ! Coordinator.Start - - val done = expectMsgType[Coordinator.Done](20.seconds) - handler(done.time,done.metrics) - - done.time should be (3) - done.metrics.resourceMetrics.isEmpty should be (true) - done.metrics.simulationMetrics.size should be (1) - done.metrics.taskMetrics.size should be (1) - } - - "run multiple tasks with no resources" in { - val coordinator = system.actorOf(Coordinator.props(DefaultScheduler)) - val s1 = new TaskSimulation("S1", coordinator, Seq(), new ConstantGenerator(2), new ConstantGenerator(2), -1, Task.Highest) - val s2 = new TaskSimulation("S2", coordinator, Seq(), new ConstantGenerator(2), new ConstantGenerator(2), -1, Task.Highest) - - coordinator ! Coordinator.AddSim(1,s1,executor) - coordinator ! Coordinator.AddSim(1,s2,executor) - coordinator ! 
Coordinator.Start - - val done = expectMsgType[Coordinator.Done](20.seconds) - handler(done.time,done.metrics) - - done.time should be (3L) - done.metrics.resourceMetrics.isEmpty should be (true) - done.metrics.simulationMetrics.size should be (2) - done.metrics.taskMetrics.size should be (2) - } - } - - "The MetricsActor" must { - - val executor = new AkkaExecutor() - val handler = SimMetricsOutputs(new SimMetricsPrinter()) - - //expectNoMessage(200.millis) - "work properly" in { - println ("*** MetricsActor results should appear here:") - - val resA = new TaskResource("A",1) - - val coordinator = system.actorOf(Coordinator.props(DefaultScheduler)) - val s = new TaskSimulation("S", coordinator, Seq("A"), new ConstantGenerator(1), new ConstantGenerator(2), -1, Task.Highest) - - coordinator ! Coordinator.AddResources(Seq(resA)) - coordinator ! Coordinator.AddSim(1,s,executor) - - val metricsActor = system.actorOf(SimMetricsActor.props(handler,Some(self))) - metricsActor ! SimMetricsActor.Start(coordinator) - - expectMsgType[Coordinator.Done](20.seconds) - } - - "start multiple simulations with one message" in { - println ("*** MetricsActor multiple results should appear here:") - - val resA = new TaskResource("A",1) - - val coordinator = system.actorOf(Coordinator.props(DefaultScheduler)) - val s1 = new TaskSimulation("S1", coordinator, Seq("A"), new ConstantGenerator(1), new ConstantGenerator(2), -1, Task.Highest) - val s2 = new TaskSimulation("S2", coordinator, Seq("A"), new ConstantGenerator(1), new ConstantGenerator(2), -1, Task.Highest) - - coordinator ! Coordinator.AddResources(Seq(resA)) - - val metricsActor = system.actorOf(SimMetricsActor.props(handler,Some(self))) - metricsActor ! 
SimMetricsActor.StartSims(coordinator,Seq((1,s1),(1,s2)),executor) - - expectMsgType[Coordinator.Done](20.seconds) - } - } -} - diff --git a/test/com/workflowfm/pew/simulation/Scheduler.scala b/test/com/workflowfm/pew/simulation/Scheduler.scala deleted file mode 100644 index fe224470..00000000 --- a/test/com/workflowfm/pew/simulation/Scheduler.scala +++ /dev/null @@ -1,191 +0,0 @@ -package com.workflowfm.pew.simulation - -import org.scalatest.{ BeforeAndAfterAll, Matchers, WordSpecLike } -import org.junit.runner.RunWith -import org.scalatest.junit.JUnitRunner - -@RunWith(classOf[JUnitRunner]) -class SchedulerTests extends WordSpecLike with Matchers with BeforeAndAfterAll - with TaskTester with ScheduleTester { - - "The Schedule" must { - - "fit a task at the edge of another" in { - s((1,2)) + (2,3) should be (Some(s((1,3)))) - s((3,4)) + (2,3) should be (Some(s((2,4)))) - } - - "fit a task at the edge of two others" in { - s((1,2),(3,4)) + (2,3) should be (Some(s((1,4)))) - s((1,2),(4,5)) + (2,3) should be (Some(s((1,3),(4,5)))) - s((1,2),(4,5)) + (3,4) should be (Some(s((1,2),(3,5)))) - } - - "fit a task in gaps" in { - s((1,2)) + (3,4) should be (Some(s((1,2),(3,4)))) - s((3,4)) + (1,2) should be (Some(s((1,2),(3,4)))) - s((1,2),(3,4)) + (5,6) should be (Some(s((1,2),(3,4),(5,6)))) - s((1,2),(5,6)) + (3,4) should be (Some(s((1,2),(3,4),(5,6)))) - s((3,4),(5,6)) + (1,2) should be (Some(s((1,2),(3,4),(5,6)))) - } - - "not fit tasks that clash with the start of another task" in { - s((1,2)) + (1,3) should be (None) - s((1,4)) + (1,3) should be (None) - s((2,3)) + (1,3) should be (None) - s((2,4)) + (1,3) should be (None) - } - - "not fit tasks that clash with the end of another task" in { - s((2,4)) + (3,4) should be (None) - s((3,4)) + (2,4) should be (None) - s((1,4)) + (1,5) should be (None) - s((1,5)) + (1,4) should be (None) - s((2,3)) + (1,3) should be (None) - } - - "not fit tasks that overlap with another task" in { - s((1,3)) + (1,3) should be (None) - 
s((1,4)) + (2,3) should be (None) - s((2,3)) + (1,4) should be (None) - } - - "not fit tasks that clash with two other tasks" in { - s((1,2),(4,6)) + (2,5) should be (None) - s((1,2),(4,6)) + (3,5) should be (None) - s((1,2),(4,6)) + (2,6) should be (None) - s((1,2),(4,6)) + (3,6) should be (None) - s((1,2),(4,6)) + (2,7) should be (None) - s((1,2),(4,6)) + (3,7) should be (None) - s((1,3),(4,6)) + (2,4) should be (None) - } - - "merge single tasks with common start" in { - s((1,2)) ++ s((1,2)) should be (s((1,2))) - s((1,2)) ++ s((1,3)) should be (s((1,3))) - s((1,3)) ++ s((1,2)) should be (s((1,3))) - } - - "merge single tasks with common finish" in { - s((1,3)) ++ s((2,3)) should be (s((1,3))) - s((2,3)) ++ s((1,3)) should be (s((1,3))) - } - - "merge single tasks that don't overlap" in { - s((1,2)) ++ s((2,3)) should be (s((1,3))) - s((1,2)) ++ s((3,4)) should be (s((1,2),(3,4))) - s((1,2),(5,6)) ++ s((3,4)) should be (s((1,2),(3,4),(5,6))) - s((2,3)) ++ s((1,2)) should be (s((1,3))) - s((3,4)) ++ s((1,2)) should be (s((1,2),(3,4))) - s((3,4)) ++ s((1,2),(5,6)) should be (s((1,2),(3,4),(5,6))) - } - - "merge multiple tasks with one overlapping task" in { - s((1,2),(3,4),(5,6)) ++ s((1,6)) should be (s((1,6))) - s((1,2),(3,4),(5,6)) ++ s((0,6)) should be (s((0,6))) - s((1,2),(3,4),(5,6)) ++ s((1,7)) should be (s((1,7))) - s((1,2),(3,4),(5,6)) ++ s((0,7)) should be (s((0,7))) - s((1,6)) ++ s((1,2),(3,4),(5,6)) should be (s((1,6))) - s((0,6)) ++ s((1,2),(3,4),(5,6)) should be (s((0,6))) - s((1,7)) ++ s((1,2),(3,4),(5,6)) should be (s((1,7))) - s((0,7)) ++ s((1,2),(3,4),(5,6)) should be (s((0,7))) - } - - "merge multiple overlapping tasks" in { - s((1,2),(3,4),(5,6)) ++ s((2,3),(4,5),(6,7)) should be (s((1,7))) - s((1,2),(3,4),(5,6)) ++ s((2,3),(4,5)) should be (s((1,6))) - } - } - - "The DefaultScheduler" must { - - "select a single task" in { - val m = new TestResourceMap("A") - m.s(t(1L,Seq("A"))) should be (Seq(1L)) - } - - "select multiple tasks" in { - val m 
= new TestResourceMap("A","B") - m.s( - t(1L,Seq("A")), - t(2L,Seq("B")) - ) should be (Seq(1L,2L)) - } - - "select an earlier task" in { - val m = new TestResourceMap("A") - m.s( - t(1L,Seq("A"),Task.Medium,2L), - t(2L,Seq("A"),Task.Medium,1L)) should be (Seq(2L)) - } - - "not select a blocked task" in { - val m = new TestResourceMap("A","B") + ("B",1L) - m.s( - t(1L,Seq("A","B"),Task.Highest), - t(2L,Seq("A"),Task.VeryLow,0L,2L)) should be (Nil) - } - - "select a lower priority task if it will finish on time" in { - val m = new TestResourceMap("A","B") + ("B",1L) - m.s( - t(1L,Seq("A","B"),Task.Highest), - t(2L,Seq("A"),Task.VeryLow)) should be (List(2L)) - } - - "not block higher priority tasks" in { - val m = new TestResourceMap("A","B") + ("B",1L) - //DefaultScheduler.nextEstimatedTaskStart(t(1L,Seq("A","B"),Task.Highest), 0L, m.m,Seq( t(1L,Seq("A","B"),Task.Highest),t(2L,Seq("A"),Task.VeryLow,0L,100L))) should be (1L) - m.s( - t(1L,Seq("A","B"),Task.Highest), - t(2L,Seq("A"),Task.VeryLow,0L,100L)) should be (Nil) - } - - "not block higher priority tasks based on ordering" in { - val m = new TestResourceMap("A","B") + ("B",1L) - m.s( - t(1L,Seq("A","B"),Task.Medium,0L), - t(2L,Seq("A"),Task.Medium,2L,100L)) should be (Nil) - } - - "not block higher priority tasks of other resources" in { - val m = new TestResourceMap("A","B") //+ ("B",1L) - m.s( - t(1L,Seq("B"),Task.Highest), - t(2L,Seq("A","B"),Task.VeryLow,0L,100L)) should be (Seq(1L)) - } - - "consider all higher priority tasks for availability" in { - val m = new TestResourceMap("A","B") + ("B",1L) - m.s( - t(1L,Seq("B"),Task.Highest), - t(2L,Seq("A","B"),Task.Medium), - t(3L,Seq("A"),Task.VeryLow,0L,2L)) should be (List(3L)) - } - - } - -} - -trait ScheduleTester { - def s(l:(Long,Long)*) = Schedule(l.toList) -} - -class TestResourceMap(names:String*) { - // create a resource map - val m:Map[String,TaskResource] = Map[String,TaskResource]() ++ (names map { n => (n,r(n)) }) - - // create a resource - def 
r(name:String) = new TaskResource(name,0) - - // pre-attach Tasks to resources - def +(r:String,duration:Long):TestResourceMap = { - m.get(r).map { _.startTask(new Task(0,"_","_",0L,Seq(r),duration,duration,0L), 0L) } - this - } - - // test DefaultScheduler - def s(tasks:Task*):Seq[Long] = - DefaultScheduler.getNextTasks(tasks.sorted, 0L, m) map (_.id) - -} diff --git a/test/com/workflowfm/pew/simulation/Task.scala b/test/com/workflowfm/pew/simulation/Task.scala deleted file mode 100644 index 3226e66a..00000000 --- a/test/com/workflowfm/pew/simulation/Task.scala +++ /dev/null @@ -1,52 +0,0 @@ -package com.workflowfm.pew.simulation - -import org.scalatest.{ BeforeAndAfterAll, Matchers, WordSpecLike } -import org.junit.runner.RunWith -import org.scalatest.junit.JUnitRunner - -@RunWith(classOf[JUnitRunner]) -class TaskTests extends WordSpecLike with Matchers with BeforeAndAfterAll with TaskTester { - - "Task priority" must { - - "prioritize higher priority" in { - t(2L,Seq("A"),Task.High,2L,1L,0) < t(1L,Seq("A","B"),Task.Medium,1L,2L,1) should be (true) - } - - "prioritize old age" in { - t(2L,Seq("A","B"),Task.Medium,2L,1L,0) > t(1L,Seq("A"),Task.Medium,1L,2L,1) should be (true) - } - - "prioritize more resources" in { - t(2L,Seq("A","B"),Task.Medium,0L,1L,0) < t(1L,Seq("A"),Task.Medium,0L,2L,1) should be (true) - } - - "prioritize longer duration" in { - t(2L,Seq("A"),Task.Medium,0L,1L,0) > t(1L,Seq("A"),Task.Medium,0L,2L,1) should be (true) - } - - "prioritize lower interrupt" in { - t(2L,Seq("A"),Task.Medium,0L,1L,0) < t(1L,Seq("A"),Task.Medium,0L,1L,1) should be (true) - } - - "prioritize lower ID if all else fails" in { - t(2L,Seq("A"),Task.Medium,0L,1L,0) > t(1L,Seq("A"),Task.Medium,0L,1L,0) should be (true) - } - - } -} - - -trait TaskTester { - // create a Task - def t( - id:Long, - resources:Seq[String], - priority:Task.Priority=Task.Medium, - created:Long = 0L, - duration:Long = 1L, - interrupt:Int = 0, - name:String="X" - ) = - new 
Task(id,name,"Test",created,resources,duration,duration,0L,interrupt,priority) -} From b6c33b1d367b4ee1fb31bf24e37fb7b7453520ad Mon Sep 17 00:00:00 2001 From: Petros Papapanagiotou Date: Sat, 13 Jul 2019 16:25:07 +0100 Subject: [PATCH 25/55] Updates d3-timeline to crop overflowing text Also fixes bad reference to simulationData variable --- resources/d3-timeline/d3-timeline.js | 151 +++++++++--------- .../d3-timeline/data-example-simulation.js | 2 +- .../d3-timeline/pew-timeline-simulation.js | 8 +- resources/d3-timeline/pew-timeline.js | 4 +- 4 files changed, 82 insertions(+), 83 deletions(-) diff --git a/resources/d3-timeline/d3-timeline.js b/resources/d3-timeline/d3-timeline.js index 3e5a4f42..bd894392 100644 --- a/resources/d3-timeline/d3-timeline.js +++ b/resources/d3-timeline/d3-timeline.js @@ -134,7 +134,7 @@ var appendBackgroundBar = function (yAxisMapping, index, g, data, datum) { var greenbarYAxis = ((itemHeight + itemMargin) * yAxisMapping[index]) + margin.top; - g.selectAll("svg").data(data).enter() + g.selectAll("svg").filter(".timeline").data(data).enter() .insert("rect") .attr("class", "row-green-bar") .attr("x", fullLengthBackgrounds ? 0 : margin.left) @@ -270,69 +270,78 @@ } if (backgroundColor) { appendBackgroundBar(yAxisMapping, index, g, data, datum); } - - g.selectAll("svg").data(data).enter() - .append(function(d, i) { - return document.createElementNS(d3.ns.prefix.svg, "display" in d? 
d.display:display); - }) - .attr("x", getXPos) - .attr("y", getStackPosition) - .attr("width", function (d, i) { - return (d.ending_time - d.starting_time) * scaleFactor; - }) - .attr("cy", function(d, i) { - return getStackPosition(d, i) + itemHeight/2; - }) - .attr("cx", getXPos) - .attr("r", itemHeight / 2) - .attr("height", itemHeight) - .style("stroke", "black") // added by Petros - .style("fill", function(d, i){ - var dColorPropName; - if (d.color) return d.color; - if( colorPropertyName ){ - dColorPropName = d[colorPropertyName]; - if ( dColorPropName ) { - return colorCycle( dColorPropName ); - } else { - return colorCycle( datum[colorPropertyName] ); - } - } - return colorCycle(index); - }) - .on("mousemove", function (d, i) { - hover(d, index, datum); - }) - .on("mouseover", function (d, i) { - mouseover(d, i, datum); - }) - .on("mouseout", function (d, i) { - mouseout(d, i, datum); - }) - .on("click", function (d, i) { - click(d, index, datum); - }) - .attr("class", function (d, i) { - return datum.class ? "timelineSeries_"+datum.class : "timelineSeries_"+index; - }) - .attr("id", function(d, i) { - // use deprecated id field - if (datum.id && !d.id) { - return 'timelineItem_'+datum.id; - } - - return d.id ? d.id : "timelineItem_"+index+"_"+i; - }) + + var svgs = g.selectAll("svg").filter(".timeline").data(data).enter() + .append(function(d, i) { + return document.createElementNS(d3.ns.prefix.svg, "svg"); + }) + .attr("x", getXPos) + .attr("y", getStackPosition) + .attr("width", function (d, i) { + return (d.ending_time - d.starting_time) * scaleFactor + 2; + }) + .attr("height", itemHeight + 2) ; - g.selectAll("svg").data(data).enter() - .append("text") - .attr("x", getXTextPos) - .attr("y", getStackTextPosition) - .text(function(d) { - return d.label; - }) - ; + svgs.append(function(d, i ) { + return document.createElementNS(d3.ns.prefix.svg, "display" in d? 
d.display:display); + }) + .attr("x", 1) + .attr("y", 1) + .attr("width", function (d, i) { + return (d.ending_time - d.starting_time) * scaleFactor; + }) + .attr("height", itemHeight) + .style("stroke", "black") // added by Petros + .attr("cy", function(d, i) { + return getStackPosition(d, i) + itemHeight/2; + }) + .attr("cx", getXPos) + .attr("r", itemHeight / 2) + .style("fill", function(d, i){ + var dColorPropName; + if (d.color) return d.color; + if( colorPropertyName ){ + dColorPropName = d[colorPropertyName]; + if ( dColorPropName ) { + return colorCycle( dColorPropName ); + } else { + return colorCycle( datum[colorPropertyName] ); + } + } + return colorCycle(index); + }) + .on("mousemove", function (d, i) { + hover(d, index, datum); + }) + .on("mouseover", function (d, i) { + mouseover(d, i, datum); + }) + .on("mouseout", function (d, i) { + mouseout(d, i, datum); + }) + .on("click", function (d, i) { + click(d, index, datum); + }) + .attr("class", function (d, i) { + return datum.class ? "timelineSeries_"+datum.class : "timelineSeries_"+index; + }) + .attr("id", function(d, i) { + // use deprecated id field + if (datum.id && !d.id) { + return 'timelineItem_'+datum.id; + } + + return d.id ? 
d.id : "timelineItem_"+index+"_"+i; + }); + + svgs.append("text") + .attr("x", 6) + .attr("y", itemHeight * 0.75 + 1) + .text(function(d) { + return d.label; + }) + ; if (rowSeparatorsColor) { var lineYAxis = ( itemHeight + itemMargin / 2 + margin.top + (itemHeight + itemMargin) * yAxisMapping[index]); @@ -360,15 +369,9 @@ function getStackPosition(d, i) { if (stacked) { - return margin.top + (itemHeight + itemMargin) * yAxisMapping[index]; + return margin.top + (itemHeight + itemMargin) * yAxisMapping[index] - 1; } - return margin.top; - } - function getStackTextPosition(d, i) { - if (stacked) { - return margin.top + (itemHeight + itemMargin) * yAxisMapping[index] + itemHeight * 0.75; - } - return margin.top + itemHeight * 0.75; + return margin.top - 1; } }); }); @@ -424,18 +427,14 @@ } function getXPos(d, i) { - return margin.left + (d.starting_time - beginning) * scaleFactor; - } - - function getXTextPos(d, i) { - return margin.left + (d.starting_time - beginning) * scaleFactor + 5; + return margin.left + (d.starting_time - beginning) * scaleFactor - 1; } function setHeight() { if (!height && !gParentItem.attr("height")) { if (itemHeight) { // set height based off of item height - height = gSize.height + gSize.top - gParentSize.top; + height = g[0][0].getBBox().height + gSize.top; // - gParentSize.top; // set bounding rectangle height d3.select(gParent[0][0]).attr("height", height); } else { diff --git a/resources/d3-timeline/data-example-simulation.js b/resources/d3-timeline/data-example-simulation.js index c031ce20..d174a02d 100644 --- a/resources/d3-timeline/data-example-simulation.js +++ b/resources/d3-timeline/data-example-simulation.js @@ -58,7 +58,7 @@ var resourceData = [ ]}, ]; -var workflowData = [ +var simulationData = [ {label: "D2", times: [ {"label":"AwardContract(D2)", task: "AwardContract", "starting_time": 0, "ending_time": 1, delay: 0, cost: 2}, {"label":"ProvideService(D2)", task: "ProvideService", "starting_time": 1, "ending_time": 6, delay: 
0, cost: 6}, diff --git a/resources/d3-timeline/pew-timeline-simulation.js b/resources/d3-timeline/pew-timeline-simulation.js index 061324ce..323ef30d 100644 --- a/resources/d3-timeline/pew-timeline-simulation.js +++ b/resources/d3-timeline/pew-timeline-simulation.js @@ -19,7 +19,7 @@ function displayResults(selection,data) { var chart = d3.timeline() .tickFormat( // - {format: d3.format.utc("03d"), + {format: d3.format("03d"), tickInterval: 1, // This forces us to start from 001 numTicks: totalTicks, //tickValues: ticks, // Use this to start from 000 @@ -59,7 +59,7 @@ function displayResults(selection,data) { // datum is the data object div.style("left", (d3.event.pageX) + "px") .style("top", (d3.event.pageY - 28) + "px"); - div.text(d.task + "\n" + + div.text(d.label + "\n" + chart.tickFormat().format(new Date(d.starting_time)) + "-" + chart.tickFormat().format(new Date(d.ending_time)) + "\n" + "Delay: " + chart.tickFormat().format(new Date(d.delay)) + "\n" + "Cost: " + d.cost @@ -104,7 +104,7 @@ function workflow(datum) { function newWorkflow(datum) { var selection = d3.select(this); selection.append("p").text(datum.id); - selection.append("svg") + selection.append("svg").attr("class","timeline") //.attr("width", '100%') //.attr("width", totalTicks*widthPerTick); displayResults(selection,datum.data); @@ -113,7 +113,7 @@ function newWorkflow(datum) { function displayOne(tag,workflowData) { var div = d3.select(tag)//.data(workflowData) div.selectAll("svg").remove() - div.append("svg") + div.append("svg").attr("class","timeline") displayResults(div,workflowData) } diff --git a/resources/d3-timeline/pew-timeline.js b/resources/d3-timeline/pew-timeline.js index d1736419..73b088b2 100644 --- a/resources/d3-timeline/pew-timeline.js +++ b/resources/d3-timeline/pew-timeline.js @@ -97,7 +97,7 @@ function workflow(datum) { function newWorkflow(datum) { var selection = d3.select(this); selection.append("p").text(datum.id); - selection.append("svg") + 
selection.append("svg").attr("class","timeline") //.attr("width", '100%') //.attr("width", totalTicks*widthPerTick); displayResults(selection,datum.data); @@ -117,7 +117,7 @@ function displayAll(tag,workflowData) { function displayOne(tag,workflowData) { var div = d3.select(tag)//.data(workflowData) div.selectAll("svg").remove() - div.append("svg") + div.append("svg").attr("class","timeline") displayResults(div,workflowData) } From 15765148b3a9a888598239a2e1a5dce80ced6a32 Mon Sep 17 00:00:00 2001 From: Petros Papapanagiotou Date: Wed, 17 Jul 2019 17:50:31 +0100 Subject: [PATCH 26/55] Sets up pew simulator environment --- .../pew/simulator/PiSimulation.scala | 20 ++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimulation.scala b/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimulation.scala index ca87d077..46550e52 100644 --- a/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimulation.scala +++ b/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimulation.scala @@ -1,4 +1,22 @@ package com.workflowfm.pew.simulator -class PiSimulation { +import com.workflowfm.pew.{ AtomicProcess, PiProcess } +import com.workflowfm.pew.execution.SimulatorExecutor +import com.workflowfm.simulator.{ SimulatedProcess, Simulation } +import scala.concurrent.Future + +trait SimulatedPiProcess extends AtomicProcess with SimulatedProcess { + override def isSimulatedProcess = true +} + +abstract class PiSimulation (val name: String) { + def run(executor: SimulatorExecutor[_]): Future[Any] + def getProcesses(): Seq[PiProcess] + def simulateWith(executor: SimulatorExecutor[_]) :Simulation = new PiSimulationInstance(this, executor) } + +class PiSimulationInstance(sim: PiSimulation, executor: SimulatorExecutor[_]) extends Simulation(sim.name) { + override def run(): Future[Any] = sim.run(executor) + override def simulationReady = executor.simulationReady +} + From 
8087884352031ab871db7ac145dfb27a24af1ce4 Mon Sep 17 00:00:00 2001 From: Petros Papapanagiotou Date: Mon, 29 Jul 2019 15:57:17 +0100 Subject: [PATCH 27/55] Introduces PiEventIdle and progress towards new simulator --- build.sbt | 2 +- .../pew/simulator/PiSimulation.scala | 16 +++++++++------ src/com/workflowfm/pew/PiEvents.scala | 20 +++++++++++++++++++ .../pew/execution/AkkaExecutor.scala | 2 ++ 4 files changed, 33 insertions(+), 7 deletions(-) diff --git a/build.sbt b/build.sbt index 8cccbd20..2469a300 100644 --- a/build.sbt +++ b/build.sbt @@ -48,7 +48,7 @@ lazy val simulator = project .settings( commonSettings, name := "pew-simulator", - libraryDependencies += "com.workflowfm" %% "workflowfm-simulator" % "0.1" + libraryDependencies += "com.workflowfm" %% "workflowfm-simulator" % "0.2-alpha-SNAPSHOT" ).dependsOn(rootRef) diff --git a/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimulation.scala b/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimulation.scala index 46550e52..13157f63 100644 --- a/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimulation.scala +++ b/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimulation.scala @@ -1,22 +1,26 @@ package com.workflowfm.pew.simulator +import akka.actor.ActorRef import com.workflowfm.pew.{ AtomicProcess, PiProcess } import com.workflowfm.pew.execution.SimulatorExecutor -import com.workflowfm.simulator.{ SimulatedProcess, Simulation } -import scala.concurrent.Future +import com.workflowfm.simulator.{ SimulatedProcess, SimulationActor } +import scala.concurrent.{ ExecutionContext, Future } trait SimulatedPiProcess extends AtomicProcess with SimulatedProcess { override def isSimulatedProcess = true } -abstract class PiSimulation (val name: String) { +abstract class PiSimulation (val name: String, val coordinator: ActorRef) { def run(executor: SimulatorExecutor[_]): Future[Any] def getProcesses(): Seq[PiProcess] - def simulateWith(executor: SimulatorExecutor[_]) :Simulation = new 
PiSimulationInstance(this, executor) + def simulateWith(executor: SimulatorExecutor[_])(implicit executionContext: ExecutionContext): PiSimulationActor = + new PiSimulationActor(this, executor) } -class PiSimulationInstance(sim: PiSimulation, executor: SimulatorExecutor[_]) extends Simulation(sim.name) { +class PiSimulationActor(sim: PiSimulation, executor: SimulatorExecutor[_]) + (override implicit val executionContext: ExecutionContext) + extends SimulationActor(sim.name, sim.coordinator) { override def run(): Future[Any] = sim.run(executor) - override def simulationReady = executor.simulationReady + def simulationReady = executor.simulationReady } diff --git a/src/com/workflowfm/pew/PiEvents.scala b/src/com/workflowfm/pew/PiEvents.scala index ede557f4..35cadc0d 100644 --- a/src/com/workflowfm/pew/PiEvents.scala +++ b/src/com/workflowfm/pew/PiEvents.scala @@ -78,6 +78,7 @@ object PiEvent { // PiInstance Level PiEvents case e: PiEventStart[KeyT] => e.copy( metadata = fn( e.metadata ) ) case e: PiEventResult[KeyT] => e.copy( metadata = fn( e.metadata ) ) + case e: PiEventIdle[KeyT] => e.copy( metadata = fn( e.metadata ) ) // PiInstance Level PiFailures case e: PiFailureNoResult[KeyT] => e.copy( metadata = fn( e.metadata ) ) @@ -138,6 +139,25 @@ case class PiEventResult[KeyT]( s" === [$id] RESULT: $res" } +/** Denotes the completion of all reductions and process calls. + * We are waiting for at least one process call to complete. + * This is useful in simulations so we know when to progress in virtual time. + * + * @note Not all executor implementations fire this event. + * @param i PiInstance representing the current state. + * @param metadata Metadata object. + * @tparam KeyT The type used to identify PiInstances. 
+ */ +case class PiEventIdle[KeyT]( + i: PiInstance[KeyT], + override val metadata: PiMetadataMap = PiMetadata() + + ) extends PiEvent[KeyT] with PiEventFinish[KeyT] { + + override def id: KeyT = i.id + override def asString: String = s" === [$id] Idling... " +} + //////////////////////////////////////////// // PiInstance Level Exceptions & Failures // diff --git a/src/com/workflowfm/pew/execution/AkkaExecutor.scala b/src/com/workflowfm/pew/execution/AkkaExecutor.scala index bd536fd4..c89638d8 100644 --- a/src/com/workflowfm/pew/execution/AkkaExecutor.scala +++ b/src/com/workflowfm/pew/execution/AkkaExecutor.scala @@ -102,6 +102,7 @@ class AkkaExecActor( //System.err.println("*** [" + ctr + "] Updating state after init") store = store.put(resi) (toCall zip futureCalls) map runThread(resi) + publish(PiEventIdle(resi)) } } } @@ -130,6 +131,7 @@ class AkkaExecActor( //System.err.println("*** [" + i.id + "] Updating state after: " + ref) store = store.put(resi) (toCall zip futureCalls) map runThread(resi) + publish(PiEventIdle(resi)) } } } From c773ba860965f6b0e8ce6d838dc868ce79b24d99 Mon Sep 17 00:00:00 2001 From: Petros Papapanagiotou Date: Mon, 29 Jul 2019 17:23:21 +0100 Subject: [PATCH 28/55] Updates PiSimulation to use the new simulator but the old chicken/egg problem of the simulation with the executor cropped up again... 
--- .../pew/simulator/PiSimHandler.scala | 18 ++++++++++ .../pew/simulator/PiSimulation.scala | 23 +++++++++---- .../bson/events/PiEventIdleCodec.scala | 34 +++++++++++++++++++ 3 files changed, 69 insertions(+), 6 deletions(-) create mode 100644 simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimHandler.scala create mode 100644 src/com/workflowfm/pew/mongodb/bson/events/PiEventIdleCodec.scala diff --git a/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimHandler.scala b/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimHandler.scala new file mode 100644 index 00000000..a039a073 --- /dev/null +++ b/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimHandler.scala @@ -0,0 +1,18 @@ +package com.workflowfm.pew.simulator + +import com.workflowfm.pew.stream.{ PiEventHandlerFactory, ResultHandler } +import com.workflowfm.pew.{ PiEvent, PiEventIdle, PiEventResult, PiFailure } + +class PiSimHandler[T](actor: PiSimulationActor[T], id: T) extends ResultHandler[T](id) { + override def apply(e: PiEvent[T]) = { + e match { + case PiEventIdle(i,_) => actor.simulationCheck + case _ => Unit + } + super.apply(e) + } +} + +class PiSimHandlerFactory[T](actor: PiSimulationActor[T]) extends PiEventHandlerFactory[T,PiSimHandler[T]] { + override def build(id: T) = new PiSimHandler[T](actor, id) +} diff --git a/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimulation.scala b/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimulation.scala index 13157f63..0ab59181 100644 --- a/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimulation.scala +++ b/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimulation.scala @@ -1,6 +1,7 @@ package com.workflowfm.pew.simulator import akka.actor.ActorRef +import com.workflowfm.pew.stream.{ PiEventHandler, PiEventHandlerFactory } import com.workflowfm.pew.{ AtomicProcess, PiProcess } import com.workflowfm.pew.execution.SimulatorExecutor import com.workflowfm.simulator.{ SimulatedProcess, 
SimulationActor } @@ -11,16 +12,26 @@ trait SimulatedPiProcess extends AtomicProcess with SimulatedProcess { } abstract class PiSimulation (val name: String, val coordinator: ActorRef) { - def run(executor: SimulatorExecutor[_]): Future[Any] - def getProcesses(): Seq[PiProcess] - def simulateWith(executor: SimulatorExecutor[_])(implicit executionContext: ExecutionContext): PiSimulationActor = + + def rootProcess: PiProcess + def args: Seq[Any] + + def getProcesses(): Seq[PiProcess] = rootProcess :: rootProcess.allDependencies.toList + + def simulateWith[T](executor: SimulatorExecutor[T])(implicit executionContext: ExecutionContext): PiSimulationActor[T] = new PiSimulationActor(this, executor) } -class PiSimulationActor(sim: PiSimulation, executor: SimulatorExecutor[_]) +class PiSimulationActor[T](sim: PiSimulation, executor: SimulatorExecutor[T]) (override implicit val executionContext: ExecutionContext) extends SimulationActor(sim.name, sim.coordinator) { - override def run(): Future[Any] = sim.run(executor) - def simulationReady = executor.simulationReady + + val factory = new PiSimHandlerFactory[T](this) + + override def run(): Future[Any] = { + executor.call(sim.rootProcess, sim.args, factory) flatMap (_.future) + } + + def simulationCheck = if (executor.simulationReady) ready() } diff --git a/src/com/workflowfm/pew/mongodb/bson/events/PiEventIdleCodec.scala b/src/com/workflowfm/pew/mongodb/bson/events/PiEventIdleCodec.scala new file mode 100644 index 00000000..2a5eb285 --- /dev/null +++ b/src/com/workflowfm/pew/mongodb/bson/events/PiEventIdleCodec.scala @@ -0,0 +1,34 @@ +package com.workflowfm.pew.mongodb.bson.events + +import com.workflowfm.pew.PiMetadata.PiMetadataMap +import com.workflowfm.pew.mongodb.bson.auto.ClassCodec +import com.workflowfm.pew.{PiEventIdle, PiInstance} +import org.bson.{BsonReader, BsonWriter} +import org.bson.codecs.{Codec, DecoderContext, EncoderContext} + +class PiEventIdleCodec[T]( piiCodec: Codec[PiInstance[T]], metaCodec: 
Codec[PiMetadataMap] ) + extends ClassCodec[PiEventIdle[T]] { + + val piiN: String = "pii" + val timeN: String = "timestamp" + + override def encodeBody(writer: BsonWriter, value: PiEventIdle[T], ctx: EncoderContext): Unit = { + + writer.writeName( piiN ) + ctx.encodeWithChildContext( piiCodec, writer, value.i ) + + writer.writeName( timeN ) + ctx.encodeWithChildContext( metaCodec, writer, value.metadata ) + } + + override def decodeBody(reader: BsonReader, ctx: DecoderContext): PiEventIdle[T] = { + + reader.readName( piiN ) + val pii: PiInstance[T] = ctx.decodeWithChildContext( piiCodec, reader ) + + reader.readName( timeN ) + val data: PiMetadataMap = ctx.decodeWithChildContext( metaCodec, reader ) + + PiEventIdle( pii, data ) + } +} From 11b099176be0a5560b7480d47445a0789b7a42f0 Mon Sep 17 00:00:00 2001 From: Petros Papapanagiotou Date: Tue, 30 Jul 2019 03:41:09 +0100 Subject: [PATCH 29/55] Makes the atomic process executor ActorRef parameter of AkkaExecutor --- .../pew/execution/AkkaExecutor.scala | 20 +++++++++++-------- .../pew/execution/AkkaExecutorTests.scala | 12 +++++------ 2 files changed, 18 insertions(+), 14 deletions(-) diff --git a/src/com/workflowfm/pew/execution/AkkaExecutor.scala b/src/com/workflowfm/pew/execution/AkkaExecutor.scala index 4e478bba..96419b50 100644 --- a/src/com/workflowfm/pew/execution/AkkaExecutor.scala +++ b/src/com/workflowfm/pew/execution/AkkaExecutor.scala @@ -14,7 +14,8 @@ import java.util.UUID class AkkaExecutor ( store:PiInstanceStore[UUID], - processes:PiProcessStore + processes:PiProcessStore, + atomicExecutor: ActorRef )( implicit val system: ActorSystem, override implicit val executionContext: ExecutionContext = ExecutionContext.global, @@ -23,16 +24,16 @@ class AkkaExecutor ( def this(store:PiInstanceStore[UUID], l:PiProcess*) (implicit system: ActorSystem, context: ExecutionContext, timeout:FiniteDuration) = - this(store,SimpleProcessStore(l :_*)) + this(store,SimpleProcessStore(l 
:_*),system.actorOf(AkkaExecutor.atomicprops())) def this(system: ActorSystem, context: ExecutionContext, timeout:FiniteDuration,l:PiProcess*) = - this(SimpleInstanceStore[UUID](),SimpleProcessStore(l :_*))(system,context,timeout) + this(SimpleInstanceStore[UUID](),SimpleProcessStore(l :_*),system.actorOf(AkkaExecutor.atomicprops()))(system,context,timeout) def this(l:PiProcess*)(implicit system: ActorSystem) = - this(SimpleInstanceStore[UUID](),SimpleProcessStore(l :_*))(system,ExecutionContext.global,10.seconds) + this(SimpleInstanceStore[UUID](),SimpleProcessStore(l :_*),system.actorOf(AkkaExecutor.atomicprops()))(system,ExecutionContext.global,10.seconds) - val execActor = system.actorOf(AkkaExecutor.execprops(store,processes)) + val execActor = system.actorOf(AkkaExecutor.execprops(store,processes,atomicExecutor)) implicit val tOut = Timeout(timeout) override def simulationReady = Await.result(execActor ? AkkaExecutor.SimReady,timeout).asInstanceOf[Boolean] @@ -63,12 +64,15 @@ object AkkaExecutor { case class Subscribe(handler:PiEventHandler[UUID]) def atomicprops(implicit context: ExecutionContext = ExecutionContext.global): Props = Props(new AkkaAtomicProcessExecutor()) - def execprops(store:PiInstanceStore[UUID], processes:PiProcessStore)(implicit system: ActorSystem, exc: ExecutionContext): Props = Props(new AkkaExecActor(store,processes)) + + def execprops(store:PiInstanceStore[UUID], processes:PiProcessStore, atomicExecutor: ActorRef) + (implicit system: ActorSystem, exc: ExecutionContext): Props = Props(new AkkaExecActor(store,processes,atomicExecutor)) } class AkkaExecActor( var store:PiInstanceStore[UUID], - processes:PiProcessStore + processes:PiProcessStore, + atomicExecutor: ActorRef )( override implicit val system: ActorSystem, implicit val executionContext: ExecutionContext = ExecutionContext.global @@ -167,7 +171,7 @@ class AkkaExecActor( try { publish(PiEventCall(i.id,ref,p,objs)) // TODO Change from ! to ? 
to require an acknowledgement - system.actorOf(AkkaExecutor.atomicprops()) ! AkkaExecutor.ACall(i.id,ref,p,objs,self) + atomicExecutor ! AkkaExecutor.ACall(i.id,ref,p,objs,self) } catch { case _:Throwable => Unit //TODO specify timeout exception here! - also print a warning } diff --git a/test/com/workflowfm/pew/execution/AkkaExecutorTests.scala b/test/com/workflowfm/pew/execution/AkkaExecutorTests.scala index aec554df..07111e6d 100644 --- a/test/com/workflowfm/pew/execution/AkkaExecutorTests.scala +++ b/test/com/workflowfm/pew/execution/AkkaExecutorTests.scala @@ -152,7 +152,7 @@ class AkkaExecutorTests extends FlatSpec with Matchers with BeforeAndAfterAll wi val f1 = ex.call(ri,Seq(21),factory) flatMap(_.future) val r1 = await(f1) - r1 should be (8) + r1 should be (11) kill.map(_.stop) } @@ -170,7 +170,7 @@ class AkkaExecutorTests extends FlatSpec with Matchers with BeforeAndAfterAll wi // r2 should be (("PbISleptFor2s","PcISleptFor1s")) val r1 = await(f1) - r1 should be (8) + r1 should be (11) k1.map(_.stop) k2.map(_.stop) @@ -187,9 +187,9 @@ class AkkaExecutorTests extends FlatSpec with Matchers with BeforeAndAfterAll wi // r2 should be (("PbISleptFor2s","PcISleptFor1s")) val r1 = await(f1) - r1 should be (8) + r1 should be (11) val r2 = await(f2) - r2 should be (8) + r2 should be (11) } @@ -206,9 +206,9 @@ class AkkaExecutorTests extends FlatSpec with Matchers with BeforeAndAfterAll wi // r2 should be (("PbISleptFor2s","PcISleptFor1s")) val r1 = await(f1) - r1 should be (8) + r1 should be (11) val r2 = await(f2) - r2 should be (8) + r2 should be (11) } } From 6bf28c7d1904fc0d6a469854cff5d800dfccef28 Mon Sep 17 00:00:00 2001 From: Petros Papapanagiotou Date: Tue, 30 Jul 2019 04:16:45 +0100 Subject: [PATCH 30/55] Some progress with the simulator, which will evolve further --- .../pew/simulator/PiSimHandler.scala | 2 +- .../pew/simulator/PiSimulation.scala | 34 +++++++++++++------ 2 files changed, 25 insertions(+), 11 deletions(-) diff --git 
a/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimHandler.scala b/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimHandler.scala index a039a073..8534a8fd 100644 --- a/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimHandler.scala +++ b/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimHandler.scala @@ -6,7 +6,7 @@ import com.workflowfm.pew.{ PiEvent, PiEventIdle, PiEventResult, PiFailure } class PiSimHandler[T](actor: PiSimulationActor[T], id: T) extends ResultHandler[T](id) { override def apply(e: PiEvent[T]) = { e match { - case PiEventIdle(i,_) => actor.simulationCheck + case PiEventIdle(i,_) if (i.id == id) => actor.simulationCheck case _ => Unit } super.apply(e) diff --git a/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimulation.scala b/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimulation.scala index 0ab59181..22e81980 100644 --- a/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimulation.scala +++ b/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimulation.scala @@ -1,37 +1,51 @@ package com.workflowfm.pew.simulator -import akka.actor.ActorRef +import akka.actor.{ ActorRef, Props } import com.workflowfm.pew.stream.{ PiEventHandler, PiEventHandlerFactory } import com.workflowfm.pew.{ AtomicProcess, PiProcess } import com.workflowfm.pew.execution.SimulatorExecutor -import com.workflowfm.simulator.{ SimulatedProcess, SimulationActor } +import com.workflowfm.simulator.{ SimulatedProcess, Simulation, SimulationActor } import scala.concurrent.{ ExecutionContext, Future } +/* +trait SimulatedProcess { + def simulationActor: Actor + + def simulate[T]( + gen: TaskGenerator, + result:TaskMetrics => T, + resources:String* + )(implicit executionContext: ExecutionContext):Future[T] = { +// simulationActor ! 
SimulationActor.AddTask(gen, resources:_*).map(m => result(m)) + } +} + */ trait SimulatedPiProcess extends AtomicProcess with SimulatedProcess { override def isSimulatedProcess = true } -abstract class PiSimulation (val name: String, val coordinator: ActorRef) { - +abstract class PiSimulation (override val name: String, override val coordinator: ActorRef) extends Simulation(name, coordinator) { def rootProcess: PiProcess def args: Seq[Any] def getProcesses(): Seq[PiProcess] = rootProcess :: rootProcess.allDependencies.toList - - def simulateWith[T](executor: SimulatorExecutor[T])(implicit executionContext: ExecutionContext): PiSimulationActor[T] = - new PiSimulationActor(this, executor) } -class PiSimulationActor[T](sim: PiSimulation, executor: SimulatorExecutor[T]) +class PiSimulationActor[T](override val simulation: PiSimulation, executor: SimulatorExecutor[T]) (override implicit val executionContext: ExecutionContext) - extends SimulationActor(sim.name, sim.coordinator) { + extends SimulationActor { val factory = new PiSimHandlerFactory[T](this) override def run(): Future[Any] = { - executor.call(sim.rootProcess, sim.args, factory) flatMap (_.future) + executor.call(simulation.rootProcess, simulation.args, factory) flatMap (_.future) } def simulationCheck = if (executor.simulationReady) ready() } +object PiSimulationActor { + def props[T](simulation: PiSimulation, executor: SimulatorExecutor[T]) + (implicit executionContext: ExecutionContext): Props = + Props(new PiSimulationActor[T](simulation, executor)) +} From 903d0fb2b0d9ba1b73490988b650460bb18d889b Mon Sep 17 00:00:00 2001 From: Petros Papapanagiotou Date: Tue, 30 Jul 2019 17:47:25 +0100 Subject: [PATCH 31/55] Some more progress. Using a separate AkkaExecutor for each simulation is inevitable at this point... The problem is just too complex otherwise. 1) Atomic processes run in their own actor, but they don't have access to it in advance, so they can't receive directly. We use the ask pattern for that. 
2) They need to communicate with the SimulationActor to queue tasks, so they need to know its ActorRef in advance. 3) The SimulationActor needs to know the Executor in advance so that it can start the simulation when instructed by the Coordinator. 4) The Executor needs to know the available processes in advance. Hence the cycle. I tried to make the executor a variable in SimulationActor, but now stuck at (4). The PiProcessStore in AkkaExecutor is mutable, but that is too much mutability for my taste. I think having the SimulationActor generate its own AkkaExecutor will do at least for now. --- .../pew/simulator/PiSimulation.scala | 48 ++++++++++--------- 1 file changed, 25 insertions(+), 23 deletions(-) diff --git a/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimulation.scala b/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimulation.scala index 22e81980..7959af61 100644 --- a/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimulation.scala +++ b/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimulation.scala @@ -4,48 +4,50 @@ import akka.actor.{ ActorRef, Props } import com.workflowfm.pew.stream.{ PiEventHandler, PiEventHandlerFactory } import com.workflowfm.pew.{ AtomicProcess, PiProcess } import com.workflowfm.pew.execution.SimulatorExecutor -import com.workflowfm.simulator.{ SimulatedProcess, Simulation, SimulationActor } +import com.workflowfm.simulator.{ SimulatedProcess, SimulationActor } import scala.concurrent.{ ExecutionContext, Future } -/* -trait SimulatedProcess { - def simulationActor: Actor - - def simulate[T]( - gen: TaskGenerator, - result:TaskMetrics => T, - resources:String* - )(implicit executionContext: ExecutionContext):Future[T] = { -// simulationActor ! 
SimulationActor.AddTask(gen, resources:_*).map(m => result(m)) - } -} - */ trait SimulatedPiProcess extends AtomicProcess with SimulatedProcess { override def isSimulatedProcess = true } -abstract class PiSimulation (override val name: String, override val coordinator: ActorRef) extends Simulation(name, coordinator) { +abstract class PiSimulation (val name: String, val coordinator: ActorRef) { def rootProcess: PiProcess def args: Seq[Any] def getProcesses(): Seq[PiProcess] = rootProcess :: rootProcess.allDependencies.toList } -class PiSimulationActor[T](override val simulation: PiSimulation, executor: SimulatorExecutor[T]) - (override implicit val executionContext: ExecutionContext) - extends SimulationActor { +class PiSimulationActor[T](implicit executionContext: ExecutionContext) + extends SimulationActor(simulation.name, simulation.coordinator) { + var executor: Option[SimulatorExecutor[T]] = None val factory = new PiSimHandlerFactory[T](this) - override def run(): Future[Any] = { - executor.call(simulation.rootProcess, simulation.args, factory) flatMap (_.future) + override def run(): Future[Any] = executor match { + case None => Future.failed(new RuntimeException(s"Tried to start simulation actor [${simulation.name}] with no executor.")) + case Some(exe) => exe.call(simulation.rootProcess, simulation.args, factory) flatMap (_.future) } - def simulationCheck = if (executor.simulationReady) ready() + def simulationCheck = if (executor.map(_.simulationReady).getOrElse(true)) ready() + + def piSimulatorReceive: Receive = { + case PiSimulationActor.Init(exe) => { + if (executor.isEmpty && exe.isInstanceOf[SimulatorExecutor[T]]) + executor = Some(exe.asInstanceOf[SimulatorExecutor[T]]) + sender() ! 
PiSimulationActor.Ack + }} + + override def receive = piSimulatorReceive orElse piSimulatorReceive } object PiSimulationActor { - def props[T](simulation: PiSimulation, executor: SimulatorExecutor[T]) + case class Init[T](executor: SimulatorExecutor[T]) + case object Ack + + /* + def props[T](simulation: PiSimulation) (implicit executionContext: ExecutionContext): Props = - Props(new PiSimulationActor[T](simulation, executor)) + Props(new PiSimulationActor[T](simulation)) + */ } From 63e6dad0a117881e97f29b247ff0bb0515840c58 Mon Sep 17 00:00:00 2001 From: Petros Papapanagiotou Date: Tue, 30 Jul 2019 20:33:22 +0100 Subject: [PATCH 32/55] Makes each PiSimulationActor have its own SimulatorExecutor Realised that ready() gets called before the processes have a chance to register their tasks. --- .../pew/simulator/PiSimulation.scala | 37 ++++--------------- 1 file changed, 8 insertions(+), 29 deletions(-) diff --git a/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimulation.scala b/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimulation.scala index 7959af61..80517f5a 100644 --- a/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimulation.scala +++ b/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimulation.scala @@ -11,43 +11,22 @@ trait SimulatedPiProcess extends AtomicProcess with SimulatedProcess { override def isSimulatedProcess = true } -abstract class PiSimulation (val name: String, val coordinator: ActorRef) { +abstract class PiSimulationActor[T] (override val name: String, override val coordinator: ActorRef) + (implicit executionContext: ExecutionContext) + extends SimulationActor(name, coordinator) { + def rootProcess: PiProcess def args: Seq[Any] def getProcesses(): Seq[PiProcess] = rootProcess :: rootProcess.allDependencies.toList -} - -class PiSimulationActor[T](implicit executionContext: ExecutionContext) - extends SimulationActor(simulation.name, simulation.coordinator) { - var executor: Option[SimulatorExecutor[T]] = 
None + def executor: SimulatorExecutor[T] val factory = new PiSimHandlerFactory[T](this) - override def run(): Future[Any] = executor match { - case None => Future.failed(new RuntimeException(s"Tried to start simulation actor [${simulation.name}] with no executor.")) - case Some(exe) => exe.call(simulation.rootProcess, simulation.args, factory) flatMap (_.future) + override def run(): Future[Any] = { + executor.call(rootProcess, args, factory) flatMap (_.future) } - def simulationCheck = if (executor.map(_.simulationReady).getOrElse(true)) ready() - - def piSimulatorReceive: Receive = { - case PiSimulationActor.Init(exe) => { - if (executor.isEmpty && exe.isInstanceOf[SimulatorExecutor[T]]) - executor = Some(exe.asInstanceOf[SimulatorExecutor[T]]) - sender() ! PiSimulationActor.Ack - }} - - override def receive = piSimulatorReceive orElse piSimulatorReceive -} -object PiSimulationActor { - case class Init[T](executor: SimulatorExecutor[T]) - case object Ack - - /* - def props[T](simulation: PiSimulation) - (implicit executionContext: ExecutionContext): Props = - Props(new PiSimulationActor[T](simulation)) - */ + def simulationCheck = if (executor.simulationReady) ready() } From 6c03becab8d7ec2085c9681609b0fd95d1c4c6dd Mon Sep 17 00:00:00 2001 From: Petros Papapanagiotou Date: Wed, 31 Jul 2019 18:17:35 +0100 Subject: [PATCH 33/55] Major progress towards async simulation Resolved multiple race conditions. Everything looks fine now. We only need to deal with processes that register multiple or no tasks. QueueMachine in Aurora for example. We need to take care of removing them from the taskWaiting list. For multiple tasks, we might want to add them once for each task they register and avoid the additional ready call. For no tasks, we need to think about managing multiple instances with the same name. Perhaps we can use metadata to register our own unique ID for all processes? 
--- .../pew/simulator/PiSimHandler.scala | 10 +- .../pew/simulator/PiSimulation.scala | 98 ++++++++++++++++++- src/com/workflowfm/pew/PiInstance.scala | 2 + src/com/workflowfm/pew/PiProcess.scala | 2 +- 4 files changed, 103 insertions(+), 9 deletions(-) diff --git a/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimHandler.scala b/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimHandler.scala index 8534a8fd..a550b157 100644 --- a/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimHandler.scala +++ b/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimHandler.scala @@ -1,18 +1,20 @@ package com.workflowfm.pew.simulator +import akka.actor.ActorRef import com.workflowfm.pew.stream.{ PiEventHandlerFactory, ResultHandler } -import com.workflowfm.pew.{ PiEvent, PiEventIdle, PiEventResult, PiFailure } +import com.workflowfm.pew.{ PiEvent, PiEventCall, PiEventReturn, PiEventIdle, PiEventResult, PiFailure } -class PiSimHandler[T](actor: PiSimulationActor[T], id: T) extends ResultHandler[T](id) { +class PiSimHandler[T](actor: ActorRef, id: T) extends ResultHandler[T](id) { override def apply(e: PiEvent[T]) = { e match { - case PiEventIdle(i,_) if (i.id == id) => actor.simulationCheck + case PiEventIdle(i,_) if (i.id == id) => actor ! PiSimulationActor.ExecutorReady(i) + case PiEventReturn(i,_,_,_) if (i == id) => actor ! 
PiSimulationActor.ExecutorBusy case _ => Unit } super.apply(e) } } -class PiSimHandlerFactory[T](actor: PiSimulationActor[T]) extends PiEventHandlerFactory[T,PiSimHandler[T]] { +class PiSimHandlerFactory[T](actor: ActorRef) extends PiEventHandlerFactory[T,PiSimHandler[T]] { override def build(id: T) = new PiSimHandler[T](actor, id) } diff --git a/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimulation.scala b/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimulation.scala index 80517f5a..71500235 100644 --- a/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimulation.scala +++ b/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimulation.scala @@ -1,32 +1,122 @@ package com.workflowfm.pew.simulator import akka.actor.{ ActorRef, Props } +import com.workflowfm.pew.{ MetadataAtomicProcess, PiInstance, PiMetadata, PiObject } import com.workflowfm.pew.stream.{ PiEventHandler, PiEventHandlerFactory } import com.workflowfm.pew.{ AtomicProcess, PiProcess } import com.workflowfm.pew.execution.SimulatorExecutor -import com.workflowfm.simulator.{ SimulatedProcess, SimulationActor } +import com.workflowfm.simulator.metrics.TaskMetrics +import com.workflowfm.simulator.{ SimulatedProcess, SimulationActor, TaskGenerator } +import java.util.UUID +import scala.collection.mutable.{ Map, Queue } import scala.concurrent.{ ExecutionContext, Future } trait SimulatedPiProcess extends AtomicProcess with SimulatedProcess { override def isSimulatedProcess = true + + override val iname = s"$simulationName.$name" + + def ready() = simulationActor ! PiSimulationActor.Waiting(iname) + + override def simulate[T]( + gen: TaskGenerator, + result:TaskMetrics => T, + resources:String* + )(implicit executionContext: ExecutionContext):Future[T] = { + val id = java.util.UUID.randomUUID + simulationActor ! 
PiSimulationActor.AddSource(id,iname) + val f = simulate(id,gen,result,resources:_*) + ready() + f + } + } abstract class PiSimulationActor[T] (override val name: String, override val coordinator: ActorRef) (implicit executionContext: ExecutionContext) extends SimulationActor(name, coordinator) { + var executorIsReady = true + var waiting: Seq[String] = Seq[String]() + val taskWaiting: Queue[String] = Queue[String]() + val sources: Map[UUID,String] = Map() + def rootProcess: PiProcess def args: Seq[Any] def getProcesses(): Seq[PiProcess] = rootProcess :: rootProcess.allDependencies.toList def executor: SimulatorExecutor[T] - val factory = new PiSimHandlerFactory[T](this) + val factory = new PiSimHandlerFactory[T](self) override def run(): Future[Any] = { + executorIsReady = false executor.call(rootProcess, args, factory) flatMap (_.future) } - def simulationCheck = if (executor.simulationReady) ready() -} + def readyCheck() = { + val q = taskWaiting.clone() + val check = waiting.forall { p => + q.dequeueFirst(_ == p) match { + case None => false + case Some(_) => true + } + } + println(s"Check: $executorIsReady && $waiting && $taskWaiting = $check == ${executorIsReady & check}") + if (executorIsReady && check) ready() + } +// def wait(process: String) = { + // println(s"Wait: $process") + // waiting += process + // } + + def processWaiting(process: String) = { + println(s"Process waiting: $process") + if (!waiting.contains(process)) executorIsReady=false + taskWaiting += process + readyCheck() + } + override def complete(id: UUID, metrics: TaskMetrics) = { + sources.get(id).map { p => + taskWaiting.dequeueFirst(_ == p) + } + super.complete(id,metrics) + } + + def executorReady(i: PiInstance[_]) = { + val procs = i.getCalledProcesses + if (procs.forall(_.isSimulatedProcess)) { + waiting = procs map (_.iname) + executorIsReady = true + readyCheck() + } + } + + /* + def processDone(process: String) = { + //println(s"DCheck: $waiting") + if (!waiting.contains(process)) 
println(s" ERROR ERROR ERROR ERROR ERROR ERROR ERROR ERROR $process ERROR ERROR ERROR ERROR ERROR ERROR ERROR ") + else println(s"Done: $process") + taskWaiting.dequeueFirst( _ == process ) + waiting.dequeueFirst(_ == process) + readyCheck() + } + */ + def executorBusy() = executorIsReady = false + + def piActorReceive: Receive = { + case PiSimulationActor.Waiting(p) => processWaiting(p) + case PiSimulationActor.AddSource(id,iname) => sources += id->iname + case PiSimulationActor.ExecutorBusy => executorBusy() + case PiSimulationActor.ExecutorReady(i) => executorReady(i) + } + + override def receive = piActorReceive orElse simulationActorReceive +} +object PiSimulationActor { + case class Waiting(process: String) + case class AddSource(id: UUID, iname: String) + case object ExecutorBusy + case class ExecutorReady(i: PiInstance[_]) +} diff --git a/src/com/workflowfm/pew/PiInstance.scala b/src/com/workflowfm/pew/PiInstance.scala index bb42561e..c86d9d63 100644 --- a/src/com/workflowfm/pew/PiInstance.scala +++ b/src/com/workflowfm/pew/PiInstance.scala @@ -56,6 +56,8 @@ case class PiInstance[T](final val id:T, called:Seq[Int], process:PiProcess, sta } } } + + def getCalledProcesses: Seq[PiProcess] = state.threads flatMap { f => getProc(f._2.fun) } toSeq /** * Should the simulator wait for the workflow? diff --git a/src/com/workflowfm/pew/PiProcess.scala b/src/com/workflowfm/pew/PiProcess.scala index d99681d0..8d86c5d0 100644 --- a/src/com/workflowfm/pew/PiProcess.scala +++ b/src/com/workflowfm/pew/PiProcess.scala @@ -99,7 +99,7 @@ trait AtomicProcess extends MetadataAtomicProcess { /** Implements the standard AtomicProcess interface for unsupporting ProcessExecutors. 
*/ - final override def runMeta( args: Seq[PiObject] )( implicit ec: ExecutionContext ): Future[MetadataAtomicResult] + override def runMeta( args: Seq[PiObject] )( implicit ec: ExecutionContext ): Future[MetadataAtomicResult] = run( args ).map(MetadataAtomicProcess.result(_)) def run(args:Seq[PiObject])(implicit ec:ExecutionContext):Future[PiObject] From 692ff3feee9900cd418f0e5a9c1bdff6ac3c53e6 Mon Sep 17 00:00:00 2001 From: Petros Papapanagiotou Date: Wed, 31 Jul 2019 23:11:29 +0100 Subject: [PATCH 34/55] Provides a way for no-task processes to resume, but it's no good If the process takes too long to declare itself resumed, we may proceed the coordinator before it resumes. --- .../pew/simulator/PiSimulation.scala | 30 ++++++++----------- 1 file changed, 13 insertions(+), 17 deletions(-) diff --git a/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimulation.scala b/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimulation.scala index 71500235..064b7ad9 100644 --- a/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimulation.scala +++ b/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimulation.scala @@ -16,7 +16,8 @@ trait SimulatedPiProcess extends AtomicProcess with SimulatedProcess { override val iname = s"$simulationName.$name" - def ready() = simulationActor ! PiSimulationActor.Waiting(iname) + def virtualWait() = simulationActor ! PiSimulationActor.Waiting(iname) + def virtualResume() = simulationActor ! PiSimulationActor.Resuming(iname) override def simulate[T]( gen: TaskGenerator, @@ -26,7 +27,7 @@ trait SimulatedPiProcess extends AtomicProcess with SimulatedProcess { val id = java.util.UUID.randomUUID simulationActor ! 
PiSimulationActor.AddSource(id,iname) val f = simulate(id,gen,result,resources:_*) - ready() + virtualWait() f } @@ -65,18 +66,21 @@ abstract class PiSimulationActor[T] (override val name: String, override val coo println(s"Check: $executorIsReady && $waiting && $taskWaiting = $check == ${executorIsReady & check}") if (executorIsReady && check) ready() } -// def wait(process: String) = { - // println(s"Wait: $process") - // waiting += process - // } def processWaiting(process: String) = { - println(s"Process waiting: $process") + //println(s"Process waiting: $process") if (!waiting.contains(process)) executorIsReady=false taskWaiting += process readyCheck() } + def processResuming(process: String) = { + // println(s"Process resuming: $process") + if (!waiting.contains(process)) executorIsReady=false + taskWaiting.dequeueFirst(_ == process) + readyCheck() + } + override def complete(id: UUID, metrics: TaskMetrics) = { sources.get(id).map { p => taskWaiting.dequeueFirst(_ == p) @@ -93,20 +97,11 @@ abstract class PiSimulationActor[T] (override val name: String, override val coo } } - /* - def processDone(process: String) = { - //println(s"DCheck: $waiting") - if (!waiting.contains(process)) println(s" ERROR ERROR ERROR ERROR ERROR ERROR ERROR ERROR $process ERROR ERROR ERROR ERROR ERROR ERROR ERROR ") - else println(s"Done: $process") - taskWaiting.dequeueFirst( _ == process ) - waiting.dequeueFirst(_ == process) - readyCheck() - } - */ def executorBusy() = executorIsReady = false def piActorReceive: Receive = { case PiSimulationActor.Waiting(p) => processWaiting(p) + case PiSimulationActor.Resuming(p) => processResuming(p) case PiSimulationActor.AddSource(id,iname) => sources += id->iname case PiSimulationActor.ExecutorBusy => executorBusy() case PiSimulationActor.ExecutorReady(i) => executorReady(i) @@ -116,6 +111,7 @@ abstract class PiSimulationActor[T] (override val name: String, override val coo } object PiSimulationActor { case class Waiting(process: String) + 
case class Resuming(process: String) case class AddSource(id: UUID, iname: String) case object ExecutorBusy case class ExecutorReady(i: PiInstance[_]) From fbbbc217be360288d8ab0f938d63cd5229b76627 Mon Sep 17 00:00:00 2001 From: Petros Papapanagiotou Date: Thu, 1 Aug 2019 18:04:14 +0100 Subject: [PATCH 35/55] Minor clean --- .../main/scala/com/workflowfm/pew/simulator/PiSimulation.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimulation.scala b/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimulation.scala index 064b7ad9..90c71346 100644 --- a/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimulation.scala +++ b/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimulation.scala @@ -63,7 +63,7 @@ abstract class PiSimulationActor[T] (override val name: String, override val coo case Some(_) => true } } - println(s"Check: $executorIsReady && $waiting && $taskWaiting = $check == ${executorIsReady & check}") + //println(s"[${self.path.name}] Check: $executorIsReady && $waiting && $taskWaiting = $check == ${executorIsReady & check}") if (executorIsReady && check) ready() } From 3239ed40ac13dfbc5ba3f5bc9d9ae1b9417509d6 Mon Sep 17 00:00:00 2001 From: Petros Papapanagiotou Date: Fri, 2 Aug 2019 03:38:42 +0100 Subject: [PATCH 36/55] Uses SubAkka --- build.sbt | 3 + .../pew/execution/AkkaExecutor.scala | 15 ++-- src/com/workflowfm/pew/stream/PiStream.scala | 71 ++++--------------- 3 files changed, 27 insertions(+), 62 deletions(-) diff --git a/build.sbt b/build.sbt index 2469a300..5c30f9ec 100644 --- a/build.sbt +++ b/build.sbt @@ -35,6 +35,9 @@ libraryDependencies += "org.mongodb.scala" %% "mongo-scala-driver" % "2.2.1" libraryDependencies += "junit" % "junit" % "4.8.2" +libraryDependencies += "uk.ac.ed.inf" %% "subakka" % "0.1-SNAPSHOT" +libraryDependencies += "uk.ac.ed.inf" %% "subakka" % "0.1-SNAPSHOT" % Test classifier "tests" + lazy val skiexample = project 
.in(file("skiexample")) .settings( diff --git a/src/com/workflowfm/pew/execution/AkkaExecutor.scala b/src/com/workflowfm/pew/execution/AkkaExecutor.scala index 96419b50..0258893a 100644 --- a/src/com/workflowfm/pew/execution/AkkaExecutor.scala +++ b/src/com/workflowfm/pew/execution/AkkaExecutor.scala @@ -1,6 +1,7 @@ package com.workflowfm.pew.execution import akka.actor._ +import akka.event.LoggingReceive import akka.pattern.{ask, pipe} import akka.util.Timeout import com.workflowfm.pew._ @@ -8,6 +9,7 @@ import com.workflowfm.pew.stream.{ PiObservable, PiStream, PiEventHandler, PiSwi import scala.concurrent._ import scala.concurrent.duration._ +import scala.reflect.ClassTag import scala.util.{Failure, Success} import java.util.UUID @@ -22,11 +24,13 @@ class AkkaExecutor ( implicit val timeout:FiniteDuration = 10.seconds ) extends SimulatorExecutor[UUID] with PiObservable[UUID] { + implicit val tag: ClassTag[UUID] = ClassTag(classOf[UUID]) + def this(store:PiInstanceStore[UUID], l:PiProcess*) - (implicit system: ActorSystem, context: ExecutionContext, timeout:FiniteDuration) = + (implicit system: ActorSystem, context: ExecutionContext, timeout: FiniteDuration) = this(store,SimpleProcessStore(l :_*),system.actorOf(AkkaExecutor.atomicprops())) - def this(system: ActorSystem, context: ExecutionContext, timeout:FiniteDuration,l:PiProcess*) = + def this(system: ActorSystem, context: ExecutionContext, timeout:FiniteDuration, l:PiProcess*) = this(SimpleInstanceStore[UUID](),SimpleProcessStore(l :_*),system.actorOf(AkkaExecutor.atomicprops()))(system,context,timeout) def this(l:PiProcess*)(implicit system: ActorSystem) = @@ -74,8 +78,8 @@ class AkkaExecActor( processes:PiProcessStore, atomicExecutor: ActorRef )( - override implicit val system: ActorSystem, - implicit val executionContext: ExecutionContext = ExecutionContext.global + implicit val executionContext: ExecutionContext, + override implicit val tag: ClassTag[PiEvent[UUID]] ) extends Actor with PiStream[UUID] { def 
init(p:PiProcess,args:Seq[PiObject]):UUID = { @@ -184,7 +188,7 @@ class AkkaExecActor( def simulationReady():Boolean = store.simulationReady - def receive = { + def akkaReceive: Receive = { case AkkaExecutor.Init(p,args) => sender() ! init(p,args) case AkkaExecutor.Start(id) => start(id) case AkkaExecutor.Result(id,ref,res) => postResult(id,ref,res) @@ -199,6 +203,7 @@ class AkkaExecActor( case m => System.err.println("!!! Received unknown message: " + m) } + override def receive = LoggingReceive { publisherBehaviour orElse akkaReceive } } class AkkaAtomicProcessExecutor(implicit val exc: ExecutionContext = ExecutionContext.global) extends Actor { //(executor:ActorRef,p:PiProcess,args:Seq[PiObject]) diff --git a/src/com/workflowfm/pew/stream/PiStream.scala b/src/com/workflowfm/pew/stream/PiStream.scala index c04e87e8..b4977f22 100644 --- a/src/com/workflowfm/pew/stream/PiStream.scala +++ b/src/com/workflowfm/pew/stream/PiStream.scala @@ -1,77 +1,34 @@ package com.workflowfm.pew.stream +import akka.actor.ActorRef +import akka.stream.KillSwitch import com.workflowfm.pew.{ PiEvent, PiEventResult } +import scala.concurrent.{ ExecutionContext, Future } +import scala.reflect.ClassTag +import uk.ac.ed.inf.ppapapan.subakka.{ Publisher, Subscriber } -import akka.actor._ -import akka.stream._ -import akka.stream.scaladsl._ -import akka.{ NotUsed, Done } - -import scala.concurrent.{ Future, ExecutionContext } - -/** A [[PiObservable]] implemented using an Akka Stream Source. 
*/ -trait PiSource[T] extends PiObservable[T] { - implicit val materializer:Materializer - - def getSource:Source[PiEvent[T], NotUsed] - - def subscribeAndForget(f:PiEvent[T]=>Unit):Unit = getSource.runForeach(f) - - def subscribe(f:PiEvent[T]=>Unit):PiSwitch = { - val kill = getSource - .viaMat(KillSwitches.single)(Keep.right) - .to(Sink.foreach(f)) - .run() - PiKillSwitch(kill) - } +class PiSubscriber[T](handler: PiEventHandler[T]) extends Subscriber[PiEvent[T]] { + var switch: Option[KillSwitch] = None + override def onInit(publisher: ActorRef, k: KillSwitch): Unit = switch = Some(k) + override def onEvent(event: PiEvent[T]): Unit = if (handler(event)) switch.map(_.shutdown()) } /** A [[PiSource]] that also implements [[PiPublisher]] with Akka Streams. * Uses [[akka.stream.scaladsl.BroadcastHub]], which allows us to clone new sources and attach the * handlers as sinks. */ -trait PiStream[T] extends PiSource[T] with PiPublisher[T] { - implicit val system:ActorSystem - override implicit val materializer = ActorMaterializer() - - private val sourceQueue = Source.queue[PiEvent[T]](PiStream.bufferSize, PiStream.overflowStrategy) - - private val ( - queue: SourceQueueWithComplete[PiEvent[T]], - source: Source[PiEvent[T], NotUsed] - ) = { - val (q,s) = sourceQueue.toMat(BroadcastHub.sink(bufferSize = 256))(Keep.both).run() - s.runWith(Sink.ignore) - (q,s) - } - - override def publish(evt:PiEvent[T]) = queue.offer(evt) - - override def getSource = source - //def subscribe(sink:PiEvent[T]=>Unit):Future[Done] = source.runForeach(sink(_)) +trait PiStream[T] extends PiPublisher[T] with Publisher[PiEvent[T]] with PiObservable[T] { + implicit val tag: ClassTag[PiEvent[T]] + implicit val executionContext: ExecutionContext override def subscribe(handler:PiEventHandler[T]):Future[PiSwitch] = { - // Using a shared kill switch even though we only want to shutdown one stream, because - // the shared switch is available immediately (pre-materialization) so we can use it - // as a 
sink. - val killSwitch = KillSwitches.shared("PiEventHandlerKillSwitch") - val x = source - .via(killSwitch.flow) // Use the killSwitch as a flow so we can kill the entire stream. - .map(handler(_)) // Run the events through the handler. - .runForeach { r => if (r) { // if the returned value is true, we need to "unsubscribe" - killSwitch.shutdown() // we could not do this with a regular killSwitch, only with a shared one - } } - Future.successful(PiKillSwitch(killSwitch)) + new PiSubscriber[T](handler).subscribeTo(self)(context.system,tag) map { i => PiKillSwitch(i.killSwitch) } } } -object PiStream { - val bufferSize = 5 - val overflowStrategy = OverflowStrategy.backpressure -} /** A wrapper of [[akka.stream.KillSwitch]] to stop [[PiEventHandler]]s. */ -case class PiKillSwitch(switch:KillSwitch) extends PiSwitch { +case class PiKillSwitch(switch: KillSwitch) extends PiSwitch { override def stop = switch.shutdown() } From 1237dd5a993906b1820f6347afd59df96d590b11 Mon Sep 17 00:00:00 2001 From: Petros Papapanagiotou Date: Fri, 2 Aug 2019 09:36:45 +0100 Subject: [PATCH 37/55] Swaps receive order for PiSimulationActor --- .../main/scala/com/workflowfm/pew/simulator/PiSimulation.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimulation.scala b/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimulation.scala index 90c71346..4bc3a61a 100644 --- a/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimulation.scala +++ b/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimulation.scala @@ -107,7 +107,7 @@ abstract class PiSimulationActor[T] (override val name: String, override val coo case PiSimulationActor.ExecutorReady(i) => executorReady(i) } - override def receive = piActorReceive orElse simulationActorReceive + override def receive = simulationActorReceive orElse piActorReceive } object PiSimulationActor { case class Waiting(process: String) From 
1815b91c619a474f699e976f8657926e979d1e9a Mon Sep 17 00:00:00 2001 From: Petros Papapanagiotou Date: Tue, 6 Aug 2019 11:06:32 +0100 Subject: [PATCH 38/55] Uses SubAkka.HashSetPublisher and SubscriptionSwitch --- src/com/workflowfm/pew/stream/PiStream.scala | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/com/workflowfm/pew/stream/PiStream.scala b/src/com/workflowfm/pew/stream/PiStream.scala index b4977f22..9e52dc30 100644 --- a/src/com/workflowfm/pew/stream/PiStream.scala +++ b/src/com/workflowfm/pew/stream/PiStream.scala @@ -5,20 +5,20 @@ import akka.stream.KillSwitch import com.workflowfm.pew.{ PiEvent, PiEventResult } import scala.concurrent.{ ExecutionContext, Future } import scala.reflect.ClassTag -import uk.ac.ed.inf.ppapapan.subakka.{ Publisher, Subscriber } +import uk.ac.ed.inf.ppapapan.subakka.{ HashSetPublisher, Subscriber, SubscriptionSwitch } class PiSubscriber[T](handler: PiEventHandler[T]) extends Subscriber[PiEvent[T]] { - var switch: Option[KillSwitch] = None + var switch: Option[SubscriptionSwitch] = None - override def onInit(publisher: ActorRef, k: KillSwitch): Unit = switch = Some(k) - override def onEvent(event: PiEvent[T]): Unit = if (handler(event)) switch.map(_.shutdown()) + override def onInit(publisher: ActorRef, killSwitch: SubscriptionSwitch): Unit = switch = Some(killSwitch) + override def onEvent(event: PiEvent[T]): Unit = if (handler(event)) switch.map(_.stop()) } /** A [[PiSource]] that also implements [[PiPublisher]] with Akka Streams. * Uses [[akka.stream.scaladsl.BroadcastHub]], which allows us to clone new sources and attach the * handlers as sinks. 
*/ -trait PiStream[T] extends PiPublisher[T] with Publisher[PiEvent[T]] with PiObservable[T] { +trait PiStream[T] extends PiPublisher[T] with HashSetPublisher[PiEvent[T]] with PiObservable[T] { implicit val tag: ClassTag[PiEvent[T]] implicit val executionContext: ExecutionContext @@ -29,6 +29,6 @@ trait PiStream[T] extends PiPublisher[T] with Publisher[PiEvent[T]] with PiObser /** A wrapper of [[akka.stream.KillSwitch]] to stop [[PiEventHandler]]s. */ -case class PiKillSwitch(switch: KillSwitch) extends PiSwitch { - override def stop = switch.shutdown() +case class PiKillSwitch(switch: SubscriptionSwitch) extends PiSwitch { + override def stop = switch.stop() } From 260c670fa5d720e7d196fcdf26c99438fc5adf43 Mon Sep 17 00:00:00 2001 From: Petros Papapanagiotou Date: Tue, 6 Aug 2019 16:48:10 +0100 Subject: [PATCH 39/55] Cleans up simulator --- .../pew/simulator/PiSimulation.scala | 33 +++---------------- .../pew/simulator/SimulatedPiProcess.scala | 31 +++++++++++++++++ 2 files changed, 36 insertions(+), 28 deletions(-) create mode 100644 simulator/src/main/scala/com/workflowfm/pew/simulator/SimulatedPiProcess.scala diff --git a/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimulation.scala b/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimulation.scala index 4bc3a61a..8dbfffcd 100644 --- a/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimulation.scala +++ b/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimulation.scala @@ -1,38 +1,15 @@ package com.workflowfm.pew.simulator import akka.actor.{ ActorRef, Props } -import com.workflowfm.pew.{ MetadataAtomicProcess, PiInstance, PiMetadata, PiObject } import com.workflowfm.pew.stream.{ PiEventHandler, PiEventHandlerFactory } -import com.workflowfm.pew.{ AtomicProcess, PiProcess } +import com.workflowfm.pew.{ PiProcess, PiInstance } import com.workflowfm.pew.execution.SimulatorExecutor -import com.workflowfm.simulator.metrics.TaskMetrics +import com.workflowfm.simulator.Task import 
com.workflowfm.simulator.{ SimulatedProcess, SimulationActor, TaskGenerator } import java.util.UUID import scala.collection.mutable.{ Map, Queue } import scala.concurrent.{ ExecutionContext, Future } -trait SimulatedPiProcess extends AtomicProcess with SimulatedProcess { - override def isSimulatedProcess = true - - override val iname = s"$simulationName.$name" - - def virtualWait() = simulationActor ! PiSimulationActor.Waiting(iname) - def virtualResume() = simulationActor ! PiSimulationActor.Resuming(iname) - - override def simulate[T]( - gen: TaskGenerator, - result:TaskMetrics => T, - resources:String* - )(implicit executionContext: ExecutionContext):Future[T] = { - val id = java.util.UUID.randomUUID - simulationActor ! PiSimulationActor.AddSource(id,iname) - val f = simulate(id,gen,result,resources:_*) - virtualWait() - f - } - -} - abstract class PiSimulationActor[T] (override val name: String, override val coordinator: ActorRef) (implicit executionContext: ExecutionContext) extends SimulationActor(name, coordinator) { @@ -81,11 +58,11 @@ abstract class PiSimulationActor[T] (override val name: String, override val coo readyCheck() } - override def complete(id: UUID, metrics: TaskMetrics) = { - sources.get(id).map { p => + override def complete(task: Task, time: Long) = { + sources.get(task.id).map { p => taskWaiting.dequeueFirst(_ == p) } - super.complete(id,metrics) + super.complete(task,time) } def executorReady(i: PiInstance[_]) = { diff --git a/simulator/src/main/scala/com/workflowfm/pew/simulator/SimulatedPiProcess.scala b/simulator/src/main/scala/com/workflowfm/pew/simulator/SimulatedPiProcess.scala new file mode 100644 index 00000000..70e350f8 --- /dev/null +++ b/simulator/src/main/scala/com/workflowfm/pew/simulator/SimulatedPiProcess.scala @@ -0,0 +1,31 @@ +package com.workflowfm.pew.simulator + +import akka.actor.{ ActorRef, Props } +import com.workflowfm.pew.{ MetadataAtomicProcess, PiInstance, PiMetadata, PiObject } +import 
com.workflowfm.pew.stream.{ PiEventHandler, PiEventHandlerFactory } +import com.workflowfm.pew.{ AtomicProcess, PiProcess } +import com.workflowfm.simulator.{ SimulatedProcess, Task, TaskGenerator } +import java.util.UUID +import scala.concurrent.{ ExecutionContext, Future } + +trait SimulatedPiProcess extends AtomicProcess with SimulatedProcess { + override def isSimulatedProcess = true + + override val iname = s"$simulationName.$name" + + def virtualWait() = simulationActor ! PiSimulationActor.Waiting(iname) + def virtualResume() = simulationActor ! PiSimulationActor.Resuming(iname) + + override def simulate[T]( + gen: TaskGenerator, + result: (Task, Long) => T, + resources: String* + )(implicit executionContext: ExecutionContext): Future[T] = { + val id = java.util.UUID.randomUUID + simulationActor ! PiSimulationActor.AddSource(id,iname) + val f = simulate(id,gen,result,resources:_*) + virtualWait() + f + } + +} From 454c2caf6eb914963df2ca7a22e78118969abd36 Mon Sep 17 00:00:00 2001 From: Petros Papapanagiotou Date: Wed, 4 Dec 2019 18:39:32 +0000 Subject: [PATCH 40/55] Lets Coordinator know we should wait for resumed processes Also tightens synchronisation when waiting/resuming --- build.sbt | 2 +- .../pew/simulator/PiSimulation.scala | 18 ++++++++++++++---- .../pew/simulator/SimulatedPiProcess.scala | 8 ++++++-- 3 files changed, 21 insertions(+), 7 deletions(-) diff --git a/build.sbt b/build.sbt index 5c30f9ec..a0596017 100644 --- a/build.sbt +++ b/build.sbt @@ -51,7 +51,7 @@ lazy val simulator = project .settings( commonSettings, name := "pew-simulator", - libraryDependencies += "com.workflowfm" %% "workflowfm-simulator" % "0.2-alpha-SNAPSHOT" + libraryDependencies += "com.workflowfm" %% "wfm-simulator" % "0.2-alpha-SNAPSHOT" ).dependsOn(rootRef) diff --git a/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimulation.scala b/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimulation.scala index 8dbfffcd..b2e77145 100644 --- 
a/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimulation.scala +++ b/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimulation.scala @@ -4,12 +4,13 @@ import akka.actor.{ ActorRef, Props } import com.workflowfm.pew.stream.{ PiEventHandler, PiEventHandlerFactory } import com.workflowfm.pew.{ PiProcess, PiInstance } import com.workflowfm.pew.execution.SimulatorExecutor -import com.workflowfm.simulator.Task +import com.workflowfm.simulator.{ Coordinator, Task } import com.workflowfm.simulator.{ SimulatedProcess, SimulationActor, TaskGenerator } import java.util.UUID import scala.collection.mutable.{ Map, Queue } import scala.concurrent.{ ExecutionContext, Future } + abstract class PiSimulationActor[T] (override val name: String, override val coordinator: ActorRef) (implicit executionContext: ExecutionContext) extends SimulationActor(name, coordinator) { @@ -77,18 +78,27 @@ abstract class PiSimulationActor[T] (override val name: String, override val coo def executorBusy() = executorIsReady = false def piActorReceive: Receive = { - case PiSimulationActor.Waiting(p) => processWaiting(p) - case PiSimulationActor.Resuming(p) => processResuming(p) + case PiSimulationActor.Waiting(p) => { + processWaiting(p) + sender() ! 
PiSimulationActor.Ack + } + + case PiSimulationActor.Resuming(p) => { + val actor = sender() + processResuming(p) + requestWait(actor) + } case PiSimulationActor.AddSource(id,iname) => sources += id->iname case PiSimulationActor.ExecutorBusy => executorBusy() case PiSimulationActor.ExecutorReady(i) => executorReady(i) } - override def receive = simulationActorReceive orElse piActorReceive + override def receive = LoggingReceive {simulationActorReceive orElse piActorReceive } } object PiSimulationActor { case class Waiting(process: String) case class Resuming(process: String) + case object Ack case class AddSource(id: UUID, iname: String) case object ExecutorBusy case class ExecutorReady(i: PiInstance[_]) diff --git a/simulator/src/main/scala/com/workflowfm/pew/simulator/SimulatedPiProcess.scala b/simulator/src/main/scala/com/workflowfm/pew/simulator/SimulatedPiProcess.scala index 70e350f8..7542cea4 100644 --- a/simulator/src/main/scala/com/workflowfm/pew/simulator/SimulatedPiProcess.scala +++ b/simulator/src/main/scala/com/workflowfm/pew/simulator/SimulatedPiProcess.scala @@ -7,14 +7,18 @@ import com.workflowfm.pew.{ AtomicProcess, PiProcess } import com.workflowfm.simulator.{ SimulatedProcess, Task, TaskGenerator } import java.util.UUID import scala.concurrent.{ ExecutionContext, Future } +import akka.pattern.ask +import akka.util.Timeout +import java.util.concurrent.TimeUnit +import scala.concurrent.duration.Duration trait SimulatedPiProcess extends AtomicProcess with SimulatedProcess { override def isSimulatedProcess = true override val iname = s"$simulationName.$name" - def virtualWait() = simulationActor ! PiSimulationActor.Waiting(iname) - def virtualResume() = simulationActor ! PiSimulationActor.Resuming(iname) + def virtualWait() = (simulationActor ? PiSimulationActor.Waiting(iname))(Timeout(1, TimeUnit.DAYS)) + def virtualResume() = (simulationActor ? 
PiSimulationActor.Resuming(iname))(Timeout(1, TimeUnit.DAYS)) override def simulate[T]( gen: TaskGenerator, From 279e25f63c20ab327b317912e521ccff6f86cbb5 Mon Sep 17 00:00:00 2001 From: Petros Papapanagiotou Date: Wed, 4 Dec 2019 23:59:56 +0000 Subject: [PATCH 41/55] Cleans leftover junk --- .../main/scala/com/workflowfm/pew/simulator/PiSimulation.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimulation.scala b/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimulation.scala index b2e77145..428a639f 100644 --- a/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimulation.scala +++ b/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimulation.scala @@ -93,7 +93,7 @@ abstract class PiSimulationActor[T] (override val name: String, override val coo case PiSimulationActor.ExecutorReady(i) => executorReady(i) } - override def receive = LoggingReceive {simulationActorReceive orElse piActorReceive } + override def receive = simulationActorReceive orElse piActorReceive } object PiSimulationActor { case class Waiting(process: String) From 5a480db553ee5b5984a046bff8f7900771ea7e3f Mon Sep 17 00:00:00 2001 From: Petros Papapanagiotou Date: Thu, 5 Dec 2019 16:36:37 +0000 Subject: [PATCH 42/55] Adds a note about the asks used in `SimulatedPiProcess` --- .../com/workflowfm/pew/simulator/SimulatedPiProcess.scala | 3 +++ 1 file changed, 3 insertions(+) diff --git a/simulator/src/main/scala/com/workflowfm/pew/simulator/SimulatedPiProcess.scala b/simulator/src/main/scala/com/workflowfm/pew/simulator/SimulatedPiProcess.scala index 7542cea4..fc48cb05 100644 --- a/simulator/src/main/scala/com/workflowfm/pew/simulator/SimulatedPiProcess.scala +++ b/simulator/src/main/scala/com/workflowfm/pew/simulator/SimulatedPiProcess.scala @@ -17,6 +17,9 @@ trait SimulatedPiProcess extends AtomicProcess with SimulatedProcess { override val iname = s"$simulationName.$name" + + // TODO We never actually 
wait for these asks, so there is still a chance the ordering will be messed up + // if the messages are delayed def virtualWait() = (simulationActor ? PiSimulationActor.Waiting(iname))(Timeout(1, TimeUnit.DAYS)) def virtualResume() = (simulationActor ? PiSimulationActor.Resuming(iname))(Timeout(1, TimeUnit.DAYS)) From 0ee8a1d0fc60c2662f5d050cc7ed62fc76f5c9f1 Mon Sep 17 00:00:00 2001 From: Petros Papapanagiotou Date: Thu, 5 Dec 2019 17:20:12 +0000 Subject: [PATCH 43/55] Removes unused process stores from executors Closes #56 --- .../pew/execution/AkkaExecutor.scala | 28 ++++++++--------- .../pew/execution/MultiStateExecutor.scala | 21 +++++++------ .../execution/SingleBlockingExecutor.scala | 18 ++++------- .../pew/execution/SingleStateExecutor.scala | 5 +--- .../pew/execution/AkkaExecutorTests.scala | 30 +++++++++---------- .../execution/CompositeExecutionTests.scala | 6 ++-- .../execution/MultiStateExecutorTests.scala | 12 ++++---- .../execution/SingleStateExecutorTests.scala | 12 ++++---- .../workflowfm/pew/metrics/MetricsTests.scala | 4 +-- 9 files changed, 61 insertions(+), 75 deletions(-) diff --git a/src/com/workflowfm/pew/execution/AkkaExecutor.scala b/src/com/workflowfm/pew/execution/AkkaExecutor.scala index 0258893a..af330d88 100644 --- a/src/com/workflowfm/pew/execution/AkkaExecutor.scala +++ b/src/com/workflowfm/pew/execution/AkkaExecutor.scala @@ -16,28 +16,23 @@ import java.util.UUID class AkkaExecutor ( store:PiInstanceStore[UUID], - processes:PiProcessStore, atomicExecutor: ActorRef )( implicit val system: ActorSystem, - override implicit val executionContext: ExecutionContext = ExecutionContext.global, - implicit val timeout:FiniteDuration = 10.seconds + implicit val timeout: FiniteDuration ) extends SimulatorExecutor[UUID] with PiObservable[UUID] { implicit val tag: ClassTag[UUID] = ClassTag(classOf[UUID]) + override implicit val executionContext: ExecutionContext = system.dispatcher - def this(store:PiInstanceStore[UUID], l:PiProcess*) - (implicit 
system: ActorSystem, context: ExecutionContext, timeout: FiniteDuration) = - this(store,SimpleProcessStore(l :_*),system.actorOf(AkkaExecutor.atomicprops())) + def this(store: PiInstanceStore[UUID]) + (implicit system: ActorSystem, timeout: FiniteDuration) = + this(store,system.actorOf(AkkaExecutor.atomicprops())) - def this(system: ActorSystem, context: ExecutionContext, timeout:FiniteDuration, l:PiProcess*) = - this(SimpleInstanceStore[UUID](),SimpleProcessStore(l :_*),system.actorOf(AkkaExecutor.atomicprops()))(system,context,timeout) + def this()(implicit system: ActorSystem, timeout: FiniteDuration = 10.seconds) = + this(SimpleInstanceStore[UUID](),system.actorOf(AkkaExecutor.atomicprops()))(system,timeout) - def this(l:PiProcess*)(implicit system: ActorSystem) = - this(SimpleInstanceStore[UUID](),SimpleProcessStore(l :_*),system.actorOf(AkkaExecutor.atomicprops()))(system,ExecutionContext.global,10.seconds) - - - val execActor = system.actorOf(AkkaExecutor.execprops(store,processes,atomicExecutor)) + val execActor = system.actorOf(AkkaExecutor.execprops(store, atomicExecutor)) implicit val tOut = Timeout(timeout) override def simulationReady = Await.result(execActor ? 
AkkaExecutor.SimReady,timeout).asInstanceOf[Boolean] @@ -69,13 +64,14 @@ object AkkaExecutor { def atomicprops(implicit context: ExecutionContext = ExecutionContext.global): Props = Props(new AkkaAtomicProcessExecutor()) - def execprops(store:PiInstanceStore[UUID], processes:PiProcessStore, atomicExecutor: ActorRef) - (implicit system: ActorSystem, exc: ExecutionContext): Props = Props(new AkkaExecActor(store,processes,atomicExecutor)) + def execprops(store: PiInstanceStore[UUID], atomicExecutor: ActorRef) + (implicit system: ActorSystem): Props = Props( + new AkkaExecActor(store,atomicExecutor)(system.dispatcher, implicitly[ClassTag[PiEvent[UUID]]]) + ) } class AkkaExecActor( var store:PiInstanceStore[UUID], - processes:PiProcessStore, atomicExecutor: ActorRef )( implicit val executionContext: ExecutionContext, diff --git a/src/com/workflowfm/pew/execution/MultiStateExecutor.scala b/src/com/workflowfm/pew/execution/MultiStateExecutor.scala index 1618f224..b7018a4d 100644 --- a/src/com/workflowfm/pew/execution/MultiStateExecutor.scala +++ b/src/com/workflowfm/pew/execution/MultiStateExecutor.scala @@ -14,23 +14,22 @@ import scala.util.{Failure, Success} * promises/futures from the first workflow can trigger changes on the state! 
*/ -class MultiStateExecutor(var store:PiInstanceStore[Int], processes:PiProcessStore) +class MultiStateExecutor(var store: PiInstanceStore[Int]) (override implicit val executionContext: ExecutionContext = ExecutionContext.global) extends SimulatorExecutor[Int] with SimplePiObservable[Int] { - def this(store:PiInstanceStore[Int], l:PiProcess*) = this(store,SimpleProcessStore(l :_*)) - def this(l:PiProcess*) = this(SimpleInstanceStore[Int](),SimpleProcessStore(l :_*)) + def this() = this(SimpleInstanceStore[Int]()) var ctr:Int = 0 - override protected def init(p:PiProcess,args:Seq[PiObject]) = store.synchronized { + override protected def init(p: PiProcess, args: Seq[PiObject]) = store.synchronized { val inst = PiInstance(ctr,p,args:_*) store = store.put(inst) ctr = ctr + 1 Future.successful(ctr-1) } - override def start(id:Int):Unit = store.get(id) match { + override def start(id: Int): Unit = store.get(id) match { case None => publish(PiFailureNoSuchInstance(id)) case Some(inst) => { publish(PiEventStart(inst)) @@ -51,7 +50,7 @@ class MultiStateExecutor(var store:PiInstanceStore[Int], processes:PiProcessStor } } - final def run(id:Int,f:PiInstance[Int]=>PiInstance[Int]):Unit = store.synchronized { + final def run(id: Int, f: PiInstance[Int]=>PiInstance[Int]): Unit = store.synchronized { store.get(id) match { case None => System.err.println("*** [" + id + "] No running instance! 
***") case Some(i) => @@ -75,7 +74,7 @@ class MultiStateExecutor(var store:PiInstanceStore[Int], processes:PiProcessStor } } - def handleThread(i:PiInstance[Int])(ref:Int,f:PiFuture):Boolean = { + def handleThread(i: PiInstance[Int])(ref: Int,f: PiFuture): Boolean = { System.err.println("*** [" + i.id + "] Handling thread: " + ref + " (" + f.fun + ")") f match { case PiFuture(name, outChan, args) => i.getProc(name) match { @@ -83,7 +82,7 @@ class MultiStateExecutor(var store:PiInstanceStore[Int], processes:PiProcessStor System.err.println("*** [" + i.id + "] ERROR *** Unable to find process: " + name) false } - case Some(p:MetadataAtomicProcess) => { + case Some(p: MetadataAtomicProcess) => { val objs = args map (_.obj) publish(PiEventCall(i.id,ref,p,objs)) p.runMeta(args map (_.obj)).onComplete{ @@ -96,14 +95,14 @@ class MultiStateExecutor(var store:PiInstanceStore[Int], processes:PiProcessStor System.err.println("*** [" + i.id + "] Called process: " + p.name + " ref:" + ref) true } - case Some(p:CompositeProcess) => { System.err.println("*** [" + i.id + "] Executor encountered composite process thread: " + name); false } // TODO this should never happen! + case Some(p: CompositeProcess) => { System.err.println("*** [" + i.id + "] Executor encountered composite process thread: " + name); false } // TODO this should never happen! 
} } } - def postResult(id:Int,ref:Int, res:PiObject):Unit = { + def postResult(id: Int, ref: Int, res: PiObject): Unit = { System.err.println("*** [" + id + "] Received result for thread " + ref + " : " + res) run(id,{x => x.postResult(ref, res)}) } - override def simulationReady:Boolean = store.simulationReady + override def simulationReady: Boolean = store.simulationReady } diff --git a/src/com/workflowfm/pew/execution/SingleBlockingExecutor.scala b/src/com/workflowfm/pew/execution/SingleBlockingExecutor.scala index 55629f1f..f7d161d0 100644 --- a/src/com/workflowfm/pew/execution/SingleBlockingExecutor.scala +++ b/src/com/workflowfm/pew/execution/SingleBlockingExecutor.scala @@ -11,8 +11,8 @@ import scala.annotation.tailrec * SingleBlockingExecutor fully executes one PiProcess from a map of given PiProcesses. * It blocks waiting for every atomic call to finish, so has no concurrency. */ -case class SingleBlockingExecutor(processes:Map[String,PiProcess])(implicit val context:ExecutionContext) { // extends ProcessExecutor[Int] with SimplePiObservable[Int] { - def call(process:PiProcess,args:Seq[PiObject]) = { +case class SingleBlockingExecutor(implicit val context:ExecutionContext) { // extends ProcessExecutor[Int] with SimplePiObservable[Int] { + def call(process:PiProcess, args:Seq[PiObject]) = { val s = process.execState(args) System.err.println(" === INITIAL STATE === \n" + s + "\n === === === === === === === ===") val fs = run(s) @@ -23,18 +23,18 @@ case class SingleBlockingExecutor(processes:Map[String,PiProcess])(implicit val } @tailrec - final def run(s:PiState):PiState = { + final def run(s: PiState): PiState = { val ns = s.fullReduce() if (ns.threads isEmpty) ns else run((ns /: ns.threads)(handleThread)) } - def handleThread(s:PiState, x:(Int,PiFuture)):PiState = x match { case (ref,f) => + def handleThread(s: PiState, x: (Int,PiFuture)): PiState = x match { case (ref,f) => handleThread(ref,f,s) getOrElse s } def handleThread(ref:Int, f:PiFuture, 
s:PiState):Option[PiState] = f match { - case PiFuture(name, outChan, args) => processes get name match { + case PiFuture(name, outChan, args) => s.processes.get(name) match { case None => { System.err.println("*** ERROR *** Unable to find process: " + name) Some(s removeThread ref) @@ -48,14 +48,8 @@ case class SingleBlockingExecutor(processes:Map[String,PiProcess])(implicit val } } - def withProc(p:PiProcess):SingleBlockingExecutor = copy(processes = processes + (p.name->p)) withProcs (p.dependencies :_*) - def withProcs(l:PiProcess*):SingleBlockingExecutor = (this /: l)(_ withProc _) - //override def simulationReady:Boolean = true def execute(process:PiProcess,args:Seq[Any]):Option[Any] = - this withProc process call(process,args map PiObject.apply) -} -object SingleBlockingExecutor { - def apply()(implicit context:ExecutionContext):SingleBlockingExecutor = SingleBlockingExecutor(Map[String,PiProcess]())(context) + call(process,args map PiObject.apply) } diff --git a/src/com/workflowfm/pew/execution/SingleStateExecutor.scala b/src/com/workflowfm/pew/execution/SingleStateExecutor.scala index 8c8c9560..809fac37 100644 --- a/src/com/workflowfm/pew/execution/SingleStateExecutor.scala +++ b/src/com/workflowfm/pew/execution/SingleStateExecutor.scala @@ -14,11 +14,8 @@ import scala.util.{Failure, Success} * promises/futures from the first workflow can trigger changes on the state! 
*/ -class SingleStateExecutor(processes:PiProcessStore) - (override implicit val executionContext: ExecutionContext = ExecutionContext.global) +class SingleStateExecutor(override implicit val executionContext: ExecutionContext = ExecutionContext.global) extends ProcessExecutor[Int] with SimplePiObservable[Int] { - - def this(l:PiProcess*) = this(SimpleProcessStore(l :_*)) var ctr:Int = 0 var instance:Option[PiInstance[Int]] = None diff --git a/test/com/workflowfm/pew/execution/AkkaExecutorTests.scala b/test/com/workflowfm/pew/execution/AkkaExecutorTests.scala index 07111e6d..2605b17e 100644 --- a/test/com/workflowfm/pew/execution/AkkaExecutorTests.scala +++ b/test/com/workflowfm/pew/execution/AkkaExecutorTests.scala @@ -32,7 +32,7 @@ class AkkaExecutorTests extends FlatSpec with Matchers with BeforeAndAfterAll wi } it should "execute atomic PbI once" in { - val ex = new AkkaExecutor(pai,pbi,pci,ri) + val ex = new AkkaExecutor() val f1 = ex.execute(pbi,Seq(1)) val r1 = await(f1) @@ -40,7 +40,7 @@ class AkkaExecutorTests extends FlatSpec with Matchers with BeforeAndAfterAll wi } it should "execute atomic PbI twice concurrently" in { - val ex = new AkkaExecutor(pai,pbi,pci,ri) + val ex = new AkkaExecutor() val f1 = ex.execute(pbi,Seq(2)) val f2 = ex.execute(pbi,Seq(1)) @@ -51,7 +51,7 @@ class AkkaExecutorTests extends FlatSpec with Matchers with BeforeAndAfterAll wi } it should "execute Rexample once" in { - val ex = new AkkaExecutor(pai,pbi,pci,ri) + val ex = new AkkaExecutor() val f1 = ex.execute(ri,Seq(21)) val r1 = await(f1) @@ -59,7 +59,7 @@ class AkkaExecutorTests extends FlatSpec with Matchers with BeforeAndAfterAll wi } it should "execute Rexample once with same timings" in { - val ex = new AkkaExecutor(pai,pbi,pci,ri) + val ex = new AkkaExecutor() val f1 = ex.execute(ri,Seq(11)) val r1 = await(f1) @@ -67,7 +67,7 @@ class AkkaExecutorTests extends FlatSpec with Matchers with BeforeAndAfterAll wi } it should "execute Rexample twice concurrently" in { - val ex = 
new AkkaExecutor(pai,pbi,pci,ri) + val ex = new AkkaExecutor() val f1 = ex.execute(ri,Seq(31)) val f2 = ex.execute(ri,Seq(12)) @@ -78,7 +78,7 @@ class AkkaExecutorTests extends FlatSpec with Matchers with BeforeAndAfterAll wi } it should "execute Rexample twice with same timings concurrently" in { - val ex = new AkkaExecutor(pai,pbi,pci,ri) + val ex = new AkkaExecutor() val f1 = ex.execute(ri,Seq(11)) val f2 = ex.execute(ri,Seq(11)) @@ -89,7 +89,7 @@ class AkkaExecutorTests extends FlatSpec with Matchers with BeforeAndAfterAll wi } it should "execute Rexample thrice concurrently" in { - val ex = new AkkaExecutor(pai,pbi,pci,ri) + val ex = new AkkaExecutor() val f1 = ex.execute(ri,Seq(11)) val f2 = ex.execute(ri,Seq(11)) val f3 = ex.execute(ri,Seq(11)) @@ -103,7 +103,7 @@ class AkkaExecutorTests extends FlatSpec with Matchers with BeforeAndAfterAll wi } it should "execute Rexample twice, each with a differnt component" in { - val ex = new AkkaExecutor(pai,pbi,pci,pci2,ri,ri2) + val ex = new AkkaExecutor() val f1 = ex.execute(ri,Seq(11)) val f2 = ex.execute(ri2,Seq(11)) @@ -115,7 +115,7 @@ class AkkaExecutorTests extends FlatSpec with Matchers with BeforeAndAfterAll wi it should "handle a failing atomic process" in { val p = new FailP - val ex = new AkkaExecutor(p) + val ex = new AkkaExecutor() val f1 = ex.execute(p,Seq(1)) try { @@ -129,7 +129,7 @@ class AkkaExecutorTests extends FlatSpec with Matchers with BeforeAndAfterAll wi } it should "handle a failing composite process" in { - val ex = new AkkaExecutor(pai,pbi,pcif,rif) + val ex = new AkkaExecutor() val f1 = rif(21)(ex)//ex.execute(rif,Seq(21)) try { @@ -145,7 +145,7 @@ class AkkaExecutorTests extends FlatSpec with Matchers with BeforeAndAfterAll wi } it should "execute Rexample with a CounterHandler" in { - val ex = new AkkaExecutor(pai,pbi,pci,ri) + val ex = new AkkaExecutor() val factory = new CounterHandlerFactory[UUID] val kill = ex.subscribe(new PrintEventHandler) @@ -157,8 +157,8 @@ class 
AkkaExecutorTests extends FlatSpec with Matchers with BeforeAndAfterAll wi } it should "allow separate handlers per executor" in { - val ex = new AkkaExecutor(pai,pbi,pci,ri) - val ex2 = new AkkaExecutor(pai,pbi,pci,ri) + val ex = new AkkaExecutor() + val ex2 = new AkkaExecutor() val factory = new CounterHandlerFactory[UUID] val k1 = ex.subscribe(new PrintEventHandler) val k2 = ex2.subscribe(new PrintEventHandler) @@ -177,7 +177,7 @@ class AkkaExecutorTests extends FlatSpec with Matchers with BeforeAndAfterAll wi } it should "allow separate handlers for separate workflows" in { - val ex = new AkkaExecutor(pai,pbi,pci,ri) + val ex = new AkkaExecutor() val factory = new CounterHandlerFactory[UUID] val f1 = ex.call(ri,Seq(55),factory) flatMap(_.future) @@ -194,7 +194,7 @@ class AkkaExecutorTests extends FlatSpec with Matchers with BeforeAndAfterAll wi } it should "unsubscribe handlers successfully" in { - val ex = new AkkaExecutor(pai,pbi,pci,ri) + val ex = new AkkaExecutor() val factory = new CounterHandlerFactory[UUID] val kill = ex.subscribe(new PrintEventHandler) kill.map(_.stop) diff --git a/test/com/workflowfm/pew/execution/CompositeExecutionTests.scala b/test/com/workflowfm/pew/execution/CompositeExecutionTests.scala index 3f4994c6..18e9718a 100644 --- a/test/com/workflowfm/pew/execution/CompositeExecutionTests.scala +++ b/test/com/workflowfm/pew/execution/CompositeExecutionTests.scala @@ -13,7 +13,7 @@ class CompositeExecutionTests extends FlatSpec with Matchers { implicit val context:ExecutionContext = ExecutionContext.global "SingleBlockingExecutor" should "execute C1" in { - SingleBlockingExecutor() withProcs (P1,C1) call(C1,Seq(PiPair(PiItem("OH"),PiItem("HAI!")))) should be( Some("OH++HAI!") ) + SingleBlockingExecutor() call(C1,Seq(PiPair(PiItem("OH"),PiItem("HAI!")))) should be( Some("OH++HAI!") ) } object P1 extends AtomicProcess { // X,Y -> (X++Y) @@ -36,7 +36,7 @@ class CompositeExecutionTests extends FlatSpec with Matchers { "SingleBlockingExecutor" 
should "execute C2" in { - SingleBlockingExecutor() withProcs (P2A,P2B,C2) call(C2,Seq(PiItem("HI:"))) should be( Some("HI:AABB") ) + SingleBlockingExecutor() call(C2,Seq(PiItem("HI:"))) should be( Some("HI:AABB") ) } object P2A extends AtomicProcess { // X -> XAA @@ -69,7 +69,7 @@ class CompositeExecutionTests extends FlatSpec with Matchers { "SingleBlockingExecutor" should "execute C3" in { - SingleBlockingExecutor() withProcs (P3A,P3B,C3) call(C3,Seq(PiItem("HI:"))) should be( Some(("HI:AARR","HI:BB")) ) + SingleBlockingExecutor() call(C3,Seq(PiItem("HI:"))) should be( Some(("HI:AARR","HI:BB")) ) } object P3A extends AtomicProcess { // X -> (XAA,XBB) diff --git a/test/com/workflowfm/pew/execution/MultiStateExecutorTests.scala b/test/com/workflowfm/pew/execution/MultiStateExecutorTests.scala index fe8736ec..f5e6d786 100644 --- a/test/com/workflowfm/pew/execution/MultiStateExecutorTests.scala +++ b/test/com/workflowfm/pew/execution/MultiStateExecutorTests.scala @@ -25,7 +25,7 @@ class MultiStateExecutorTests extends FlatSpec with Matchers with ProcessExecuto val ri = new R(pai,pbi,pci) "MultiStateExecutor" should "execute atomic PbI once" in { - val ex = new MultiStateExecutor(pai,pbi,pci,ri) + val ex = new MultiStateExecutor() val f1 = ex.execute(pbi,Seq(2)) val r1 = await(f1) @@ -33,7 +33,7 @@ class MultiStateExecutorTests extends FlatSpec with Matchers with ProcessExecuto } "MultiStateExecutor" should "execute atomic PbI twice concurrently" in { - val ex = new MultiStateExecutor(pai,pbi,pci,ri) + val ex = new MultiStateExecutor() val f1 = ex.execute(pbi,Seq(2)) val f2 = ex.execute(pbi,Seq(1)) @@ -44,7 +44,7 @@ class MultiStateExecutorTests extends FlatSpec with Matchers with ProcessExecuto } "MultiStateExecutor" should "execute Rexample once" in { - val ex = new MultiStateExecutor(pai,pbi,pci,ri) + val ex = new MultiStateExecutor() val f1 = ex.execute(ri,Seq(21)) val r1 = await(f1) @@ -52,7 +52,7 @@ class MultiStateExecutorTests extends FlatSpec with Matchers 
with ProcessExecuto } "MultiStateExecutor" should "execute Rexample twice concurrently" in { - val ex = new MultiStateExecutor(pai,pbi,pci,ri) + val ex = new MultiStateExecutor() val f1 = ex.execute(ri,Seq(31)) val f2 = ex.execute(ri,Seq(12)) @@ -63,7 +63,7 @@ class MultiStateExecutorTests extends FlatSpec with Matchers with ProcessExecuto } "MultiStateExecutor" should "execute Rexample twice with same timings concurrently" in { - val ex = new MultiStateExecutor(pai,pbi,pci,ri) + val ex = new MultiStateExecutor() val f1 = ex.execute(ri,Seq(11)) val f2 = ex.execute(ri,Seq(11)) @@ -74,7 +74,7 @@ class MultiStateExecutorTests extends FlatSpec with Matchers with ProcessExecuto } "MultiStateExecutor" should "execute Rexample thrice concurrently" in { - val ex = new MultiStateExecutor(pai,pbi,pci,ri) + val ex = new MultiStateExecutor() val f1 = ex.execute(ri,Seq(11)) val f2 = ex.execute(ri,Seq(11)) val f3 = ex.execute(ri,Seq(11)) diff --git a/test/com/workflowfm/pew/execution/SingleStateExecutorTests.scala b/test/com/workflowfm/pew/execution/SingleStateExecutorTests.scala index c212fde3..1e1c0867 100644 --- a/test/com/workflowfm/pew/execution/SingleStateExecutorTests.scala +++ b/test/com/workflowfm/pew/execution/SingleStateExecutorTests.scala @@ -28,14 +28,14 @@ class SingleStateExecutorTests extends FlatSpec with Matchers with ProcessExecut val rif = new R(pai,pbi,pcif) "SingleStateExecutor" should "execute Rexample concurrently" in { - val executor = new SingleStateExecutor(pai,pbi,pci,ri) + val executor = new SingleStateExecutor() executor.subscribe(new PrintEventHandler) exe(executor,ri,13)//.isEmpty should be( false ) - //exe(new SingleStateExecutor(pai,pbi,pci,ri),ri,31)//.isEmpty should be( false ) + //exe(new SingleStateExecutor(),ri,31)//.isEmpty should be( false ) } "SingleStateExecutor" should "handle a failing component process" in { - val ex = new SingleStateExecutor(pai,pbi,pcif,rif) + val ex = new SingleStateExecutor() val f1 = 
rif(21)(ex)//ex.execute(rif,Seq(21)) try { @@ -54,7 +54,7 @@ class SingleStateExecutorTests extends FlatSpec with Matchers with ProcessExecut // } "SingleStateExecutor" should "execute C1" in { - val executor = new SingleStateExecutor(P1,C1) + val executor = new SingleStateExecutor() exe(executor,C1,("OH","HAI!")) should be( "OH++HAI!" ) } @@ -78,7 +78,7 @@ class SingleStateExecutorTests extends FlatSpec with Matchers with ProcessExecut "SingleStateExecutor" should "execute C2" in { - exe(new SingleStateExecutor(P2A,P2B,C2),C2,"HI:") should be( "HI:AABB" ) + exe(new SingleStateExecutor(),C2,"HI:") should be( "HI:AABB" ) } object P2A extends AtomicProcess { // X -> XAA @@ -111,7 +111,7 @@ class SingleStateExecutorTests extends FlatSpec with Matchers with ProcessExecut "SingleStateExecutor" should "execute C3" in { - exe(new SingleStateExecutor(P3A,P3B,C3),C3,"HI:") should be( ("HI:AARR","HI:BB") ) + exe(new SingleStateExecutor(),C3,"HI:") should be( ("HI:AARR","HI:BB") ) } object P3A extends AtomicProcess { // X -> (XAA,XBB) diff --git a/test/com/workflowfm/pew/metrics/MetricsTests.scala b/test/com/workflowfm/pew/metrics/MetricsTests.scala index 9f7d1720..00af521f 100644 --- a/test/com/workflowfm/pew/metrics/MetricsTests.scala +++ b/test/com/workflowfm/pew/metrics/MetricsTests.scala @@ -37,7 +37,7 @@ class MetricsTests extends FlatSpec with Matchers with BeforeAndAfterAll with Pr it should "measure things" in { val handler = new MetricsHandler[UUID] - val ex = new AkkaExecutor(pai,pbi,pci,ri) + val ex = new AkkaExecutor() val k1 = ex.subscribe(handler) val f1 = ex.execute(ri,Seq(11)) @@ -54,7 +54,7 @@ class MetricsTests extends FlatSpec with Matchers with BeforeAndAfterAll with Pr it should "output a D3 timeline of 3 Rexample workflows" in { val handler = new MetricsHandler[UUID] - val ex = new AkkaExecutor(pai,pbi,pci,ri) + val ex = new AkkaExecutor() val k1 = ex.subscribe(handler) val f1 = ex.execute(ri,Seq(11)) From 55a34eccf8d3cf90518ffc5ae2d79e59a603d1a3 Mon 
Sep 17 00:00:00 2001 From: Petros Papapanagiotou Date: Sat, 7 Dec 2019 19:17:53 +0000 Subject: [PATCH 44/55] Updates executors to allow intermediate `PiInstance` execution (#58) Also allows swapping of the process map in a `PiInstance`/`PiState`. Closes #44 --- src/com/workflowfm/pew/PiInstance.scala | 5 +- src/com/workflowfm/pew/PiState.scala | 3 + .../pew/execution/AkkaExecutor.scala | 24 ++-- .../pew/execution/MultiStateExecutor.scala | 119 +++++++++--------- .../pew/execution/ProcessExecutor.scala | 69 ++++++++-- .../pew/execution/SingleStateExecutor.scala | 9 +- .../pew/mongodb/MongoExecutor.scala | 10 +- .../kafka/MinimalKafkaExecutor.scala | 14 +-- .../pew/execution/AkkaExecutorTests.scala | 13 +- .../pew/stateless/KafkaExecutorTests.scala | 2 +- 10 files changed, 165 insertions(+), 103 deletions(-) diff --git a/src/com/workflowfm/pew/PiInstance.scala b/src/com/workflowfm/pew/PiInstance.scala index c86d9d63..557c45f5 100644 --- a/src/com/workflowfm/pew/PiInstance.scala +++ b/src/com/workflowfm/pew/PiInstance.scala @@ -58,7 +58,10 @@ case class PiInstance[T](final val id:T, called:Seq[Int], process:PiProcess, sta } def getCalledProcesses: Seq[PiProcess] = state.threads flatMap { f => getProc(f._2.fun) } toSeq - + + def updateProcs(m: Map[String,PiProcess]) = copy(state = state.updateProcs(m)) + def updateProcs(f: PiProcess => PiProcess) = copy(state = state.updateProcs(f)) + /** * Should the simulator wait for the workflow? 
*/ diff --git a/src/com/workflowfm/pew/PiState.scala b/src/com/workflowfm/pew/PiState.scala index 6af99604..54d7ff59 100644 --- a/src/com/workflowfm/pew/PiState.scala +++ b/src/com/workflowfm/pew/PiState.scala @@ -47,6 +47,9 @@ case class PiState(inputs:Map[Chan,Input], outputs:Map[Chan,Output], calls:List[ def withProc(p:PiProcess) = copy(processes = processes + (p.name->p)) def withProcs(l:PiProcess*) = (this /: l)(_ withProc _) + def updateProcs(m: Map[String,PiProcess]) = copy(processes = m) + def updateProcs(f: PiProcess => PiProcess) = copy(processes = processes.mapValues(f)) + def withCalls(l:PiFuture*) = copy(calls = l.toList ++ calls) def withThread(ref:Int, name:String, chan:String, args:Seq[PiResource]) = withThreads((ref,PiFuture(name,Chan(chan),args))) diff --git a/src/com/workflowfm/pew/execution/AkkaExecutor.scala b/src/com/workflowfm/pew/execution/AkkaExecutor.scala index af330d88..be1b3038 100644 --- a/src/com/workflowfm/pew/execution/AkkaExecutor.scala +++ b/src/com/workflowfm/pew/execution/AkkaExecutor.scala @@ -36,18 +36,18 @@ class AkkaExecutor ( implicit val tOut = Timeout(timeout) override def simulationReady = Await.result(execActor ? AkkaExecutor.SimReady,timeout).asInstanceOf[Boolean] - - override protected def init(process:PiProcess,args:Seq[PiObject]):Future[UUID] = - execActor ? AkkaExecutor.Init(process,args) map (_.asInstanceOf[UUID]) - - override protected def start(id:UUID) = execActor ! AkkaExecutor.Start(id) - override def subscribe(handler:PiEventHandler[UUID]):Future[PiSwitch] = (execActor ? AkkaExecutor.Subscribe(handler)).mapTo[PiSwitch] + override protected def init(instance: PiInstance[_]): Future[UUID] = + execActor ? AkkaExecutor.Init(instance) map (_.asInstanceOf[UUID]) + + override protected def start(id: UUID) = execActor ! AkkaExecutor.Start(id) + + override def subscribe(handler: PiEventHandler[UUID]): Future[PiSwitch] = (execActor ? 
AkkaExecutor.Subscribe(handler)).mapTo[PiSwitch] } object AkkaExecutor { - case class Init(p:PiProcess,args:Seq[PiObject]) + case class Init(instance: PiInstance[_]) case class Start(id:UUID) case class Result(id:UUID,ref:Int,res:MetadataAtomicProcess.MetadataAtomicResult) case class Error(id:UUID,ref:Int,ex:Throwable) @@ -78,11 +78,10 @@ class AkkaExecActor( override implicit val tag: ClassTag[PiEvent[UUID]] ) extends Actor with PiStream[UUID] { - def init(p:PiProcess,args:Seq[PiObject]):UUID = { + def init(instance: PiInstance[_]): UUID = { val id = java.util.UUID.randomUUID - val inst = PiInstance(id,p,args:_*) - store = store.put(inst) - id + store = store.put(instance.copy(id = id)) + id } def start(id:UUID):Unit = store.get(id) match { @@ -185,7 +184,7 @@ class AkkaExecActor( def simulationReady():Boolean = store.simulationReady def akkaReceive: Receive = { - case AkkaExecutor.Init(p,args) => sender() ! init(p,args) + case AkkaExecutor.Init(inst) => sender() ! init(inst) case AkkaExecutor.Start(id) => start(id) case AkkaExecutor.Result(id,ref,res) => postResult(id,ref,res) case AkkaExecutor.Error(id,ref,ex) => { @@ -205,7 +204,6 @@ class AkkaExecActor( class AkkaAtomicProcessExecutor(implicit val exc: ExecutionContext = ExecutionContext.global) extends Actor { //(executor:ActorRef,p:PiProcess,args:Seq[PiObject]) def receive = { case AkkaExecutor.ACall(id,ref,p,args,actor) => { - //System.err.println("*** [" + id + "] Calling atomic process: " + p.name + " ref:" + ref) p.runMeta(args).onComplete{ case Success(res) => actor ! AkkaExecutor.Result(id,ref,res) case Failure(ex) => actor ! 
AkkaExecutor.Error(id,ref,ex) diff --git a/src/com/workflowfm/pew/execution/MultiStateExecutor.scala b/src/com/workflowfm/pew/execution/MultiStateExecutor.scala index b7018a4d..cae7e03c 100644 --- a/src/com/workflowfm/pew/execution/MultiStateExecutor.scala +++ b/src/com/workflowfm/pew/execution/MultiStateExecutor.scala @@ -7,12 +7,12 @@ import scala.concurrent._ import scala.util.{Failure, Success} /** - * Executes any PiProcess asynchronously. - * Only holds a single state, so can only execute one workflow at a time. - * - * Running a second workflow after one has finished executing can be risky because - * promises/futures from the first workflow can trigger changes on the state! - */ + * Executes any PiProcess asynchronously. + * Only holds a single state, so can only execute one workflow at a time. + * + * Running a second workflow after one has finished executing can be risky because + * promises/futures from the first workflow can trigger changes on the state! + */ class MultiStateExecutor(var store: PiInstanceStore[Int]) (override implicit val executionContext: ExecutionContext = ExecutionContext.global) @@ -21,12 +21,11 @@ class MultiStateExecutor(var store: PiInstanceStore[Int]) def this() = this(SimpleInstanceStore[Int]()) var ctr:Int = 0 - - override protected def init(p: PiProcess, args: Seq[PiObject]) = store.synchronized { - val inst = PiInstance(ctr,p,args:_*) - store = store.put(inst) - ctr = ctr + 1 - Future.successful(ctr-1) + + override protected def init(instance: PiInstance[_]): Future[Int] = store.synchronized { + store = store.put(instance.copy(id = ctr)) + ctr = ctr + 1 + Future.successful(ctr-1) } override def start(id: Int): Unit = store.get(id) match { @@ -35,74 +34,74 @@ class MultiStateExecutor(var store: PiInstanceStore[Int]) publish(PiEventStart(inst)) val ni = inst.reduce if (ni.completed) ni.result match { - case None => { - publish(PiFailureNoResult(ni)) - store = store.del(id) - } - case Some(res) => { - publish(PiEventResult(ni, 
res)) - store = store.del(id) - } + case None => { + publish(PiFailureNoResult(ni)) + store = store.del(id) + } + case Some(res) => { + publish(PiEventResult(ni, res)) + store = store.del(id) + } } else { - val (_,resi) = ni.handleThreads(handleThread(ni)) - store = store.put(resi) + val (_,resi) = ni.handleThreads(handleThread(ni)) + store = store.put(resi) } } } - + final def run(id: Int, f: PiInstance[Int]=>PiInstance[Int]): Unit = store.synchronized { - store.get(id) match { + store.get(id) match { case None => System.err.println("*** [" + id + "] No running instance! ***") - case Some(i) => + case Some(i) => if (i.id != id) System.err.println("*** [" + id + "] Different instance ID encountered: " + i.id) else { System.err.println("*** [" + id + "] Running!") val ni = f(i).reduce - if (ni.completed) ni.result match { - case None => { - publish(PiFailureNoResult(ni)) - store = store.del(ni.id) - } - case Some(res) => { - publish(PiEventResult(ni, res)) - store = store.del(ni.id) - } - } else { - store = store.put(ni.handleThreads(handleThread(ni))._2) - } + if (ni.completed) ni.result match { + case None => { + publish(PiFailureNoResult(ni)) + store = store.del(ni.id) + } + case Some(res) => { + publish(PiEventResult(ni, res)) + store = store.del(ni.id) + } + } else { + store = store.put(ni.handleThreads(handleThread(ni))._2) + } } - } + } } - + def handleThread(i: PiInstance[Int])(ref: Int,f: PiFuture): Boolean = { - System.err.println("*** [" + i.id + "] Handling thread: " + ref + " (" + f.fun + ")") + System.err.println("*** [" + i.id + "] Handling thread: " + ref + " (" + f.fun + ")") f match { - case PiFuture(name, outChan, args) => i.getProc(name) match { - case None => { - System.err.println("*** [" + i.id + "] ERROR *** Unable to find process: " + name) - false - } - case Some(p: MetadataAtomicProcess) => { - val objs = args map (_.obj) - publish(PiEventCall(i.id,ref,p,objs)) - p.runMeta(args map (_.obj)).onComplete{ - case Success(res) => { - 
publish(PiEventReturn(i.id,ref,PiObject.get(res._1),res._2)) - postResult(i.id,ref,res._1) + case PiFuture(name, outChan, args) => i.getProc(name) match { + case None => { + System.err.println("*** [" + i.id + "] ERROR *** Unable to find process: " + name) + false + } + case Some(p: MetadataAtomicProcess) => { + val objs = args map (_.obj) + publish(PiEventCall(i.id,ref,p,objs)) + p.runMeta(args map (_.obj)).onComplete{ + case Success(res) => { + publish(PiEventReturn(i.id,ref,PiObject.get(res._1),res._2)) + postResult(i.id,ref,res._1) + } + case Failure (ex) => publish(PiFailureAtomicProcessException(i.id,ref,ex)) } - case Failure (ex) => publish(PiFailureAtomicProcessException(i.id,ref,ex)) + System.err.println("*** [" + i.id + "] Called process: " + p.name + " ref:" + ref) + true } - System.err.println("*** [" + i.id + "] Called process: " + p.name + " ref:" + ref) - true + case Some(p: CompositeProcess) => { System.err.println("*** [" + i.id + "] Executor encountered composite process thread: " + name); false } // TODO this should never happen! } - case Some(p: CompositeProcess) => { System.err.println("*** [" + i.id + "] Executor encountered composite process thread: " + name); false } // TODO this should never happen! 
- } - } } - + } } + def postResult(id: Int, ref: Int, res: PiObject): Unit = { System.err.println("*** [" + id + "] Received result for thread " + ref + " : " + res) run(id,{x => x.postResult(ref, res)}) } - + override def simulationReady: Boolean = store.simulationReady } diff --git a/src/com/workflowfm/pew/execution/ProcessExecutor.scala b/src/com/workflowfm/pew/execution/ProcessExecutor.scala index 2aeab721..275c0737 100644 --- a/src/com/workflowfm/pew/execution/ProcessExecutor.scala +++ b/src/com/workflowfm/pew/execution/ProcessExecutor.scala @@ -28,16 +28,27 @@ case class AtomicProcessExecutor(process:AtomicProcess) { /** * Trait representing the ability to execute any PiProcess */ -trait ProcessExecutor[KeyT] { this:PiObservable[KeyT] => +trait ProcessExecutor[KeyT] { this: PiObservable[KeyT] => /** - * Initializes a PiInstance for a process execution. + * Initializes a PiProcess call for a process execution. * This is always and only invoked before a {@code start}, hence why it is protected. * This separation gives a chance to PiEventHandlers to subscribe before execution starts. * @param process The (atomic or composite) PiProcess to be executed * @param args The PiObject arguments to be passed to the process * @return A Future with the new unique ID that was generated */ - protected def init(process:PiProcess,args:Seq[PiObject]):Future[KeyT] + protected def init(process: PiProcess, args: Seq[PiObject]): Future[KeyT] = init(PiInstance(0, process, args: _*)) + + /** + * Initializes a PiInstance for a process execution. + * A new ID will be generated for the PiInstance to ensure freshness. + * This is always and only invoked before a {@code start}, hence why it is protected. + * This separation gives a chance to PiEventHandlers to subscribe before execution starts. 
+ * @param instance The PiInstance to be executed + * @return A Future with the new unique ID that was generated + */ + protected def init(instance: PiInstance[_]): Future[KeyT] + /** * Starts the execution of an initialized PiInstance. @@ -55,10 +66,20 @@ trait ProcessExecutor[KeyT] { this:PiObservable[KeyT] => * @param args The (real) arguments to be passed to the process * @return A Future with the ID corresponding to this execution */ - def call(process:PiProcess,args:Seq[Any]):Future[KeyT] = { + def call(process: PiProcess, args: Seq[Any]): Future[KeyT] = { init(process,args map PiObject.apply) map { id => start(id) ; id } } + /** + * A simple {@code init ; start} sequence when we do not need any even listeners. + * A new ID will be generated for the PiInstance to ensure freshness. + * @param instance The PiInstance to be executed + * @return A Future with the ID corresponding to this execution + */ + def call(instance: PiInstance[_]): Future[KeyT] = { + init(instance) map { id => start(id) ; id } + } + /** * A {@code init ; start} sequence that gives us a chance to subscribe a listener * that is specific to this execution. @@ -67,7 +88,11 @@ trait ProcessExecutor[KeyT] { this:PiObservable[KeyT] => * @param factory A PiEventHandlerFactory which generates PiEventHandler's for a given ID * @return A Future with the PiEventHandler that was generated */ - def call[H <: PiEventHandler[KeyT]](process:PiProcess,args:Seq[Any],factory:PiEventHandlerFactory[KeyT,H]):Future[H] = { + def call[H <: PiEventHandler[KeyT]] ( + process: PiProcess, + args: Seq[Any], + factory: PiEventHandlerFactory[KeyT,H] + ): Future[H] = { init(process,args map PiObject.apply) flatMap { id => val handler = factory.build(id) subscribe(handler).map { _ => @@ -77,14 +102,44 @@ trait ProcessExecutor[KeyT] { this:PiObservable[KeyT] => } } + /** + * A {@code init ; start} sequence that gives us a chance to subscribe a listener + * that is specific to this execution. 
+ * A new ID will be generated for the PiInstance to ensure freshness. + * @param instance The PiInstance to be executed + * @param factory A PiEventHandlerFactory which generates PiEventHandler's for a given ID + * @return A Future with the PiEventHandler that was generated + */ + def call[H <: PiEventHandler[KeyT]] + (instance: PiInstance[_], + factory: PiEventHandlerFactory[KeyT,H] + ): Future[H] = { + init(instance) flatMap { id => + val handler = factory.build(id) + subscribe(handler).map { _ => + start(id) + handler + } + } + } + /** * Executes a process with a PromiseHandler + * @param instance The PiInstance to be executed + * @return A Future with the result of the executed process + */ + def execute(process: PiProcess, args: Seq[Any]): Future[Any] = + call(process, args, new ResultHandlerFactory[KeyT]) flatMap (_.future) + + /** + * Executes a PiInstance with a PromiseHandler + * A new ID will be generated for the PiInstance to ensure freshness. * @param process The (atomic or composite) PiProcess to be executed * @param args The (real) arguments to be passed to the process * @return A Future with the result of the executed process */ - def execute(process:PiProcess,args:Seq[Any]):Future[Any] = - call(process,args,new ResultHandlerFactory[KeyT]) flatMap (_.future) + def execute(instance: PiInstance[_]): Future[Any] = + call(instance, new ResultHandlerFactory[KeyT]) flatMap (_.future) } object ProcessExecutor { diff --git a/src/com/workflowfm/pew/execution/SingleStateExecutor.scala b/src/com/workflowfm/pew/execution/SingleStateExecutor.scala index 809fac37..b349b734 100644 --- a/src/com/workflowfm/pew/execution/SingleStateExecutor.scala +++ b/src/com/workflowfm/pew/execution/SingleStateExecutor.scala @@ -19,12 +19,11 @@ class SingleStateExecutor(override implicit val executionContext: ExecutionConte var ctr:Int = 0 var instance:Option[PiInstance[Int]] = None - - override protected def init(p:PiProcess,args:Seq[PiObject]):Future[Int] = Future { - if 
(instance.isDefined) throw new ProcessExecutor.AlreadyExecutingException() + + override protected def init(instance: PiInstance[_]): Future[Int] = Future { + if (this.instance.isDefined) throw new ProcessExecutor.AlreadyExecutingException() else { - val inst = PiInstance(ctr,p,args:_*) - instance = Some(inst) + this.instance = Some(instance.copy(id = ctr)) ctr = ctr + 1 ctr - 1 } diff --git a/src/com/workflowfm/pew/mongodb/MongoExecutor.scala b/src/com/workflowfm/pew/mongodb/MongoExecutor.scala index 541f76ca..ed3d191c 100644 --- a/src/com/workflowfm/pew/mongodb/MongoExecutor.scala +++ b/src/com/workflowfm/pew/mongodb/MongoExecutor.scala @@ -32,11 +32,11 @@ import org.mongodb.scala.ReadConcern import org.mongodb.scala.WriteConcern class MongoExecutor - (client:MongoClient, db:String, collection:String, processes:PiProcessStore) + (client: MongoClient, db: String, collection: String, processes: PiProcessStore) (implicit val executionContext: ExecutionContext = ExecutionContext.global) extends ProcessExecutor[ObjectId] with SimplePiObservable[ObjectId] { - def this(client:MongoClient, db:String, collection:String, l:PiProcess*) = this(client,db,collection,SimpleProcessStore(l :_*)) + def this(client: MongoClient, db: String, collection: String, l: PiProcess*) = this(client,db,collection,SimpleProcessStore(l :_*)) final val CAS_MAX_ATTEMPTS = 10 final val CAS_WAIT_MS = 1 @@ -46,11 +46,11 @@ class MongoExecutor withCodecRegistry(codecRegistry). //withReadConcern(ReadConcern.LINEARIZABLE). 
withWriteConcern(WriteConcern.MAJORITY); - val col:MongoCollection[PiInstance[ObjectId]] = database.getCollection(collection) + val col: MongoCollection[PiInstance[ObjectId]] = database.getCollection(collection) - override def init(p:PiProcess,args:Seq[PiObject]):Future[ObjectId] = { + override def init(instance: PiInstance[_]): Future[ObjectId] = { val oid = new ObjectId - val inst = PiInstance(oid,p,args:_*) + val inst = instance.copy(id = oid) col.insertOne(inst).toFuture() map (_=>oid) } diff --git a/src/com/workflowfm/pew/stateless/instances/kafka/MinimalKafkaExecutor.scala b/src/com/workflowfm/pew/stateless/instances/kafka/MinimalKafkaExecutor.scala index c2a23a14..14846512 100644 --- a/src/com/workflowfm/pew/stateless/instances/kafka/MinimalKafkaExecutor.scala +++ b/src/com/workflowfm/pew/stateless/instances/kafka/MinimalKafkaExecutor.scala @@ -34,19 +34,13 @@ class MinimalKafkaExecutor( implicit val environment: KafkaExecutorEnvironment ) protected var piiStore: PiInstanceStore[ObjectId] = SimpleInstanceStore() - /** - * Initializes a PiInstance for a process execution. - * This is always and only invoked before a {@code start}, hence why it is protected. - * This separation gives a chance to PiEventHandlers to subscribe before execution starts. - * @param process The (atomic or composite) PiProcess to be executed - * @param args The PiObject arguments to be passed to the process - * @return A Future with the new unique ID that was generated - */ - override def init( process: PiProcess, args: Seq[PiObject] ): Future[ObjectId] = { + + + override def init( instance: PiInstance[_] ): Future[ObjectId] = { if (isShutdown) throw new ShutdownExecutorException( "`init` was called." 
) val piiId = ObjectId.get - piiStore = piiStore.put( PiInstance( piiId, process, args:_* ) ) + piiStore = piiStore.put( instance.copy(id = piiId) ) Future.successful( piiId ) } diff --git a/test/com/workflowfm/pew/execution/AkkaExecutorTests.scala b/test/com/workflowfm/pew/execution/AkkaExecutorTests.scala index 2605b17e..76dc4955 100644 --- a/test/com/workflowfm/pew/execution/AkkaExecutorTests.scala +++ b/test/com/workflowfm/pew/execution/AkkaExecutorTests.scala @@ -102,7 +102,7 @@ class AkkaExecutorTests extends FlatSpec with Matchers with BeforeAndAfterAll wi r3 should be (("PbISleptFor1s","PcISleptFor1s")) } - it should "execute Rexample twice, each with a differnt component" in { + it should "execute Rexample twice, each with a different component" in { val ex = new AkkaExecutor() val f1 = ex.execute(ri,Seq(11)) val f2 = ex.execute(ri2,Seq(11)) @@ -210,5 +210,16 @@ class AkkaExecutorTests extends FlatSpec with Matchers with BeforeAndAfterAll wi val r2 = await(f2) r2 should be (11) } + + + it should "execute a reduced Rexample instance" in { + val ex = new AkkaExecutor() + + val instance = PiInstance(0,ri,PiObject(11)).reduce.postResult(0, PiObject((1,2))).reduce + val f1 = ex.execute(instance) + + val r1 = await(f1) + r1 should be (("PbISleptFor1s","PcISleptFor2s")) + } } diff --git a/test/com/workflowfm/pew/stateless/KafkaExecutorTests.scala b/test/com/workflowfm/pew/stateless/KafkaExecutorTests.scala index d90fc129..fe5cd3f5 100644 --- a/test/com/workflowfm/pew/stateless/KafkaExecutorTests.scala +++ b/test/com/workflowfm/pew/stateless/KafkaExecutorTests.scala @@ -289,7 +289,7 @@ class KafkaExecutorTests } def baremetalCall( ex: CustomKafkaExecutor, p: PiProcess, args: PiObject* ): Future[Any] = { - val piiId = await( ex.init( p, args.toSeq ) ) + val piiId = await( ex.init( PiInstance(0, p, args:_* ) ) ) val handler = new ResultHandler(piiId) ex.subscribe(handler) From 81c3a03c062e23b89d292e93d198abfb8e735047 Mon Sep 17 00:00:00 2001 From: Petros 
Papapanagiotou Date: Sat, 7 Dec 2019 19:47:24 +0000 Subject: [PATCH 45/55] Cleans up more simulation stuff from main library Things like `SimulationExecutor`, `PiInstance.isSimulationReady` etc are no longer needed. --- ...Process.scala => PiSimulatedProcess.scala} | 5 +--- .../pew/simulator/PiSimulation.scala | 6 ++--- src/com/workflowfm/pew/PiInstance.scala | 17 +------------ src/com/workflowfm/pew/PiProcess.scala | 7 ------ .../pew/execution/AkkaExecutor.scala | 10 ++------ .../pew/execution/MultiStateExecutor.scala | 4 +--- .../pew/execution/ProcessExecutor.scala | 24 ------------------- .../execution/SingleBlockingExecutor.scala | 2 -- .../pew/execution/SingleStateExecutor.scala | 5 ---- 9 files changed, 8 insertions(+), 72 deletions(-) rename simulator/src/main/scala/com/workflowfm/pew/simulator/{SimulatedPiProcess.scala => PiSimulatedProcess.scala} (92%) diff --git a/simulator/src/main/scala/com/workflowfm/pew/simulator/SimulatedPiProcess.scala b/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimulatedProcess.scala similarity index 92% rename from simulator/src/main/scala/com/workflowfm/pew/simulator/SimulatedPiProcess.scala rename to simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimulatedProcess.scala index fc48cb05..78476e9f 100644 --- a/simulator/src/main/scala/com/workflowfm/pew/simulator/SimulatedPiProcess.scala +++ b/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimulatedProcess.scala @@ -12,12 +12,9 @@ import akka.util.Timeout import java.util.concurrent.TimeUnit import scala.concurrent.duration.Duration -trait SimulatedPiProcess extends AtomicProcess with SimulatedProcess { - override def isSimulatedProcess = true - +trait PiSimulatedProcess extends AtomicProcess with SimulatedProcess { override val iname = s"$simulationName.$name" - // TODO We never actually wait for these asks, so there is still a chance the ordering will be messed up // if the messages are delayed def virtualWait() = (simulationActor ? 
PiSimulationActor.Waiting(iname))(Timeout(1, TimeUnit.DAYS)) diff --git a/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimulation.scala b/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimulation.scala index 428a639f..ed38f1ee 100644 --- a/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimulation.scala +++ b/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimulation.scala @@ -3,7 +3,7 @@ package com.workflowfm.pew.simulator import akka.actor.{ ActorRef, Props } import com.workflowfm.pew.stream.{ PiEventHandler, PiEventHandlerFactory } import com.workflowfm.pew.{ PiProcess, PiInstance } -import com.workflowfm.pew.execution.SimulatorExecutor +import com.workflowfm.pew.execution.ProcessExecutor import com.workflowfm.simulator.{ Coordinator, Task } import com.workflowfm.simulator.{ SimulatedProcess, SimulationActor, TaskGenerator } import java.util.UUID @@ -25,7 +25,7 @@ abstract class PiSimulationActor[T] (override val name: String, override val coo def getProcesses(): Seq[PiProcess] = rootProcess :: rootProcess.allDependencies.toList - def executor: SimulatorExecutor[T] + def executor: ProcessExecutor[T] val factory = new PiSimHandlerFactory[T](self) override def run(): Future[Any] = { @@ -68,7 +68,7 @@ abstract class PiSimulationActor[T] (override val name: String, override val coo def executorReady(i: PiInstance[_]) = { val procs = i.getCalledProcesses - if (procs.forall(_.isSimulatedProcess)) { + if (procs.forall(_.isInstanceOf[PiSimulatedProcess])) { waiting = procs map (_.iname) executorIsReady = true readyCheck() diff --git a/src/com/workflowfm/pew/PiInstance.scala b/src/com/workflowfm/pew/PiInstance.scala index 557c45f5..57f2cfec 100644 --- a/src/com/workflowfm/pew/PiInstance.scala +++ b/src/com/workflowfm/pew/PiInstance.scala @@ -61,18 +61,6 @@ case class PiInstance[T](final val id:T, called:Seq[Int], process:PiProcess, sta def updateProcs(m: Map[String,PiProcess]) = copy(state = state.updateProcs(m)) def updateProcs(f: 
PiProcess => PiProcess) = copy(state = state.updateProcs(f)) - - /** - * Should the simulator wait for the workflow? - */ - def simulationReady:Boolean = - if (completed) true // workflow is done - else { - val procs = state.threads flatMap { f => getProc(f._2.fun) } - if (procs.isEmpty) false // workflow is not completed, so we either couldn't find a process with getProc or - // calls have not been converted to threads yet (so no fullreduce) for whatever reason - else procs.forall(_.isSimulatedProcess) // are all open threads simulated processes? - } } object PiInstance { def apply[T](id:T,p:PiProcess,args:PiObject*):PiInstance[T] = PiInstance(id, Seq(), p, p.execState(args)) @@ -90,14 +78,12 @@ trait PiInstanceStore[T] { def get(id:T):Option[PiInstance[T]] def put(i:PiInstance[T]):PiInstanceStore[T] def del(id:T):PiInstanceStore[T] - def simulationReady:Boolean } trait PiInstanceMutableStore[T] { def get(id:T):Option[PiInstance[T]] def put(i:PiInstance[T]):Unit def del(id:T):Unit - def simulationReady:Boolean } case class SimpleInstanceStore[T](m: Map[T,PiInstance[T]]) extends PiInstanceStore[T] { @@ -105,8 +91,7 @@ case class SimpleInstanceStore[T](m: Map[T,PiInstance[T]]) extends PiInstanceSto override def get(id: T): Option[PiInstance[T]] = m.get(id) override def put(i: PiInstance[T]): SimpleInstanceStore[T] = copy(m = m + (i.id->i)) override def del(id: T): SimpleInstanceStore[T] = copy(m = m - id) - - override def simulationReady: Boolean = m.values.forall(_.simulationReady) + /*{ m.values.foreach { i => { val procs = i.state.threads.values.map(_.fun).mkString(", ") diff --git a/src/com/workflowfm/pew/PiProcess.scala b/src/com/workflowfm/pew/PiProcess.scala index 8d86c5d0..85589090 100644 --- a/src/com/workflowfm/pew/PiProcess.scala +++ b/src/com/workflowfm/pew/PiProcess.scala @@ -55,13 +55,6 @@ sealed trait PiProcess { * * Shortcut to create entries in name->PiProcess maps using the instance name */ def toIEntry:(String,PiProcess) = iname->this - - /** - * 
This is used to identify simulation processes that need (virtual) time to complete. - * If a process is not a simulation process, then the simulator needs to wait for it to complete before - * the next virtual tick. - */ - def isSimulatedProcess = false } object PiProcess { def allDependenciesOf(p:PiProcess):Seq[PiProcess] = diff --git a/src/com/workflowfm/pew/execution/AkkaExecutor.scala b/src/com/workflowfm/pew/execution/AkkaExecutor.scala index be1b3038..cc52aae2 100644 --- a/src/com/workflowfm/pew/execution/AkkaExecutor.scala +++ b/src/com/workflowfm/pew/execution/AkkaExecutor.scala @@ -20,7 +20,7 @@ class AkkaExecutor ( )( implicit val system: ActorSystem, implicit val timeout: FiniteDuration -) extends SimulatorExecutor[UUID] with PiObservable[UUID] { +) extends ProcessExecutor[UUID] with PiObservable[UUID] { implicit val tag: ClassTag[UUID] = ClassTag(classOf[UUID]) override implicit val executionContext: ExecutionContext = system.dispatcher @@ -34,8 +34,6 @@ class AkkaExecutor ( val execActor = system.actorOf(AkkaExecutor.execprops(store, atomicExecutor)) implicit val tOut = Timeout(timeout) - - override def simulationReady = Await.result(execActor ? AkkaExecutor.SimReady,timeout).asInstanceOf[Boolean] override protected def init(instance: PiInstance[_]): Future[UUID] = execActor ? AkkaExecutor.Init(instance) map (_.asInstanceOf[UUID]) @@ -58,7 +56,6 @@ object AkkaExecutor { case class AFuture(f:Future[Any]) case object Ping - case object SimReady case class Subscribe(handler:PiEventHandler[UUID]) @@ -180,9 +177,7 @@ class AkkaExecActor( } } } } - - def simulationReady():Boolean = store.simulationReady - + def akkaReceive: Receive = { case AkkaExecutor.Init(inst) => sender() ! init(inst) case AkkaExecutor.Start(id) => start(id) @@ -193,7 +188,6 @@ class AkkaExecActor( } case AkkaExecutor.Ping => sender() ! AkkaExecutor.Ping case AkkaExecutor.AckCall => Unit - case AkkaExecutor.SimReady => sender() ! 
simulationReady() case AkkaExecutor.Subscribe(h) => subscribe(h) pipeTo sender() case m => System.err.println("!!! Received unknown message: " + m) } diff --git a/src/com/workflowfm/pew/execution/MultiStateExecutor.scala b/src/com/workflowfm/pew/execution/MultiStateExecutor.scala index cae7e03c..d3ae1e8d 100644 --- a/src/com/workflowfm/pew/execution/MultiStateExecutor.scala +++ b/src/com/workflowfm/pew/execution/MultiStateExecutor.scala @@ -16,7 +16,7 @@ import scala.util.{Failure, Success} class MultiStateExecutor(var store: PiInstanceStore[Int]) (override implicit val executionContext: ExecutionContext = ExecutionContext.global) - extends SimulatorExecutor[Int] with SimplePiObservable[Int] { + extends ProcessExecutor[Int] with SimplePiObservable[Int] { def this() = this(SimpleInstanceStore[Int]()) @@ -102,6 +102,4 @@ class MultiStateExecutor(var store: PiInstanceStore[Int]) System.err.println("*** [" + id + "] Received result for thread " + ref + " : " + res) run(id,{x => x.postResult(ref, res)}) } - - override def simulationReady: Boolean = store.simulationReady } diff --git a/src/com/workflowfm/pew/execution/ProcessExecutor.scala b/src/com/workflowfm/pew/execution/ProcessExecutor.scala index 275c0737..99244a59 100644 --- a/src/com/workflowfm/pew/execution/ProcessExecutor.scala +++ b/src/com/workflowfm/pew/execution/ProcessExecutor.scala @@ -146,27 +146,3 @@ object ProcessExecutor { final case class AlreadyExecutingException(private val cause: Throwable = None.orNull) extends Exception("Unable to execute more than one process at a time", cause) } - -trait SimulatorExecutor[KeyT] extends ProcessExecutor[KeyT] { this:PiObservable[KeyT] => - /** - * This should check all executing PiInstances if they are simulationReady. - * This means that all possible execution has been performed and they are all - * waiting for simulation time to pass. 
- * @return true if all PiInstances are simulationReady - */ - def simulationReady:Boolean - - /** - * Executes a process with a ResultHandler - * Same as ProcessExecutor.execute but blocks until call has been initiated. - * The simulator needs to ensure this has happened before continuing. - * @param process The (atomic or composite) PiProcess to be executed - * @param args The (real) arguments to be passed to the process - * @return A Future with the result of the executed process - */ - def simulate(process:PiProcess,args:Seq[Any],timeout:FiniteDuration=10.seconds):Future[Any] = { - val f = call(process,args,new ResultHandlerFactory[KeyT]) - val handler = Await.result(f, timeout) - handler.future - } -} diff --git a/src/com/workflowfm/pew/execution/SingleBlockingExecutor.scala b/src/com/workflowfm/pew/execution/SingleBlockingExecutor.scala index f7d161d0..595f0d12 100644 --- a/src/com/workflowfm/pew/execution/SingleBlockingExecutor.scala +++ b/src/com/workflowfm/pew/execution/SingleBlockingExecutor.scala @@ -48,8 +48,6 @@ case class SingleBlockingExecutor(implicit val context:ExecutionContext) { // ex } } - //override def simulationReady:Boolean = true - def execute(process:PiProcess,args:Seq[Any]):Option[Any] = call(process,args map PiObject.apply) } diff --git a/src/com/workflowfm/pew/execution/SingleStateExecutor.scala b/src/com/workflowfm/pew/execution/SingleStateExecutor.scala index b349b734..eb5fba7d 100644 --- a/src/com/workflowfm/pew/execution/SingleStateExecutor.scala +++ b/src/com/workflowfm/pew/execution/SingleStateExecutor.scala @@ -104,10 +104,5 @@ class SingleStateExecutor(override implicit val executionContext: ExecutionConte } } } - - def simulationReady:Boolean = instance match { - case None => true - case Some(i) => i.simulationReady - } } From d3d8a6a556f95f8164dadb6be789411224d9bc8c Mon Sep 17 00:00:00 2001 From: Petros Papapanagiotou Date: Thu, 12 Dec 2019 17:40:05 +0000 Subject: [PATCH 46/55] Updates wfm-simulator dependency to version 0.2 
--- build.sbt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build.sbt b/build.sbt index a0596017..0880c97a 100644 --- a/build.sbt +++ b/build.sbt @@ -51,7 +51,7 @@ lazy val simulator = project .settings( commonSettings, name := "pew-simulator", - libraryDependencies += "com.workflowfm" %% "wfm-simulator" % "0.2-alpha-SNAPSHOT" + libraryDependencies += "com.workflowfm" %% "wfm-simulator" % "0.2" ).dependsOn(rootRef) From ebca1e7c9680fa48409c1b34b62db8ed061f8561 Mon Sep 17 00:00:00 2001 From: Petros Papapanagiotou Date: Mon, 16 Dec 2019 12:45:55 +0000 Subject: [PATCH 47/55] Makes `PiProcess` dependencies vals for efficiency --- src/com/workflowfm/pew/PiProcess.scala | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/com/workflowfm/pew/PiProcess.scala b/src/com/workflowfm/pew/PiProcess.scala index 85589090..24b9b971 100644 --- a/src/com/workflowfm/pew/PiProcess.scala +++ b/src/com/workflowfm/pew/PiProcess.scala @@ -20,9 +20,9 @@ sealed trait PiProcess { def inputs:Seq[(PiObject,String)] // List of (type,channel) for each input. def channels:Seq[String] = output._2 +: (inputs map (_._2)) // order of channels is important for correct process calls! - def dependencies:Seq[PiProcess] // dependencies of composite processes + val dependencies:Seq[PiProcess] // dependencies of composite processes - def allDependencies:Seq[PiProcess] = PiProcess.allDependenciesOf(this) // all ancestors (i.e. including dependencies of dependencies + lazy val allDependencies:Seq[PiProcess] = PiProcess.allDependenciesOf(this) // all ancestors (i.e. including dependencies of dependencies /** * Initializes a PiState that executes this process with a given list of PiObject arguments. 
From 04d8418245e1747c5e97612b2a3d1740491ca029 Mon Sep 17 00:00:00 2001 From: Petros Papapanagiotou Date: Mon, 16 Dec 2019 14:19:54 +0000 Subject: [PATCH 48/55] Fixes `MetricsHandler` not handling `PiEventIdle` --- src/com/workflowfm/pew/PiEvents.scala | 2 +- src/com/workflowfm/pew/metrics/Measure.scala | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/src/com/workflowfm/pew/PiEvents.scala b/src/com/workflowfm/pew/PiEvents.scala index 35cadc0d..f641a0f0 100644 --- a/src/com/workflowfm/pew/PiEvents.scala +++ b/src/com/workflowfm/pew/PiEvents.scala @@ -364,7 +364,7 @@ case class PiEventCall[KeyT]( * * @tparam KeyT The type used to identify PiInstances. */ -trait PiEventCallEnd[KeyT] extends PiAtomicProcessEvent[KeyT] +sealed trait PiEventCallEnd[KeyT] extends PiAtomicProcessEvent[KeyT] /** Denotes the successful completion of execution of a AtomicProcess. * diff --git a/src/com/workflowfm/pew/metrics/Measure.scala b/src/com/workflowfm/pew/metrics/Measure.scala index fc3c8681..d30ec085 100644 --- a/src/com/workflowfm/pew/metrics/Measure.scala +++ b/src/com/workflowfm/pew/metrics/Measure.scala @@ -203,6 +203,7 @@ class MetricsHandler[KeyT](timeFn: PiMetadata.Key[Long] = PiMetadata.SystemTime case PiEventCall(i,r,p,_,t) => procCall( i, r, p.iname, timeFn(t) ) case PiEventReturn(i,r,s,t) => procReturn( i, r, s, timeFn(t) ) case PiFailureAtomicProcessException(i,r,m,_,t) => processFailure( i, r, m, timeFn(t) ) + case PiEventIdle(_, _) => Unit case ev: PiFailure[KeyT] => workflowException( ev.id, ev.exception, timeFn(ev.metadata) ) From 3bc767668bf5139da8e2a362d20373a687b0e211 Mon Sep 17 00:00:00 2001 From: Petros Papapanagiotou Date: Mon, 16 Dec 2019 14:25:10 +0000 Subject: [PATCH 49/55] Allows control of subscription timeout in `PiStream` Affects `AkkaExecutor`, though it already had a timeout parameter --- src/com/workflowfm/pew/execution/AkkaExecutor.scala | 7 ++++--- src/com/workflowfm/pew/stream/PiStream.scala | 6 +++++- 2 files changed, 9 
insertions(+), 4 deletions(-) diff --git a/src/com/workflowfm/pew/execution/AkkaExecutor.scala b/src/com/workflowfm/pew/execution/AkkaExecutor.scala index cc52aae2..5be4a753 100644 --- a/src/com/workflowfm/pew/execution/AkkaExecutor.scala +++ b/src/com/workflowfm/pew/execution/AkkaExecutor.scala @@ -62,8 +62,8 @@ object AkkaExecutor { def atomicprops(implicit context: ExecutionContext = ExecutionContext.global): Props = Props(new AkkaAtomicProcessExecutor()) def execprops(store: PiInstanceStore[UUID], atomicExecutor: ActorRef) - (implicit system: ActorSystem): Props = Props( - new AkkaExecActor(store,atomicExecutor)(system.dispatcher, implicitly[ClassTag[PiEvent[UUID]]]) + (implicit system: ActorSystem, timeout: FiniteDuration): Props = Props( + new AkkaExecActor(store,atomicExecutor)(system.dispatcher, implicitly[ClassTag[PiEvent[UUID]]], timeout) ) } @@ -72,7 +72,8 @@ class AkkaExecActor( atomicExecutor: ActorRef )( implicit val executionContext: ExecutionContext, - override implicit val tag: ClassTag[PiEvent[UUID]] + override implicit val tag: ClassTag[PiEvent[UUID]], + override implicit val timeout: FiniteDuration ) extends Actor with PiStream[UUID] { def init(instance: PiInstance[_]): UUID = { diff --git a/src/com/workflowfm/pew/stream/PiStream.scala b/src/com/workflowfm/pew/stream/PiStream.scala index 9e52dc30..95e03c21 100644 --- a/src/com/workflowfm/pew/stream/PiStream.scala +++ b/src/com/workflowfm/pew/stream/PiStream.scala @@ -4,6 +4,7 @@ import akka.actor.ActorRef import akka.stream.KillSwitch import com.workflowfm.pew.{ PiEvent, PiEventResult } import scala.concurrent.{ ExecutionContext, Future } +import scala.concurrent.duration.FiniteDuration import scala.reflect.ClassTag import uk.ac.ed.inf.ppapapan.subakka.{ HashSetPublisher, Subscriber, SubscriptionSwitch } @@ -21,9 +22,12 @@ class PiSubscriber[T](handler: PiEventHandler[T]) extends Subscriber[PiEvent[T]] trait PiStream[T] extends PiPublisher[T] with HashSetPublisher[PiEvent[T]] with 
PiObservable[T] { implicit val tag: ClassTag[PiEvent[T]] implicit val executionContext: ExecutionContext + implicit val timeout: FiniteDuration override def subscribe(handler:PiEventHandler[T]):Future[PiSwitch] = { - new PiSubscriber[T](handler).subscribeTo(self)(context.system,tag) map { i => PiKillSwitch(i.killSwitch) } + new PiSubscriber[T](handler).subscribeTo(self, None, timeout)(context.system,tag) map { + i => PiKillSwitch(i.killSwitch) + } } } From 92b76cdf70d4d368e4351eae4ccab248d745ecaa Mon Sep 17 00:00:00 2001 From: Petros Papapanagiotou Date: Tue, 17 Dec 2019 17:15:13 +0000 Subject: [PATCH 50/55] Updates wfm-simulator version to 0.2.1 --- build.sbt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build.sbt b/build.sbt index 0880c97a..fc887326 100644 --- a/build.sbt +++ b/build.sbt @@ -51,7 +51,7 @@ lazy val simulator = project .settings( commonSettings, name := "pew-simulator", - libraryDependencies += "com.workflowfm" %% "wfm-simulator" % "0.2" + libraryDependencies += "com.workflowfm" %% "wfm-simulator" % "0.2.1" ).dependsOn(rootRef) From c5e23ae50777d0585e1865dbab430514102a0cf6 Mon Sep 17 00:00:00 2001 From: Petros Papapanagiotou Date: Wed, 18 Dec 2019 23:04:05 +0000 Subject: [PATCH 51/55] Removes unnecessary atomic executor actor from `AkkaExecutor` --- .../pew/execution/AkkaExecutor.scala | 37 +++++-------------- 1 file changed, 9 insertions(+), 28 deletions(-) diff --git a/src/com/workflowfm/pew/execution/AkkaExecutor.scala b/src/com/workflowfm/pew/execution/AkkaExecutor.scala index 5be4a753..bb79bc07 100644 --- a/src/com/workflowfm/pew/execution/AkkaExecutor.scala +++ b/src/com/workflowfm/pew/execution/AkkaExecutor.scala @@ -15,8 +15,7 @@ import scala.util.{Failure, Success} import java.util.UUID class AkkaExecutor ( - store:PiInstanceStore[UUID], - atomicExecutor: ActorRef + store:PiInstanceStore[UUID] )( implicit val system: ActorSystem, implicit val timeout: FiniteDuration @@ -25,14 +24,10 @@ class AkkaExecutor ( implicit val 
tag: ClassTag[UUID] = ClassTag(classOf[UUID]) override implicit val executionContext: ExecutionContext = system.dispatcher - def this(store: PiInstanceStore[UUID]) - (implicit system: ActorSystem, timeout: FiniteDuration) = - this(store,system.actorOf(AkkaExecutor.atomicprops())) - def this()(implicit system: ActorSystem, timeout: FiniteDuration = 10.seconds) = - this(SimpleInstanceStore[UUID](),system.actorOf(AkkaExecutor.atomicprops()))(system,timeout) + this(SimpleInstanceStore[UUID]())(system,timeout) - val execActor = system.actorOf(AkkaExecutor.execprops(store, atomicExecutor)) + val execActor = system.actorOf(AkkaExecutor.execprops(store)) implicit val tOut = Timeout(timeout) override protected def init(instance: PiInstance[_]): Future[UUID] = @@ -59,17 +54,14 @@ object AkkaExecutor { case class Subscribe(handler:PiEventHandler[UUID]) - def atomicprops(implicit context: ExecutionContext = ExecutionContext.global): Props = Props(new AkkaAtomicProcessExecutor()) - - def execprops(store: PiInstanceStore[UUID], atomicExecutor: ActorRef) + def execprops(store: PiInstanceStore[UUID]) (implicit system: ActorSystem, timeout: FiniteDuration): Props = Props( - new AkkaExecActor(store,atomicExecutor)(system.dispatcher, implicitly[ClassTag[PiEvent[UUID]]], timeout) + new AkkaExecActor(store)(system.dispatcher, implicitly[ClassTag[PiEvent[UUID]]], timeout) ) } class AkkaExecActor( var store:PiInstanceStore[UUID], - atomicExecutor: ActorRef )( implicit val executionContext: ExecutionContext, override implicit val tag: ClassTag[PiEvent[UUID]], @@ -167,8 +159,10 @@ class AkkaExecActor( val objs = args map (_.obj) try { publish(PiEventCall(i.id,ref,p,objs)) - // TODO Change from ! to ? to require an acknowledgement - atomicExecutor ! AkkaExecutor.ACall(i.id,ref,p,objs,self) + p.runMeta(objs).onComplete{ + case Success(res) => self ! AkkaExecutor.Result(i.id,ref,res) + case Failure(ex) => self ! 
AkkaExecutor.Error(i.id,ref,ex) + } } catch { case _:Throwable => Unit //TODO specify timeout exception here! - also print a warning } @@ -195,16 +189,3 @@ class AkkaExecActor( override def receive = LoggingReceive { publisherBehaviour orElse akkaReceive } } - -class AkkaAtomicProcessExecutor(implicit val exc: ExecutionContext = ExecutionContext.global) extends Actor { //(executor:ActorRef,p:PiProcess,args:Seq[PiObject]) - def receive = { - case AkkaExecutor.ACall(id,ref,p,args,actor) => { - p.runMeta(args).onComplete{ - case Success(res) => actor ! AkkaExecutor.Result(id,ref,res) - case Failure(ex) => actor ! AkkaExecutor.Error(id,ref,ex) - } - actor ! AkkaExecutor.AckCall - } - case m => System.err.println("!! Received unknown message: " + m) - } -} From e255b556c2e94100fd5bb62960c2126e9e3d810e Mon Sep 17 00:00:00 2001 From: Petros Papapanagiotou Date: Thu, 19 Dec 2019 14:54:59 +0000 Subject: [PATCH 52/55] Updates dependencies --- build.sbt | 12 ++++++------ .../instances/kafka/components/Tracked.scala | 6 +++--- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/build.sbt b/build.sbt index fc887326..1f598033 100644 --- a/build.sbt +++ b/build.sbt @@ -19,15 +19,15 @@ libraryDependencies += "org.scalatest" %% "scalatest" % "3.2.0-SNAP10" % "test" libraryDependencies += "org.scalamock" %% "scalamock" % "4.1.0" % Test libraryDependencies += "org.scalacheck" %% "scalacheck" % "1.14.0" % "test" -libraryDependencies += "com.typesafe.akka" %% "akka-actor" % "2.4.12" -libraryDependencies += "com.typesafe.akka" %% "akka-testkit" % "2.5.16" % "test" +libraryDependencies += "com.typesafe.akka" %% "akka-actor" % "2.6.1" +libraryDependencies += "com.typesafe.akka" %% "akka-testkit" % "2.6.1" % "test" libraryDependencies += "org.apache.commons" % "commons-lang3" % "3.3.2" -libraryDependencies += "com.typesafe.akka" %% "akka-stream-kafka" % "0.21.1" -libraryDependencies += "com.typesafe.akka" %% "akka-stream" % "2.5.13" -libraryDependencies += "com.typesafe.akka" %% 
"akka-http" % "10.1.3" -libraryDependencies += "de.heikoseeberger" %% "akka-http-jackson" % "1.21.0" +libraryDependencies += "com.typesafe.akka" %% "akka-stream-kafka" % "1.1.0" +libraryDependencies += "com.typesafe.akka" %% "akka-stream" % "2.6.1" +libraryDependencies += "com.typesafe.akka" %% "akka-http" % "10.1.11" +libraryDependencies += "de.heikoseeberger" %% "akka-http-jackson" % "1.27.0" libraryDependencies += "org.apache.kafka" %% "kafka" % "1.1.0" libraryDependencies += "org.apache.kafka" % "kafka-streams" % "1.1.0" diff --git a/src/com/workflowfm/pew/stateless/instances/kafka/components/Tracked.scala b/src/com/workflowfm/pew/stateless/instances/kafka/components/Tracked.scala index 4253631f..faac8325 100644 --- a/src/com/workflowfm/pew/stateless/instances/kafka/components/Tracked.scala +++ b/src/com/workflowfm/pew/stateless/instances/kafka/components/Tracked.scala @@ -9,7 +9,6 @@ import akka.{Done, NotUsed} import com.workflowfm.pew.stateless.StatelessMessages.AnyMsg import com.workflowfm.pew.stateless.instances.kafka.settings.KafkaExecutorEnvironment import com.workflowfm.pew.util.ClassLoaderUtil.withClassLoader -import org.apache.kafka.clients.producer.KafkaProducer import org.bson.types.ObjectId import scala.concurrent.{ExecutionContext, Future} @@ -147,9 +146,10 @@ object Tracked { * * (Note: Use `lazyProducer` to minimize the number of new Producers which are created, * this reduces the number of system resources used (such as file handles)) + * Note on the note: lazyProducer is no longer available in the latest version */ - def createProducer[K, V]( settings: ProducerSettings[K, V] ): KafkaProducer[K, V] - = withClassLoader( null ) { settings.lazyProducer } + def createProducer[K, V]( settings: ProducerSettings[K, V] ): org.apache.kafka.clients.producer.Producer[K, V] + = withClassLoader( null ) { settings.createKafkaProducer() } } /** A Tracked type which uses a `Committable` as tracking information. 
From 2a3e4f8526b0763ea83760725b49b017dad5b09e Mon Sep 17 00:00:00 2001 From: Petros Papapanagiotou Date: Thu, 19 Mar 2020 01:23:05 +0000 Subject: [PATCH 53/55] Updates gitignore with metals files --- .gitignore | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index 95e1782a..6d4e2dd6 100644 --- a/.gitignore +++ b/.gitignore @@ -14,4 +14,7 @@ *~ \#*\# .\#* -.projectile \ No newline at end of file +.projectile +.metals/ +.bloop/ +project/metals.sbt \ No newline at end of file From 8045630be95c3d3efc69ef8249f1130d3ea53a0a Mon Sep 17 00:00:00 2001 From: Petros Papapanagiotou Date: Thu, 19 Mar 2020 06:35:43 +0000 Subject: [PATCH 54/55] From Scala 2.12.6 to 2.12.10 --- build.sbt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build.sbt b/build.sbt index 1f598033..385fc75d 100644 --- a/build.sbt +++ b/build.sbt @@ -3,7 +3,7 @@ sbtVersion := "1.2.6" lazy val commonSettings = Seq ( version := "1.4.0-SNAPSHOT", organization := "com.workflowfm", - scalaVersion := "2.12.6" + scalaVersion := "2.12.10" ) autoAPIMappings := true From 4447d41cead272b95920922c1c10b9c42d16394a Mon Sep 17 00:00:00 2001 From: Petros Papapanagiotou Date: Thu, 9 Jul 2020 22:24:45 +0100 Subject: [PATCH 55/55] Finalizes 1.4.0 --- build.sbt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build.sbt b/build.sbt index 385fc75d..3aeb7b2c 100644 --- a/build.sbt +++ b/build.sbt @@ -1,7 +1,7 @@ sbtVersion := "1.2.6" lazy val commonSettings = Seq ( - version := "1.4.0-SNAPSHOT", + version := "1.4.0", organization := "com.workflowfm", scalaVersion := "2.12.10" )