diff --git a/.gitignore b/.gitignore index 95e1782a..6d4e2dd6 100644 --- a/.gitignore +++ b/.gitignore @@ -14,4 +14,7 @@ *~ \#*\# .\#* -.projectile \ No newline at end of file +.projectile +.metals/ +.bloop/ +project/metals.sbt \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index ab5b0851..f81474ee 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,15 @@ Includes feature updates, bug fixes, and open issues. * [AkkaExecutor.Call may timeout #4](https://github.com/PetrosPapapa/WorkflowFM-PEW/issues/4) - since v0.1 +## [v1.4.0](https://github.com/PetrosPapapa/WorkflowFM-PEW/releases/tag/v1.4.0) - 2019-07-01 + +### Features + +* Improved `PiEventHandlers`. The `PromiseHandler` is now generalized to return a single object at the end of the workflow. The old `PromiseHandler` is an instance called `ResultHandler` (see also [#26](https://github.com/PetrosPapapa/WorkflowFM-PEW/issues/26)). +* Implemented `PiStream` using Akka's `BroadcastHub` to enable more flexible event handling (see also [#34](https://github.com/PetrosPapapa/WorkflowFM-PEW/issues/34)). Executors can now be mixed in with (at least) either of the two default observables, namely `SimplePiObservable` and `PiStream`. +* `SimMetricsActor` no longer keeps a reference to the `Coordinator`. This makes for a cleaner, more flexible implementation, allowing multiple simulations across multiple `Coordinator`s. The downside is that simulations can be run asynchronously, making it hard to disambiguate which results came from which `Coordinator`. We leave that problem to the user for now. + + ## [v1.3.0](https://github.com/PetrosPapapa/WorkflowFM-PEW/releases/tag/v1.3.0) - 2019-06-19 For some unknown reason, the version number was increased in `build.sbt` back in December without actually merging the intended changes or creating a new tag. In the meantime, [#45](https://github.com/PetrosPapapa/WorkflowFM-PEW/pull/45)) was merged with various bug fixes and minor changes, the Ski example was updated and some documentation was added. I decided to create the tag now and push the stream changes to 1.4.0. diff --git a/build.sbt b/build.sbt index 8dc9f7e5..3aeb7b2c 100644 --- a/build.sbt +++ b/build.sbt @@ -1,13 +1,13 @@ -name := "PEW" - sbtVersion := "1.2.6" lazy val commonSettings = Seq ( - version := "1.3.0-SNAPSHOT", + version := "1.4.0", organization := "com.workflowfm", - scalaVersion := "2.12.6" + scalaVersion := "2.12.10" ) +autoAPIMappings := true + // The dependencies are in Maven format, with % separating the parts. // Notice the extra bit "test" on the end of JUnit and ScalaTest, which will // mean it is only a test dependency. 
@@ -19,15 +19,15 @@ libraryDependencies += "org.scalatest" %% "scalatest" % "3.2.0-SNAP10" % "test" libraryDependencies += "org.scalamock" %% "scalamock" % "4.1.0" % Test libraryDependencies += "org.scalacheck" %% "scalacheck" % "1.14.0" % "test" -libraryDependencies += "com.typesafe.akka" %% "akka-actor" % "2.4.12" -libraryDependencies += "com.typesafe.akka" %% "akka-testkit" % "2.5.16" % "test" +libraryDependencies += "com.typesafe.akka" %% "akka-actor" % "2.6.1" +libraryDependencies += "com.typesafe.akka" %% "akka-testkit" % "2.6.1" % "test" libraryDependencies += "org.apache.commons" % "commons-lang3" % "3.3.2" -libraryDependencies += "com.typesafe.akka" %% "akka-stream-kafka" % "0.21.1" -libraryDependencies += "com.typesafe.akka" %% "akka-stream" % "2.5.13" -libraryDependencies += "com.typesafe.akka" %% "akka-http" % "10.1.3" -libraryDependencies += "de.heikoseeberger" %% "akka-http-jackson" % "1.21.0" +libraryDependencies += "com.typesafe.akka" %% "akka-stream-kafka" % "1.1.0" +libraryDependencies += "com.typesafe.akka" %% "akka-stream" % "2.6.1" +libraryDependencies += "com.typesafe.akka" %% "akka-http" % "10.1.11" +libraryDependencies += "de.heikoseeberger" %% "akka-http-jackson" % "1.27.0" libraryDependencies += "org.apache.kafka" %% "kafka" % "1.1.0" libraryDependencies += "org.apache.kafka" % "kafka-streams" % "1.1.0" @@ -35,21 +35,32 @@ libraryDependencies += "org.mongodb.scala" %% "mongo-scala-driver" % "2.2.1" libraryDependencies += "junit" % "junit" % "4.8.2" -EclipseKeys.preTasks := Seq(compile in Compile, compile in Test) +libraryDependencies += "uk.ac.ed.inf" %% "subakka" % "0.1-SNAPSHOT" +libraryDependencies += "uk.ac.ed.inf" %% "subakka" % "0.1-SNAPSHOT" % Test classifier "tests" + +lazy val skiexample = project + .in(file("skiexample")) + .settings( + commonSettings, + scalaSource in Compile := baseDirectory.value / "src", + scalaSource in Test := baseDirectory.value / "test" + ).dependsOn(rootRef) + +lazy val simulator = project + .in(file("simulator")) + .settings( + commonSettings, + name := "pew-simulator", + libraryDependencies += "com.workflowfm" %% "wfm-simulator" % "0.2.1" + ).dependsOn(rootRef) -lazy val skiexample = project - .in(file("skiexample")) - .settings( - commonSettings, - scalaSource in Compile := baseDirectory.value / "src", - scalaSource in Test := baseDirectory.value / "test" - ).dependsOn(rootRef) lazy val root = project - .in(file(".")) - .settings( - commonSettings, - scalaSource in Compile := baseDirectory.value / "src", - scalaSource in Test := baseDirectory.value / "test" - ) + .in(file(".")) + .settings( + commonSettings, + name := "pew", + scalaSource in Compile := baseDirectory.value / "src", + scalaSource in Test := baseDirectory.value / "test" + ) lazy val rootRef = LocalProject("root") diff --git a/docs/metrics.org b/docs/metrics.org new file mode 100644 index 00000000..9913112e --- /dev/null +++ b/docs/metrics.org @@ -0,0 +1,200 @@ +#+TITLE: Metrics +#+AUTHOR: Petros Papapanagiotou +#+EMAIL: petrospapapan@gmail.com +#+OPTIONS: toc:2 +#+EXCLUDE_TAGS: noexport + +* Introduction + +The PEW engine is able to automatically track metrics during workflow execution. These can be used to monitor the workflows and extract analytics and insights. + +Metrics can be tracked in 2 settings: +1) [[realtime][*Real-time execution*]]: These are metrics from the engine regarding the execution of any workflow. +2) [[simulation][*Simulation*]]: These are metrics from the simulator across the simulated time. 
+ +We describe the general setting and the individual metrics next. + + +* Setting + +The general idea for PEW is that the engine will automatically collect all the available metrics at runtime. The user can then implement an output function to generate some analytics from the collected metrics. + +The following key concepts are used by PEW for capturing and managing metrics: + +** ~Metrics~ +Metrics are captured around individual concepts, such as an atomic process or task, a persistent resource, or a workflow. Each set of metrics is captured in an immutable case class. This includes the different features being measured and the methods to update them based on what is happening in the engine. + +** ~Aggregator~ +An ~Aggregator~ is a mutable class that collects the ~Metrics~ captured across multiple workflows in one place. It contains methods to update the different metrics, indexed by some id, and based on different events that may take place. + +** ~Output~ +An ~Output~ is essentially a function that can generate any outputs from the ~Metrics~ within an ~Aggregator~. Outputs may include analytics, visualizations, reports, or anything else. + + +* <<realtime>>Real-time execution metrics + +[[[https://github.com/PetrosPapapa/WorkflowFM-PEW/tree/master/src/com/workflowfm/pew/metrics][Source]]] + +Real-time metrics are minimal, as they are intended to be generic and domain-independent. We capture metrics about calls to atomic processes and metrics about executions of entire workflows. + +The metrics are gathered in a ~MetricsAggregator~ and can be processed through a ~MetricsOutput~. + +A ~MetricsHandler~ is the most convenient way of gathering metrics directly from a ~ProcessExecutor~. It is a ~PiEventHandler~, which means you can register it directly with the executor and process the results afterwards. + +Here's an example pattern: +#+BEGIN_SRC scala +// Instantiate your handler. Call it "metrics". +val handler = new MetricsHandler[Int]("metrics") + +// Instantiate your executor (assuming a list of processes). +val executor = new AkkaExecutor(processes :_*) + +// Subscribe the handler and obtain a kill switch to unsubscribe it when done. +val killSwitch = executor.subscribe(handler) + +/////////////////////////////////// +// Execute all your workflows here. +// Wait for them to finish. +/////////////////////////////////// + +// Stop/unsubscribe the handler. +killSwitch.map(_.stop) + +// Instantiate your output, in this case a simple MetricsPrinter. +val output = new MetricsPrinter[Int]() + +// Run it on the results. +output(handler) +#+END_SRC + +The 2 types of metrics available are described next. + +** ~ProcessMetrics~ + +This captures metrics about a particular call of an ~AtomicProcess~. +- ~piID~: The ID of the workflow that executed the atomic process. +- ~ref~: A unique call ID for this process call within the particular workflow. +- ~process~: The name of the process. +- ~start~: The system time in milliseconds when the process call started. +- ~finish~: The system time in milliseconds that the process call finished, or ~None~ if it is still running. +- ~result~: A ~String~ representation of the returned result from the process call, or ~None~ if it is still running. In case of failure, the field is populated with the localized message of the exception thrown. + +** ~WorkflowMetrics~ + +This captures metrics for a particular execution of a workflow (~CompositeProcess~). +- ~piID~: The unique ID of the workflow. +- ~start~: The system time in milliseconds when the workflow started executing.
+- ~calls~: The number of individual calls performed to atomic processes. +- ~finish~: The system time in milliseconds that the workflow finished, or ~None~ if it is still running. +- ~result~: A ~String~ representation of the returned result from the workflow, or ~None~ if it is still running. In case of failure, the field is populated with the localized message of the exception thrown. + + +* <<simulation>>Simulation metrics + +[[[https://github.com/PetrosPapapa/WorkflowFM-PEW/tree/master/src/com/workflowfm/pew/simulation/metrics][Source]]] + +Simulation metrics are somewhat richer than the real-time ones. We capture metrics about each ~Task~, ~Simulation~ and ~TaskResource~ used. More details about these concepts can be found [[https://github.com/PetrosPapapa/WorkflowFM-PEW/wiki/Simulation][here]]. + +The metrics are gathered in a ~SimMetricsAggregator~ and can be processed through a ~SimMetricsOutput~. + +The general assumption is that simulations run on an ~AkkaExecutor~. Under this assumption, we can expand the implementation to ensure asynchronous gathering of the metrics and automatic execution of the output at the end. For this reason we have introduced the [[https://github.com/PetrosPapapa/WorkflowFM-PEW/blob/master/src/com/workflowfm/pew/simulation/metrics/Actor.scala][~SimMetricsActor~]] that takes care of all of this for us. + +Here is an example setup to manage simulation metrics, assuming an active ~ActorSystem~: +#+BEGIN_SRC scala +// Instantiate the Coordinator. +val coordinator = system.actorOf(Coordinator.props(DefaultScheduler)) + +// Load a list of available TaskResources into the Coordinator. +coordinator ! Coordinator.AddResources(machines) + +// Instantiate your output, in this case a simple SimMetricsPrinter. +val output = new SimMetricsPrinter() + +// Create the SimMetricsActor. +val metricsActor = system.actorOf(SimMetricsActor.props(output)) + +// Set up a list of simulations, paired with their starting times. +val simulations:Seq[(Long,Simulation)] = ... + +// Instantiate the executor. +val executor = new AkkaExecutor(simulations flatMap (_._2.getProcesses()) :_*) + +// Start the simulations through the SimMetricsActor. +metricsActor ! SimMetricsActor.StartSims(coordinator,simulations,executor) +#+END_SRC + +The ~metricsActor~ will automatically run the output function (the printer in this case) on the results. + +Note that, in this scenario, the ~metricsActor~ will also shut down the ~ActorSystem~. If you want to avoid that, e.g. when you need to run multiple independent simulations, you need to set up your own actor that will be given the opportunity to act when the simulation and metrics output have finished. Assuming ~a:ActorRef~ is that actor, you can pass it to the ~metricsActor~ at construction as follows: +#+BEGIN_SRC scala +val metricsActor = system.actorOf(SimMetricsActor.props(output, Some(a))) +#+END_SRC + +Your actor will receive a ~Coordinator.Done~ message when everything is done and the ~ActorSystem~ will remain active. + +The 3 types of gathered metrics are described next. + +** ~TaskMetrics~ + +This captures metrics for a simulated ~Task~. +- ~id~: The unique ID of the ~Task~. +- ~task~: The name of the ~Task~. +- ~simulation~: The name of the simulation the ~Task~ belongs to. +- ~created~: The virtual timestamp when the ~Task~ was created and entered the ~Coordinator~. +- ~started~: The virtual timestamp when the ~Task~ started executing, or ~None~ if it has not started yet. +- ~duration~: The virtual duration of the ~Task~.
+- ~cost~: The cost associated with the ~Task~. +- ~resources~: The list of names of the ~TaskResource~ this ~Task~ used. + +** ~SimulationMetrics~ + +This captures metrics for a particular ~Simulation~. +- ~name~: The unique name of the ~Simulation~. +- ~started~: The virtual timestamp when the ~Simulation~ started executing. +- ~duration~: The virtual duration of the ~Simulation~. +- ~delay~: The sum of all delays for all involved ~Task~. +- ~tasks~: The number of ~Task~ associated with the ~Simulation~ so far. +- ~cost~: The total cost associated with the ~Simulation~ so far. +- ~result~: A ~String~ representation of the returned result from the ~Simulation~, or ~None~ if it is still running. In case of failure, the field is populated with the localized message of the exception thrown. + +** ~ResourceMetrics~ + +This captures metrics for a particular ~TaskResource~. +- ~name~: The unique name of the ~TaskResource~. +- ~busyTime~: The total amount of virtual time that the ~TaskResource~ has been busy, i.e. attached to a ~Task~. +- ~idleTime~: The total amount of virtual time that the ~TaskResource~ has been idle, i.e. not attached to any ~Task~. +- ~tasks~: The number of different ~Task~ that have been attached to this ~TaskResource~. +- ~cost~: The total cost associated with this ~TaskResource~. + + +* Extending the metrics + +Analytics that can be derived from the current metrics can be calculated by a custom output function. + +Implementation of new types of metrics in the current setup requires an extension of each of the 3 main concepts and, more importantly, a computational way to generate these metrics at runtime. + +The former can be easily achieved by: +1) Implementing your own custom case classes for your metrics. +2) Extending one of the existing aggregators to hold your new metrics. +3) Extending the output classes to deal with your custom metrics. + +The latter is harder, as the current metrics are measured directly in the ~PiEvent~s generated by the executor or by the simulation ~Coordinator~. + +Metrics that can be calculated by atomic processes (or tasks) can be given as metadata output in the process implementation. Instead of implementing a standard ~AtomicProcess~, switch its inheritance to a ~MetadataAtomicProcess~. You can then implement the ~runMeta~ function so that it returns the calculated metrics as one or more ~PiMetadataElem~. + +Here's an example pattern: +#+BEGIN_SRC scala +override def runMeta( args: Seq[PiObject] )( implicit ec: ExecutionContext ): Future[MetadataAtomicResult] = { + // run this as a regular AtomicProcess + run( args ).flatMap { result => + // calculate your metrics + val metrics :Future[Seq[PiMetadataElem]] = ... + // return the combined result (assuming metrics is a Future here, hence the flatMap above) + metrics.map { m => MetadataAtomicProcess.result(result, m :_*) } + } +} +#+END_SRC + +The generated metadata will be attached to the corresponding ~PiEventReturn~, so you can use a ~PiEventHandler~ to grab it and pass it to your aggregator. + +Calculating the metrics at the same time as the result requires refactoring of the automatically generated code.
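To illustrate the ~PiEventHandler~ approach mentioned above, a handler along the following lines could forward each ~PiEventReturn~ (which carries the metadata produced by ~runMeta~) to an aggregator. This is only a sketch, modelled on the ~PiSimHandler~/~PiSimHandlerFactory~ pair introduced elsewhere in this changeset; the ~aggregator~ ~ActorRef~ and the choice to forward the whole event are illustrative assumptions, not part of the PEW API.
#+BEGIN_SRC scala
import akka.actor.ActorRef
import com.workflowfm.pew.{ PiEvent, PiEventReturn }
import com.workflowfm.pew.stream.{ PiEventHandlerFactory, ResultHandler }

// Hypothetical handler: forwards the returns (and their metadata) of one workflow
// to an aggregator actor, while keeping the usual ResultHandler behaviour.
class MetadataForwarder[T](aggregator: ActorRef, id: T) extends ResultHandler[T](id) {
  override def apply(e: PiEvent[T]) = {
    e match {
      // PiEventReturn carries the metadata attached by runMeta.
      case r @ PiEventReturn(i, _, _, _) if i == id => aggregator ! r
      case _ => ()
    }
    super.apply(e)
  }
}

// Factory so a handler can be built per workflow, as with PiSimHandlerFactory.
class MetadataForwarderFactory[T](aggregator: ActorRef)
    extends PiEventHandlerFactory[T, MetadataForwarder[T]] {
  override def build(id: T) = new MetadataForwarder[T](aggregator, id)
}
#+END_SRC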
diff --git a/pom.xml b/pom.xml deleted file mode 100644 index b8725dd4..00000000 --- a/pom.xml +++ /dev/null @@ -1,50 +0,0 @@ - - 4.0.0 - workflowfm-pew - pew - 0.0.1-SNAPSHOT - WorkflowFM PEW - Persistent Execution of Workflows - - src - - - maven-compiler-plugin - 3.7.0 - - 1.8 - 1.8 - - - - - - - org.scalatest - scalatest-maven-plugin - 1.0 - maven-plugin - - - junit - junit - 4.8.2 - - - org.scalatest - scalatest_2.12 - 3.2.0-SNAP10 - test - - - com.typesafe.akka - akka-actor_2.12.0-RC2 - 2.4.12 - - - org.mongodb.scala - mongo-scala-driver_2.12 - 2.2.1 - - - \ No newline at end of file diff --git a/resources/d3-timeline/d3-timeline.js b/resources/d3-timeline/d3-timeline.js index 3e5a4f42..bd894392 100644 --- a/resources/d3-timeline/d3-timeline.js +++ b/resources/d3-timeline/d3-timeline.js @@ -134,7 +134,7 @@ var appendBackgroundBar = function (yAxisMapping, index, g, data, datum) { var greenbarYAxis = ((itemHeight + itemMargin) * yAxisMapping[index]) + margin.top; - g.selectAll("svg").data(data).enter() + g.selectAll("svg").filter(".timeline").data(data).enter() .insert("rect") .attr("class", "row-green-bar") .attr("x", fullLengthBackgrounds ? 0 : margin.left) @@ -270,69 +270,78 @@ } if (backgroundColor) { appendBackgroundBar(yAxisMapping, index, g, data, datum); } - - g.selectAll("svg").data(data).enter() - .append(function(d, i) { - return document.createElementNS(d3.ns.prefix.svg, "display" in d? d.display:display); - }) - .attr("x", getXPos) - .attr("y", getStackPosition) - .attr("width", function (d, i) { - return (d.ending_time - d.starting_time) * scaleFactor; - }) - .attr("cy", function(d, i) { - return getStackPosition(d, i) + itemHeight/2; - }) - .attr("cx", getXPos) - .attr("r", itemHeight / 2) - .attr("height", itemHeight) - .style("stroke", "black") // added by Petros - .style("fill", function(d, i){ - var dColorPropName; - if (d.color) return d.color; - if( colorPropertyName ){ - dColorPropName = d[colorPropertyName]; - if ( dColorPropName ) { - return colorCycle( dColorPropName ); - } else { - return colorCycle( datum[colorPropertyName] ); - } - } - return colorCycle(index); - }) - .on("mousemove", function (d, i) { - hover(d, index, datum); - }) - .on("mouseover", function (d, i) { - mouseover(d, i, datum); - }) - .on("mouseout", function (d, i) { - mouseout(d, i, datum); - }) - .on("click", function (d, i) { - click(d, index, datum); - }) - .attr("class", function (d, i) { - return datum.class ? "timelineSeries_"+datum.class : "timelineSeries_"+index; - }) - .attr("id", function(d, i) { - // use deprecated id field - if (datum.id && !d.id) { - return 'timelineItem_'+datum.id; - } - - return d.id ? d.id : "timelineItem_"+index+"_"+i; - }) + + var svgs = g.selectAll("svg").filter(".timeline").data(data).enter() + .append(function(d, i) { + return document.createElementNS(d3.ns.prefix.svg, "svg"); + }) + .attr("x", getXPos) + .attr("y", getStackPosition) + .attr("width", function (d, i) { + return (d.ending_time - d.starting_time) * scaleFactor + 2; + }) + .attr("height", itemHeight + 2) ; - g.selectAll("svg").data(data).enter() - .append("text") - .attr("x", getXTextPos) - .attr("y", getStackTextPosition) - .text(function(d) { - return d.label; - }) - ; + svgs.append(function(d, i ) { + return document.createElementNS(d3.ns.prefix.svg, "display" in d? 
d.display:display); + }) + .attr("x", 1) + .attr("y", 1) + .attr("width", function (d, i) { + return (d.ending_time - d.starting_time) * scaleFactor; + }) + .attr("height", itemHeight) + .style("stroke", "black") // added by Petros + .attr("cy", function(d, i) { + return getStackPosition(d, i) + itemHeight/2; + }) + .attr("cx", getXPos) + .attr("r", itemHeight / 2) + .style("fill", function(d, i){ + var dColorPropName; + if (d.color) return d.color; + if( colorPropertyName ){ + dColorPropName = d[colorPropertyName]; + if ( dColorPropName ) { + return colorCycle( dColorPropName ); + } else { + return colorCycle( datum[colorPropertyName] ); + } + } + return colorCycle(index); + }) + .on("mousemove", function (d, i) { + hover(d, index, datum); + }) + .on("mouseover", function (d, i) { + mouseover(d, i, datum); + }) + .on("mouseout", function (d, i) { + mouseout(d, i, datum); + }) + .on("click", function (d, i) { + click(d, index, datum); + }) + .attr("class", function (d, i) { + return datum.class ? "timelineSeries_"+datum.class : "timelineSeries_"+index; + }) + .attr("id", function(d, i) { + // use deprecated id field + if (datum.id && !d.id) { + return 'timelineItem_'+datum.id; + } + + return d.id ? d.id : "timelineItem_"+index+"_"+i; + }); + + svgs.append("text") + .attr("x", 6) + .attr("y", itemHeight * 0.75 + 1) + .text(function(d) { + return d.label; + }) + ; if (rowSeparatorsColor) { var lineYAxis = ( itemHeight + itemMargin / 2 + margin.top + (itemHeight + itemMargin) * yAxisMapping[index]); @@ -360,15 +369,9 @@ function getStackPosition(d, i) { if (stacked) { - return margin.top + (itemHeight + itemMargin) * yAxisMapping[index]; + return margin.top + (itemHeight + itemMargin) * yAxisMapping[index] - 1; } - return margin.top; - } - function getStackTextPosition(d, i) { - if (stacked) { - return margin.top + (itemHeight + itemMargin) * yAxisMapping[index] + itemHeight * 0.75; - } - return margin.top + itemHeight * 0.75; + return margin.top - 1; } }); }); @@ -424,18 +427,14 @@ } function getXPos(d, i) { - return margin.left + (d.starting_time - beginning) * scaleFactor; - } - - function getXTextPos(d, i) { - return margin.left + (d.starting_time - beginning) * scaleFactor + 5; + return margin.left + (d.starting_time - beginning) * scaleFactor - 1; } function setHeight() { if (!height && !gParentItem.attr("height")) { if (itemHeight) { // set height based off of item height - height = gSize.height + gSize.top - gParentSize.top; + height = g[0][0].getBBox().height + gSize.top; // - gParentSize.top; // set bounding rectangle height d3.select(gParent[0][0]).attr("height", height); } else { diff --git a/resources/d3-timeline/data-example-simulation.js b/resources/d3-timeline/data-example-simulation.js deleted file mode 100644 index c031ce20..00000000 --- a/resources/d3-timeline/data-example-simulation.js +++ /dev/null @@ -1,103 +0,0 @@ -var tasks = [ - "AwardContract", - "ProvideService", - "TaskSimTask", - "CheckOutcome", -]; - -var resourceData = [ -{label: "Petros", times: [ - {"label":"AwardContract(D2)", task: "AwardContract", "starting_time": 0, "ending_time": 1, delay: 0, cost: 2}, - {"label":"TaskSimTask(TaskSim)", task: "TaskSimTask", "starting_time": 6, "ending_time": 11, delay: 0, cost: 15}, - {"label":"CheckOutcome(D2)", task: "CheckOutcome", "starting_time": 11, "ending_time": 13, delay: 5, cost: 3}, - {"label":"AwardContract(A1)", task: "AwardContract", "starting_time": 13, "ending_time": 14, delay: 4, cost: 2}, - {"label":"AwardContract(A3)", task: "AwardContract", 
"starting_time": 14, "ending_time": 15, delay: 3, cost: 2}, - {"label":"AwardContract(A7)", task: "AwardContract", "starting_time": 22, "ending_time": 23, delay: 0, cost: 2}, - {"label":"ProvideService(D4)", task: "ProvideService", "starting_time": 23, "ending_time": 28, delay: 1, cost: 6}, - {"label":"AwardContract(D8)", task: "AwardContract", "starting_time": 28, "ending_time": 29, delay: 6, cost: 2}, -]}, -{label: "Orphen", times: [ - {"label":"ProvideService(D2)", task: "ProvideService", "starting_time": 1, "ending_time": 6, delay: 0, cost: 6}, - {"label":"TaskSimTask(TaskSim)", task: "TaskSimTask", "starting_time": 6, "ending_time": 11, delay: 0, cost: 15}, - {"label":"ProvideService(A5)", task: "ProvideService", "starting_time": 11, "ending_time": 16, delay: 1, cost: 6}, - {"label":"ProvideService(D6)", task: "ProvideService", "starting_time": 16, "ending_time": 21, delay: 5, cost: 6}, - {"label":"AwardContract(D4)", task: "AwardContract", "starting_time": 21, "ending_time": 22, delay: 9, cost: 2}, - {"label":"ProvideService(A1)", task: "ProvideService", "starting_time": 22, "ending_time": 27, delay: 8, cost: 6}, - {"label":"ProvideService(A3)", task: "ProvideService", "starting_time": 27, "ending_time": 32, delay: 12, cost: 6}, - {"label":"CheckOutcome(A5)", task: "CheckOutcome", "starting_time": 32, "ending_time": 34, delay: 16, cost: 3}, - {"label":"CheckOutcome(D4)", task: "CheckOutcome", "starting_time": 34, "ending_time": 36, delay: 6, cost: 3}, - {"label":"ProvideService(A7)", task: "ProvideService", "starting_time": 37, "ending_time": 42, delay: 14, cost: 6}, - {"label":"CheckOutcome(A1)", task: "CheckOutcome", "starting_time": 42, "ending_time": 44, delay: 15, cost: 3}, - {"label":"CheckOutcome(A3)", task: "CheckOutcome", "starting_time": 44, "ending_time": 46, delay: 12, cost: 3}, - {"label":"CheckOutcome(A7)", task: "CheckOutcome", "starting_time": 46, "ending_time": 48, delay: 4, cost: 3}, -]}, -{label: "Patient1", times: [ - {"label":"ProvideService(D2)", task: "ProvideService", "starting_time": 1, "ending_time": 6, delay: 0, cost: 6}, - {"label":"CheckOutcome(D2)", task: "CheckOutcome", "starting_time": 11, "ending_time": 13, delay: 5, cost: 3}, - {"label":"ProvideService(A1)", task: "ProvideService", "starting_time": 22, "ending_time": 27, delay: 8, cost: 6}, - {"label":"ProvideService(A3)", task: "ProvideService", "starting_time": 27, "ending_time": 32, delay: 12, cost: 6}, - {"label":"ProvideService(D8)", task: "ProvideService", "starting_time": 32, "ending_time": 37, delay: 3, cost: 6}, - {"label":"ProvideService(A7)", task: "ProvideService", "starting_time": 37, "ending_time": 42, delay: 14, cost: 6}, - {"label":"CheckOutcome(A1)", task: "CheckOutcome", "starting_time": 42, "ending_time": 44, delay: 15, cost: 3}, - {"label":"CheckOutcome(A3)", task: "CheckOutcome", "starting_time": 44, "ending_time": 46, delay: 12, cost: 3}, - {"label":"CheckOutcome(A7)", task: "CheckOutcome", "starting_time": 46, "ending_time": 48, delay: 4, cost: 3}, -]}, -{label: "Blah", times: [ - {"label":"AwardContract(A5)", task: "AwardContract", "starting_time": 9, "ending_time": 10, delay: 0, cost: 2}, - {"label":"AwardContract(D6)", task: "AwardContract", "starting_time": 10, "ending_time": 11, delay: 1, cost: 2}, - {"label":"ProvideService(D8)", task: "ProvideService", "starting_time": 32, "ending_time": 37, delay: 3, cost: 6}, -]}, -{label: "Patient3", times: [ - {"label":"ProvideService(A5)", task: "ProvideService", "starting_time": 11, "ending_time": 16, delay: 1, cost: 6}, - 
{"label":"ProvideService(D6)", task: "ProvideService", "starting_time": 16, "ending_time": 21, delay: 5, cost: 6}, - {"label":"CheckOutcome(A5)", task: "CheckOutcome", "starting_time": 32, "ending_time": 34, delay: 16, cost: 3}, -]}, -{label: "Patient2", times: [ - {"label":"ProvideService(D4)", task: "ProvideService", "starting_time": 23, "ending_time": 28, delay: 1, cost: 6}, - {"label":"CheckOutcome(D4)", task: "CheckOutcome", "starting_time": 34, "ending_time": 36, delay: 6, cost: 3}, -]}, -]; - -var workflowData = [ -{label: "D2", times: [ - {"label":"AwardContract(D2)", task: "AwardContract", "starting_time": 0, "ending_time": 1, delay: 0, cost: 2}, - {"label":"ProvideService(D2)", task: "ProvideService", "starting_time": 1, "ending_time": 6, delay: 0, cost: 6}, - {"label":"CheckOutcome(D2)", task: "CheckOutcome", "starting_time": 11, "ending_time": 13, delay: 5, cost: 3}, -]}, -{label: "TaskSim", times: [ - {"label":"TaskSimTask(TaskSim)", task: "TaskSimTask", "starting_time": 6, "ending_time": 11, delay: 0, cost: 15}, -]}, -{label: "A1", times: [ - {"label":"AwardContract(A1)", task: "AwardContract", "starting_time": 13, "ending_time": 14, delay: 4, cost: 2}, - {"label":"ProvideService(A1)", task: "ProvideService", "starting_time": 22, "ending_time": 27, delay: 8, cost: 6}, - {"label":"CheckOutcome(A1)", task: "CheckOutcome", "starting_time": 42, "ending_time": 44, delay: 15, cost: 3}, -]}, -{label: "A5", times: [ - {"label":"AwardContract(A5)", task: "AwardContract", "starting_time": 9, "ending_time": 10, delay: 0, cost: 2}, - {"label":"ProvideService(A5)", task: "ProvideService", "starting_time": 11, "ending_time": 16, delay: 1, cost: 6}, - {"label":"CheckOutcome(A5)", task: "CheckOutcome", "starting_time": 32, "ending_time": 34, delay: 16, cost: 3}, -]}, -{label: "D6", times: [ - {"label":"AwardContract(D6)", task: "AwardContract", "starting_time": 10, "ending_time": 11, delay: 1, cost: 2}, - {"label":"ProvideService(D6)", task: "ProvideService", "starting_time": 16, "ending_time": 21, delay: 5, cost: 6}, -]}, -{label: "A3", times: [ - {"label":"AwardContract(A3)", task: "AwardContract", "starting_time": 14, "ending_time": 15, delay: 3, cost: 2}, - {"label":"ProvideService(A3)", task: "ProvideService", "starting_time": 27, "ending_time": 32, delay: 12, cost: 6}, - {"label":"CheckOutcome(A3)", task: "CheckOutcome", "starting_time": 44, "ending_time": 46, delay: 12, cost: 3}, -]}, -{label: "D4", times: [ - {"label":"AwardContract(D4)", task: "AwardContract", "starting_time": 21, "ending_time": 22, delay: 9, cost: 2}, - {"label":"ProvideService(D4)", task: "ProvideService", "starting_time": 23, "ending_time": 28, delay: 1, cost: 6}, - {"label":"CheckOutcome(D4)", task: "CheckOutcome", "starting_time": 34, "ending_time": 36, delay: 6, cost: 3}, -]}, -{label: "A7", times: [ - {"label":"AwardContract(A7)", task: "AwardContract", "starting_time": 22, "ending_time": 23, delay: 0, cost: 2}, - {"label":"ProvideService(A7)", task: "ProvideService", "starting_time": 37, "ending_time": 42, delay: 14, cost: 6}, - {"label":"CheckOutcome(A7)", task: "CheckOutcome", "starting_time": 46, "ending_time": 48, delay: 4, cost: 3}, -]}, -{label: "D8", times: [ - {"label":"AwardContract(D8)", task: "AwardContract", "starting_time": 28, "ending_time": 29, delay: 6, cost: 2}, - {"label":"ProvideService(D8)", task: "ProvideService", "starting_time": 32, "ending_time": 37, delay: 3, cost: 6}, -]}, -]; diff --git a/resources/d3-timeline/pew-timeline-simulation.html 
b/resources/d3-timeline/pew-timeline-simulation.html deleted file mode 100644 index eb0634b9..00000000 --- a/resources/d3-timeline/pew-timeline-simulation.html +++ /dev/null @@ -1,19 +0,0 @@ - - - - - - - WorkflowFM - Results Timeline - - - - -

Resources:

-
-

Simulations:

-
- - - - diff --git a/resources/d3-timeline/pew-timeline-simulation.js b/resources/d3-timeline/pew-timeline-simulation.js deleted file mode 100644 index 061324ce..00000000 --- a/resources/d3-timeline/pew-timeline-simulation.js +++ /dev/null @@ -1,121 +0,0 @@ -function displayResults(selection,data) { - var widthPerTick = 60 - var leftMargin = 100 - var rightMargin = 30 - - var colorScale = d3.scale.category20().domain(tasks); - //var ticks = Array(totalTicks).fill().map((v,i)=>i); - - var tRange = timeRange(data) - var startTime = tRange[0] - var endTime = tRange[1] - - var totalTicks = endTime - startTime - - //var tickTime = d3.time.minutes - //var totalTicks = tickTime(startTime,endTime).length - - console.log("Total Ticks: " + totalTicks) - - var chart = d3.timeline() - .tickFormat( // - {format: d3.format.utc("03d"), - tickInterval: 1, // This forces us to start from 001 - numTicks: totalTicks, - //tickValues: ticks, // Use this to start from 000 - tickSize: 10, - }) - /*.tickFormat( // - {format: d3.time.format.utc("%H"), - tickTime: tickTime, - tickInterval: 1, - tickSize: 10, - })*/ - .stack() - .margin({left:100, right:30, top:0, bottom:0}) - .colors( colorScale ) - .colorProperty('task') - .width(totalTicks*widthPerTick+leftMargin+rightMargin); - - chart.showTimeAxisTick(); - chart.relativeTime(); - //chart.rowSeparators("#555555"); - - var backgroundColor = "#eeeeee"; - var altBackgroundColor = "white"; - chart.background(function (datum, i) { - var odd = (i % 2) === 0; - return odd ? altBackgroundColor : backgroundColor; - }); - chart.fullLengthBackgrounds(); - - var div = selection.append("div") - .attr("class", "tooltip") - .style("opacity", 0); - - chart.mouseover(function (d, i, datum) { - // d is the current rendering object - // i is the index during d3 rendering - // datum is the data object - div.style("left", (d3.event.pageX) + "px") - .style("top", (d3.event.pageY - 28) + "px"); - div.text(d.task + "\n" + - chart.tickFormat().format(new Date(d.starting_time)) + "-" + chart.tickFormat().format(new Date(d.ending_time)) + "\n" + - "Delay: " + chart.tickFormat().format(new Date(d.delay)) + "\n" + - "Cost: " + d.cost - ); - div.transition() - .duration(200) - .style("opacity", .9); - }); - chart.mouseout(function (d, i, datum) { - div.transition() - .duration(500) - .style("opacity", 0); - }); - - selection.select("svg").selectAll("g").remove(); - var svg = selection.select("svg") - .datum(data) - //.attr("width", '100%') - .attr("width", totalTicks*widthPerTick+leftMargin+rightMargin) - .call(chart); -} - -function timeRange(data) { - var start = new Date().getTime(); - var finish = 0; - for (var i = 0; i < data.length; i++) { - for (var j = 0; j < data[i].times.length; j++) { - if (data[i].times[j].starting_time < start) - start = data[i].times[j].starting_time; - if (data[i].times[j].ending_time > finish) - finish = data[i].times[j].ending_time; - } - } - return [start,finish] -} - -function workflow(datum) { - var selection = d3.select(this); - displayResults(selection,datum.data); -} - -function newWorkflow(datum) { - var selection = d3.select(this); - selection.append("p").text(datum.id); - selection.append("svg") - //.attr("width", '100%') - //.attr("width", totalTicks*widthPerTick); - displayResults(selection,datum.data); -} - -function displayOne(tag,workflowData) { - var div = d3.select(tag)//.data(workflowData) - div.selectAll("svg").remove() - div.append("svg") - displayResults(div,workflowData) -} - -displayOne("#resources",resourceData); 
-displayOne("#simulations",simulationData); diff --git a/resources/d3-timeline/pew-timeline.js b/resources/d3-timeline/pew-timeline.js index d1736419..73b088b2 100644 --- a/resources/d3-timeline/pew-timeline.js +++ b/resources/d3-timeline/pew-timeline.js @@ -97,7 +97,7 @@ function workflow(datum) { function newWorkflow(datum) { var selection = d3.select(this); selection.append("p").text(datum.id); - selection.append("svg") + selection.append("svg").attr("class","timeline") //.attr("width", '100%') //.attr("width", totalTicks*widthPerTick); displayResults(selection,datum.data); @@ -117,7 +117,7 @@ function displayAll(tag,workflowData) { function displayOne(tag,workflowData) { var div = d3.select(tag)//.data(workflowData) div.selectAll("svg").remove() - div.append("svg") + div.append("svg").attr("class","timeline") displayResults(div,workflowData) } diff --git a/simulator/.gitignore b/simulator/.gitignore new file mode 100644 index 00000000..95e1782a --- /dev/null +++ b/simulator/.gitignore @@ -0,0 +1,17 @@ +*.class +*.log +/bin/ +.cache* +/target/ +/project/target/ +/project/project/target/ +/.settings/ +/.classpath +/.idea +/resources/data +.ensime +.ensime_cache/ +*~ +\#*\# +.\#* +.projectile \ No newline at end of file diff --git a/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimHandler.scala b/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimHandler.scala new file mode 100644 index 00000000..a550b157 --- /dev/null +++ b/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimHandler.scala @@ -0,0 +1,20 @@ +package com.workflowfm.pew.simulator + +import akka.actor.ActorRef +import com.workflowfm.pew.stream.{ PiEventHandlerFactory, ResultHandler } +import com.workflowfm.pew.{ PiEvent, PiEventCall, PiEventReturn, PiEventIdle, PiEventResult, PiFailure } + +class PiSimHandler[T](actor: ActorRef, id: T) extends ResultHandler[T](id) { + override def apply(e: PiEvent[T]) = { + e match { + case PiEventIdle(i,_) if (i.id == id) => actor ! PiSimulationActor.ExecutorReady(i) + case PiEventReturn(i,_,_,_) if (i == id) => actor ! PiSimulationActor.ExecutorBusy + case _ => Unit + } + super.apply(e) + } +} + +class PiSimHandlerFactory[T](actor: ActorRef) extends PiEventHandlerFactory[T,PiSimHandler[T]] { + override def build(id: T) = new PiSimHandler[T](actor, id) +} diff --git a/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimulatedProcess.scala b/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimulatedProcess.scala new file mode 100644 index 00000000..78476e9f --- /dev/null +++ b/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimulatedProcess.scala @@ -0,0 +1,35 @@ +package com.workflowfm.pew.simulator + +import akka.actor.{ ActorRef, Props } +import com.workflowfm.pew.{ MetadataAtomicProcess, PiInstance, PiMetadata, PiObject } +import com.workflowfm.pew.stream.{ PiEventHandler, PiEventHandlerFactory } +import com.workflowfm.pew.{ AtomicProcess, PiProcess } +import com.workflowfm.simulator.{ SimulatedProcess, Task, TaskGenerator } +import java.util.UUID +import scala.concurrent.{ ExecutionContext, Future } +import akka.pattern.ask +import akka.util.Timeout +import java.util.concurrent.TimeUnit +import scala.concurrent.duration.Duration + +trait PiSimulatedProcess extends AtomicProcess with SimulatedProcess { + override val iname = s"$simulationName.$name" + + // TODO We never actually wait for these asks, so there is still a chance the ordering will be messed up + // if the messages are delayed + def virtualWait() = (simulationActor ? 
PiSimulationActor.Waiting(iname))(Timeout(1, TimeUnit.DAYS)) + def virtualResume() = (simulationActor ? PiSimulationActor.Resuming(iname))(Timeout(1, TimeUnit.DAYS)) + + override def simulate[T]( + gen: TaskGenerator, + result: (Task, Long) => T, + resources: String* + )(implicit executionContext: ExecutionContext): Future[T] = { + val id = java.util.UUID.randomUUID + simulationActor ! PiSimulationActor.AddSource(id,iname) + val f = simulate(id,gen,result,resources:_*) + virtualWait() + f + } + +} diff --git a/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimulation.scala b/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimulation.scala new file mode 100644 index 00000000..ed38f1ee --- /dev/null +++ b/simulator/src/main/scala/com/workflowfm/pew/simulator/PiSimulation.scala @@ -0,0 +1,105 @@ +package com.workflowfm.pew.simulator + +import akka.actor.{ ActorRef, Props } +import com.workflowfm.pew.stream.{ PiEventHandler, PiEventHandlerFactory } +import com.workflowfm.pew.{ PiProcess, PiInstance } +import com.workflowfm.pew.execution.ProcessExecutor +import com.workflowfm.simulator.{ Coordinator, Task } +import com.workflowfm.simulator.{ SimulatedProcess, SimulationActor, TaskGenerator } +import java.util.UUID +import scala.collection.mutable.{ Map, Queue } +import scala.concurrent.{ ExecutionContext, Future } + + +abstract class PiSimulationActor[T] (override val name: String, override val coordinator: ActorRef) + (implicit executionContext: ExecutionContext) + extends SimulationActor(name, coordinator) { + + var executorIsReady = true + var waiting: Seq[String] = Seq[String]() + val taskWaiting: Queue[String] = Queue[String]() + val sources: Map[UUID,String] = Map() + + def rootProcess: PiProcess + def args: Seq[Any] + + def getProcesses(): Seq[PiProcess] = rootProcess :: rootProcess.allDependencies.toList + + def executor: ProcessExecutor[T] + val factory = new PiSimHandlerFactory[T](self) + + override def run(): Future[Any] = { + executorIsReady = false + executor.call(rootProcess, args, factory) flatMap (_.future) + } + + def readyCheck() = { + val q = taskWaiting.clone() + val check = waiting.forall { p => + q.dequeueFirst(_ == p) match { + case None => false + case Some(_) => true + } + } + //println(s"[${self.path.name}] Check: $executorIsReady && $waiting && $taskWaiting = $check == ${executorIsReady & check}") + if (executorIsReady && check) ready() + } + + def processWaiting(process: String) = { + //println(s"Process waiting: $process") + if (!waiting.contains(process)) executorIsReady=false + taskWaiting += process + readyCheck() + } + + def processResuming(process: String) = { + // println(s"Process resuming: $process") + if (!waiting.contains(process)) executorIsReady=false + taskWaiting.dequeueFirst(_ == process) + readyCheck() + } + + override def complete(task: Task, time: Long) = { + sources.get(task.id).map { p => + taskWaiting.dequeueFirst(_ == p) + } + super.complete(task,time) + } + + def executorReady(i: PiInstance[_]) = { + val procs = i.getCalledProcesses + if (procs.forall(_.isInstanceOf[PiSimulatedProcess])) { + waiting = procs map (_.iname) + executorIsReady = true + readyCheck() + } + } + + def executorBusy() = executorIsReady = false + + def piActorReceive: Receive = { + case PiSimulationActor.Waiting(p) => { + processWaiting(p) + sender() ! 
PiSimulationActor.Ack + } + + case PiSimulationActor.Resuming(p) => { + val actor = sender() + processResuming(p) + requestWait(actor) + } + case PiSimulationActor.AddSource(id,iname) => sources += id->iname + case PiSimulationActor.ExecutorBusy => executorBusy() + case PiSimulationActor.ExecutorReady(i) => executorReady(i) + } + + override def receive = simulationActorReceive orElse piActorReceive +} +object PiSimulationActor { + case class Waiting(process: String) + case class Resuming(process: String) + case object Ack + case class AddSource(id: UUID, iname: String) + case object ExecutorBusy + case class ExecutorReady(i: PiInstance[_]) +} diff --git a/skiexample/.project b/skiexample/.project deleted file mode 100644 index 50a850c1..00000000 --- a/skiexample/.project +++ /dev/null @@ -1,13 +0,0 @@ - - skiexample - - - org.scala-ide.sdt.core.scalabuilder - - - - org.scala-ide.sdt.core.scalanature - org.eclipse.jdt.core.javanature - - - \ No newline at end of file diff --git a/src/com/workflowfm/pew/PiEventHandler.scala b/src/com/workflowfm/pew/PiEvents.scala similarity index 80% rename from src/com/workflowfm/pew/PiEventHandler.scala rename to src/com/workflowfm/pew/PiEvents.scala index b9056de1..f641a0f0 100644 --- a/src/com/workflowfm/pew/PiEventHandler.scala +++ b/src/com/workflowfm/pew/PiEvents.scala @@ -78,6 +78,7 @@ object PiEvent { // PiInstance Level PiEvents case e: PiEventStart[KeyT] => e.copy( metadata = fn( e.metadata ) ) case e: PiEventResult[KeyT] => e.copy( metadata = fn( e.metadata ) ) + case e: PiEventIdle[KeyT] => e.copy( metadata = fn( e.metadata ) ) // PiInstance Level PiFailures case e: PiFailureNoResult[KeyT] => e.copy( metadata = fn( e.metadata ) ) @@ -138,6 +139,25 @@ case class PiEventResult[KeyT]( s" === [$id] RESULT: $res" } +/** Denotes the completion of all reductions and process calls. + * We are waiting for at least one process call to complete. + * This is useful in simulations so we know when to progress in virtual time. + * + * @note Not all executor implementations fire this event. + * @param i PiInstance representing the current state. + * @param metadata Metadata object. + * @tparam KeyT The type used to identify PiInstances. + */ +case class PiEventIdle[KeyT]( + i: PiInstance[KeyT], + override val metadata: PiMetadataMap = PiMetadata() + + ) extends PiEvent[KeyT] with PiEventFinish[KeyT] { + + override def id: KeyT = i.id + override def asString: String = s" === [$id] Idling... " +} + //////////////////////////////////////////// // PiInstance Level Exceptions & Failures // @@ -344,7 +364,7 @@ case class PiEventCall[KeyT]( * * @tparam KeyT The type used to identify PiInstances. */ -trait PiEventCallEnd[KeyT] extends PiAtomicProcessEvent[KeyT] +sealed trait PiEventCallEnd[KeyT] extends PiAtomicProcessEvent[KeyT] /** Denotes the successful completion of execution of a AtomicProcess. * @@ -408,113 +428,3 @@ object PiFailureAtomicProcessException { def apply[KeyT]( id: KeyT, ref: Int, ex: Throwable, metadata: PiMetadataMap = PiMetadata() ): PiFailureAtomicProcessException[KeyT] = PiFailureAtomicProcessException[KeyT]( id, ref, ex.getLocalizedMessage, ex.getStackTrace, metadata ) } - - -//////////////////// -// PiEventHanders // -//////////////////// - -// Return true if the handler is done and needs to be unsubscribed. 
- -trait PiEventHandler[KeyT] extends (PiEvent[KeyT]=>Boolean) { - def name:String - def and(h:PiEventHandler[KeyT]) = MultiPiEventHandler(this,h) -} - -trait PiEventHandlerFactory[T,H <: PiEventHandler[T]] { - def build(id:T):H -} - -class PrintEventHandler[T](override val name:String) extends PiEventHandler[T] { - val formatter = new SimpleDateFormat("YYYY-MM-dd HH:mm:ss.SSS") - override def apply(e:PiEvent[T]) = { - val time = formatter.format(e.rawTime) - System.err.println("["+time+"]" + e.asString) - false - } -} - - -class PromiseHandler[T](override val name:String, val id:T) extends PiEventHandler[T] { - val promise = Promise[Any]() - def future = promise.future - - // class PromiseException(message:String) extends Exception(message) - - override def apply(e:PiEvent[T]) = if (e.id == this.id) e match { - case PiEventResult(i,res,_) => promise.success(res); true - case ex: PiFailure[T] => promise.failure( ex.exception ); true - case _ => false - } else false -} - -class PromiseHandlerFactory[T](name:T=>String) extends PiEventHandlerFactory[T,PromiseHandler[T]] { - def this(name:String) = this { _:T => name } - override def build(id:T) = new PromiseHandler[T](name(id),id) -} - - - -class CounterHandler[T](override val name:String, val id:T) extends PiEventHandler[T] { - private var counter:Int = 0 - def count = counter - val promise = Promise[Int]() - def future = promise.future - - override def apply(e:PiEvent[T]) = if (e.id == this.id) e match { - case PiEventResult(i,res,_) => counter += 1 ; promise.success(counter) ; true - case ex: PiFailure[T] => counter += 1; promise.success(counter) ; true - case _ => counter += 1 ; false - } else false -} - -class CounterHandlerFactory[T](name:T=>String) extends PiEventHandlerFactory[T,CounterHandler[T]] { - def this(name:String) = this { _:T => name } - override def build(id:T) = new CounterHandler[T](name(id),id) -} - - -case class MultiPiEventHandler[T](handlers:Queue[PiEventHandler[T]]) extends PiEventHandler[T] { - override def name = handlers map (_.name) mkString(",") - override def apply(e:PiEvent[T]) = handlers map (_(e)) forall (_ == true) - override def and(h:PiEventHandler[T]) = MultiPiEventHandler(handlers :+ h) -} - -object MultiPiEventHandler { - def apply[T](handlers:PiEventHandler[T]*):MultiPiEventHandler[T] = MultiPiEventHandler[T](Queue[PiEventHandler[T]]() ++ handlers) -} - - -trait PiObservable[T] { - def subscribe(handler:PiEventHandler[T]):Future[Boolean] - def unsubscribe(handlerName:String):Future[Boolean] -} - -trait SimplePiObservable[T] extends PiObservable[T] { - import collection.mutable.Map - - implicit val executionContext:ExecutionContext - - val handlers:Map[String,PiEventHandler[T]] = Map[String,PiEventHandler[T]]() - - override def subscribe(handler:PiEventHandler[T]):Future[Boolean] = Future { - //System.err.println("Subscribed: " + handler.name) - handlers += (handler.name -> handler) - true - } - - override def unsubscribe(handlerName:String):Future[Boolean] = Future { - handlers.remove(handlerName).isDefined - } - - def publish(evt:PiEvent[T]) = { - handlers.retain((k,v) => !v(evt)) - } -} - -trait DelegatedPiObservable[T] extends PiObservable[T] { - val worker: PiObservable[T] - - override def subscribe( handler: PiEventHandler[T] ): Future[Boolean] = worker.subscribe( handler ) - override def unsubscribe( handlerName: String ): Future[Boolean] = worker.unsubscribe( handlerName ) -} diff --git a/src/com/workflowfm/pew/PiInstance.scala b/src/com/workflowfm/pew/PiInstance.scala index bb42561e..57f2cfec 
100644 --- a/src/com/workflowfm/pew/PiInstance.scala +++ b/src/com/workflowfm/pew/PiInstance.scala @@ -56,18 +56,11 @@ case class PiInstance[T](final val id:T, called:Seq[Int], process:PiProcess, sta } } } - - /** - * Should the simulator wait for the workflow? - */ - def simulationReady:Boolean = - if (completed) true // workflow is done - else { - val procs = state.threads flatMap { f => getProc(f._2.fun) } - if (procs.isEmpty) false // workflow is not completed, so we either couldn't find a process with getProc or - // calls have not been converted to threads yet (so no fullreduce) for whatever reason - else procs.forall(_.isSimulatedProcess) // are all open threads simulated processes? - } + + def getCalledProcesses: Seq[PiProcess] = state.threads flatMap { f => getProc(f._2.fun) } toSeq + + def updateProcs(m: Map[String,PiProcess]) = copy(state = state.updateProcs(m)) + def updateProcs(f: PiProcess => PiProcess) = copy(state = state.updateProcs(f)) } object PiInstance { def apply[T](id:T,p:PiProcess,args:PiObject*):PiInstance[T] = PiInstance(id, Seq(), p, p.execState(args)) @@ -85,14 +78,12 @@ trait PiInstanceStore[T] { def get(id:T):Option[PiInstance[T]] def put(i:PiInstance[T]):PiInstanceStore[T] def del(id:T):PiInstanceStore[T] - def simulationReady:Boolean } trait PiInstanceMutableStore[T] { def get(id:T):Option[PiInstance[T]] def put(i:PiInstance[T]):Unit def del(id:T):Unit - def simulationReady:Boolean } case class SimpleInstanceStore[T](m: Map[T,PiInstance[T]]) extends PiInstanceStore[T] { @@ -100,8 +91,7 @@ case class SimpleInstanceStore[T](m: Map[T,PiInstance[T]]) extends PiInstanceSto override def get(id: T): Option[PiInstance[T]] = m.get(id) override def put(i: PiInstance[T]): SimpleInstanceStore[T] = copy(m = m + (i.id->i)) override def del(id: T): SimpleInstanceStore[T] = copy(m = m - id) - - override def simulationReady: Boolean = m.values.forall(_.simulationReady) + /*{ m.values.foreach { i => { val procs = i.state.threads.values.map(_.fun).mkString(", ") diff --git a/src/com/workflowfm/pew/PiProcess.scala b/src/com/workflowfm/pew/PiProcess.scala index d99681d0..24b9b971 100644 --- a/src/com/workflowfm/pew/PiProcess.scala +++ b/src/com/workflowfm/pew/PiProcess.scala @@ -20,9 +20,9 @@ sealed trait PiProcess { def inputs:Seq[(PiObject,String)] // List of (type,channel) for each input. def channels:Seq[String] = output._2 +: (inputs map (_._2)) // order of channels is important for correct process calls! - def dependencies:Seq[PiProcess] // dependencies of composite processes + val dependencies:Seq[PiProcess] // dependencies of composite processes - def allDependencies:Seq[PiProcess] = PiProcess.allDependenciesOf(this) // all ancestors (i.e. including dependencies of dependencies + lazy val allDependencies:Seq[PiProcess] = PiProcess.allDependenciesOf(this) // all ancestors (i.e. including dependencies of dependencies /** * Initializes a PiState that executes this process with a given list of PiObject arguments. @@ -55,13 +55,6 @@ sealed trait PiProcess { * * Shortcut to create entries in name->PiProcess maps using the instance name */ def toIEntry:(String,PiProcess) = iname->this - - /** - * This is used to identify simulation processes that need (virtual) time to complete. - * If a process is not a simulation process, then the simulator needs to wait for it to complete before - * the next virtual tick. 
- */ - def isSimulatedProcess = false } object PiProcess { def allDependenciesOf(p:PiProcess):Seq[PiProcess] = @@ -99,7 +92,7 @@ trait AtomicProcess extends MetadataAtomicProcess { /** Implements the standard AtomicProcess interface for unsupporting ProcessExecutors. */ - final override def runMeta( args: Seq[PiObject] )( implicit ec: ExecutionContext ): Future[MetadataAtomicResult] + override def runMeta( args: Seq[PiObject] )( implicit ec: ExecutionContext ): Future[MetadataAtomicResult] = run( args ).map(MetadataAtomicProcess.result(_)) def run(args:Seq[PiObject])(implicit ec:ExecutionContext):Future[PiObject] diff --git a/src/com/workflowfm/pew/PiState.scala b/src/com/workflowfm/pew/PiState.scala index 6af99604..54d7ff59 100644 --- a/src/com/workflowfm/pew/PiState.scala +++ b/src/com/workflowfm/pew/PiState.scala @@ -47,6 +47,9 @@ case class PiState(inputs:Map[Chan,Input], outputs:Map[Chan,Output], calls:List[ def withProc(p:PiProcess) = copy(processes = processes + (p.name->p)) def withProcs(l:PiProcess*) = (this /: l)(_ withProc _) + def updateProcs(m: Map[String,PiProcess]) = copy(processes = m) + def updateProcs(f: PiProcess => PiProcess) = copy(processes = processes.mapValues(f)) + def withCalls(l:PiFuture*) = copy(calls = l.toList ++ calls) def withThread(ref:Int, name:String, chan:String, args:Seq[PiResource]) = withThreads((ref,PiFuture(name,Chan(chan),args))) diff --git a/src/com/workflowfm/pew/execution/AkkaExecutor.scala b/src/com/workflowfm/pew/execution/AkkaExecutor.scala index 5c0cec6f..bb79bc07 100644 --- a/src/com/workflowfm/pew/execution/AkkaExecutor.scala +++ b/src/com/workflowfm/pew/execution/AkkaExecutor.scala @@ -1,213 +1,191 @@ package com.workflowfm.pew.execution import akka.actor._ +import akka.event.LoggingReceive import akka.pattern.{ask, pipe} import akka.util.Timeout import com.workflowfm.pew._ +import com.workflowfm.pew.stream.{ PiObservable, PiStream, PiEventHandler, PiSwitch } import scala.concurrent._ import scala.concurrent.duration._ +import scala.reflect.ClassTag import scala.util.{Failure, Success} +import java.util.UUID class AkkaExecutor ( - store:PiInstanceStore[Int], - processes:PiProcessStore + store:PiInstanceStore[UUID] )( implicit val system: ActorSystem, - override implicit val executionContext: ExecutionContext = ExecutionContext.global, - implicit val timeout:FiniteDuration = 10.seconds -) extends SimulatorExecutor[Int] with PiObservable[Int] { + implicit val timeout: FiniteDuration +) extends ProcessExecutor[UUID] with PiObservable[UUID] { - def this(store:PiInstanceStore[Int], l:PiProcess*) - (implicit system: ActorSystem, context: ExecutionContext, timeout:FiniteDuration) = - this(store,SimpleProcessStore(l :_*)) + implicit val tag: ClassTag[UUID] = ClassTag(classOf[UUID]) + override implicit val executionContext: ExecutionContext = system.dispatcher - def this(system: ActorSystem, context: ExecutionContext, timeout:FiniteDuration,l:PiProcess*) = - this(SimpleInstanceStore[Int](),SimpleProcessStore(l :_*))(system,context,timeout) + def this()(implicit system: ActorSystem, timeout: FiniteDuration = 10.seconds) = + this(SimpleInstanceStore[UUID]())(system,timeout) - def this(l:PiProcess*)(implicit system: ActorSystem) = - this(SimpleInstanceStore[Int](),SimpleProcessStore(l :_*))(system,ExecutionContext.global,10.seconds) + val execActor = system.actorOf(AkkaExecutor.execprops(store)) + implicit val tOut = Timeout(timeout) + override protected def init(instance: PiInstance[_]): Future[UUID] = + execActor ? 
AkkaExecutor.Init(instance) map (_.asInstanceOf[UUID]) - val execActor = system.actorOf(AkkaExecutor.execprops(store,processes)) - implicit val tOut = Timeout(timeout) - - override def simulationReady = Await.result(execActor ? AkkaExecutor.SimReady,timeout).asInstanceOf[Boolean] - - override protected def init(process:PiProcess,args:Seq[PiObject]):Future[Int] = - execActor ? AkkaExecutor.Init(process,args) map (_.asInstanceOf[Int]) - - override protected def start(id:Int) = execActor ! AkkaExecutor.Start(id) + override protected def start(id: UUID) = execActor ! AkkaExecutor.Start(id) - override def subscribe(handler:PiEventHandler[Int]):Future[Boolean] = (execActor ? AkkaExecutor.Subscribe(handler)).mapTo[Boolean] + override def subscribe(handler: PiEventHandler[UUID]): Future[PiSwitch] = (execActor ? AkkaExecutor.Subscribe(handler)).mapTo[PiSwitch] - override def unsubscribe(name:String):Future[Boolean] = (execActor ? AkkaExecutor.Unsubscribe(name)).mapTo[Boolean] } object AkkaExecutor { - case class Init(p:PiProcess,args:Seq[PiObject]) - case class Start(id:Int) - case class Result(id:Int,ref:Int,res:MetadataAtomicProcess.MetadataAtomicResult) - case class Error(id:Int,ref:Int,ex:Throwable) + case class Init(instance: PiInstance[_]) + case class Start(id:UUID) + case class Result(id:UUID,ref:Int,res:MetadataAtomicProcess.MetadataAtomicResult) + case class Error(id:UUID,ref:Int,ex:Throwable) - case class ACall(id:Int,ref:Int,p:MetadataAtomicProcess,args:Seq[PiObject],actor:ActorRef) + case class ACall(id:UUID,ref:Int,p:MetadataAtomicProcess,args:Seq[PiObject],actor:ActorRef) case object AckCall case class AFuture(f:Future[Any]) case object Ping - case object SimReady - case class Subscribe(handler:PiEventHandler[Int]) - case class Unsubscribe(name:String) + case class Subscribe(handler:PiEventHandler[UUID]) - def atomicprops(implicit context: ExecutionContext = ExecutionContext.global): Props = Props(new AkkaAtomicProcessExecutor()) - def execprops(store:PiInstanceStore[Int], processes:PiProcessStore)(implicit system: ActorSystem, exc: ExecutionContext): Props = Props(new AkkaExecActor(store,processes)) + def execprops(store: PiInstanceStore[UUID]) + (implicit system: ActorSystem, timeout: FiniteDuration): Props = Props( + new AkkaExecActor(store)(system.dispatcher, implicitly[ClassTag[PiEvent[UUID]]], timeout) + ) } class AkkaExecActor( - var store:PiInstanceStore[Int], - processes:PiProcessStore + var store:PiInstanceStore[UUID], )( - implicit system: ActorSystem, - override implicit val executionContext: ExecutionContext = ExecutionContext.global -) extends Actor with SimplePiObservable[Int] { - - var ctr:Int = 0 - - def init(p:PiProcess,args:Seq[PiObject]):Int = { - val inst = PiInstance(ctr,p,args:_*) - store = store.put(inst) - ctr = ctr + 1 - ctr-1 + implicit val executionContext: ExecutionContext, + override implicit val tag: ClassTag[PiEvent[UUID]], + override implicit val timeout: FiniteDuration +) extends Actor with PiStream[UUID] { + + def init(instance: PiInstance[_]): UUID = { + val id = java.util.UUID.randomUUID + store = store.put(instance.copy(id = id)) + id } - def start(id:Int):Unit = store.get(id) match { + def start(id:UUID):Unit = store.get(id) match { case None => publish(PiFailureNoSuchInstance(id)) case Some(inst) => { publish(PiEventStart(inst)) val ni = inst.reduce if (ni.completed) ni.result match { - case None => { - publish(PiFailureNoResult(ni)) - store = store.del(id) - } - case Some(res) => { - publish(PiEventResult(ni, res)) - store = store.del(id) - } 
+ case None => { + publish(PiFailureNoResult(ni)) + store = store.del(id) + } + case Some(res) => { + publish(PiEventResult(ni, res)) + store = store.del(id) + } } else { - val (toCall,resi) = ni.handleThreads(handleThread(ni)) - val futureCalls = toCall flatMap (resi.piFutureOf) - //System.err.println("*** [" + ctr + "] Updating state after init") - store = store.put(resi) - (toCall zip futureCalls) map runThread(resi) + val (toCall,resi) = ni.handleThreads(handleThread(ni)) + val futureCalls = toCall flatMap (resi.piFutureOf) + //System.err.println("*** [" + ctr + "] Updating state after init") + store = store.put(resi) + (toCall zip futureCalls) map runThread(resi) + publish(PiEventIdle(resi)) } } } - final def postResult(id:Int,ref:Int, res:MetadataAtomicProcess.MetadataAtomicResult):Unit = { + final def postResult(id:UUID, ref:Int, res:MetadataAtomicProcess.MetadataAtomicResult):Unit = { publish(PiEventReturn(id,ref,PiObject.get(res._1),res._2)) store.get(id) match { case None => publish(PiFailureNoSuchInstance(id)) - case Some(i) => + case Some(i) => if (i.id != id) System.err.println("*** [" + id + "] Different instance ID encountered: " + i.id) // This should never happen. We trust the Instance Store! else { //System.err.println("*** [" + id + "] Running!") val ni = i.postResult(ref, res._1).reduce - if (ni.completed) ni.result match { - case None => { - publish(PiFailureNoResult(ni)) - store = store.del(ni.id) - } - case Some(res) => { - publish(PiEventResult(ni, res)) - store = store.del(ni.id) - } - } else { - val (toCall,resi) = ni.handleThreads(handleThread(ni)) - val futureCalls = toCall flatMap (resi.piFutureOf) - //System.err.println("*** [" + i.id + "] Updating state after: " + ref) - store = store.put(resi) - (toCall zip futureCalls) map runThread(resi) - } + if (ni.completed) ni.result match { + case None => { + publish(PiFailureNoResult(ni)) + store = store.del(ni.id) + } + case Some(res) => { + publish(PiEventResult(ni, res)) + store = store.del(ni.id) + } + } else { + val (toCall,resi) = ni.handleThreads(handleThread(ni)) + val futureCalls = toCall flatMap (resi.piFutureOf) + //System.err.println("*** [" + i.id + "] Updating state after: " + ref) + store = store.put(resi) + (toCall zip futureCalls) map runThread(resi) + publish(PiEventIdle(resi)) + } } - } + } } - - def handleThread(i:PiInstance[Int])(ref:Int,f:PiFuture):Boolean = { + + def handleThread(i:PiInstance[UUID])(ref:Int,f:PiFuture):Boolean = { //System.err.println("*** [" + id + "] Checking thread: " + ref + " (" + f.fun + ")") f match { - case PiFuture(name, outChan, args) => i.getProc(name) match { - case None => { - publish(PiFailureUnknownProcess(i, name)) - false - } - case Some(p:MetadataAtomicProcess) => true - case Some(p:CompositeProcess) => { // TODO this should never happen! - publish(PiFailureAtomicProcessIsComposite(i, name)) - false + case PiFuture(name, outChan, args) => i.getProc(name) match { + case None => { + publish(PiFailureUnknownProcess(i, name)) + false + } + case Some(p:MetadataAtomicProcess) => true + case Some(p:CompositeProcess) => { // TODO this should never happen! 
+ publish(PiFailureAtomicProcessIsComposite(i, name)) + false + } } - } - } } - // + } } + // - def runThread(i:PiInstance[Int])(t:(Int,PiFuture)):Unit = { + def runThread(i:PiInstance[UUID])(t:(Int,PiFuture)):Unit = { //System.err.println("*** [" + id + "] Running thread: " + t._1 + " (" + t._2.fun + ")") t match { - case (ref,PiFuture(name, outChan, args)) => i.getProc(name) match { - case None => { - // This should never happen! We already checked! - System.err.println("*** [" + i.id + "] ERROR *** Unable to find process: " + name + " even though we checked already") - } - case Some(p:MetadataAtomicProcess) => { - implicit val tOut = Timeout(1.second) - val objs = args map (_.obj) - try { - publish(PiEventCall(i.id,ref,p,objs)) - // TODO Change from ! to ? to require an acknowledgement - system.actorOf(AkkaExecutor.atomicprops()) ! AkkaExecutor.ACall(i.id,ref,p,objs,self) - } catch { - case _:Throwable => Unit //TODO specify timeout exception here! - also print a warning + case (ref,PiFuture(name, outChan, args)) => i.getProc(name) match { + case None => { + // This should never happen! We already checked! + System.err.println("*** [" + i.id + "] ERROR *** Unable to find process: " + name + " even though we checked already") + } + case Some(p:MetadataAtomicProcess) => { + implicit val tOut = Timeout(1.second) + val objs = args map (_.obj) + try { + publish(PiEventCall(i.id,ref,p,objs)) + p.runMeta(objs).onComplete{ + case Success(res) => self ! AkkaExecutor.Result(i.id,ref,res) + case Failure(ex) => self ! AkkaExecutor.Error(i.id,ref,ex) + } + } catch { + case _:Throwable => Unit //TODO specify timeout exception here! - also print a warning + } + } + case Some(p:CompositeProcess) => {// This should never happen! We already checked! + publish(PiFailureAtomicProcessIsComposite(i, name)) } } - case Some(p:CompositeProcess) => {// This should never happen! We already checked! - publish(PiFailureAtomicProcessIsComposite(i, name)) - } - } - } } - - def simulationReady():Boolean = store.simulationReady - - def receive = { - case AkkaExecutor.Init(p,args) => sender() ! init(p,args) + } } + + def akkaReceive: Receive = { + case AkkaExecutor.Init(inst) => sender() ! init(inst) case AkkaExecutor.Start(id) => start(id) - case AkkaExecutor.Result(id,ref,res) => postResult(id,ref,res) + case AkkaExecutor.Result(id,ref,res) => postResult(id,ref,res) case AkkaExecutor.Error(id,ref,ex) => { publish( PiFailureAtomicProcessException(id,ref,ex) ) store = store.del(id) } case AkkaExecutor.Ping => sender() ! AkkaExecutor.Ping case AkkaExecutor.AckCall => Unit - case AkkaExecutor.SimReady => sender() ! simulationReady() case AkkaExecutor.Subscribe(h) => subscribe(h) pipeTo sender() - case AkkaExecutor.Unsubscribe(name) => unsubscribe(name) pipeTo sender() case m => System.err.println("!!! Received unknown message: " + m) } -} - -class AkkaAtomicProcessExecutor(implicit val exc: ExecutionContext = ExecutionContext.global) extends Actor { //(executor:ActorRef,p:PiProcess,args:Seq[PiObject]) - def receive = { - case AkkaExecutor.ACall(id,ref,p,args,actor) => { - //System.err.println("*** [" + id + "] Calling atomic process: " + p.name + " ref:" + ref) - p.runMeta(args).onComplete{ - case Success(res) => actor ! AkkaExecutor.Result(id,ref,res) - case Failure(ex) => actor ! AkkaExecutor.Error(id,ref,ex) - } - actor ! AkkaExecutor.AckCall - } - case m => System.err.println("!! 
Received unknown message: " + m) - } + override def receive = LoggingReceive { publisherBehaviour orElse akkaReceive } } diff --git a/src/com/workflowfm/pew/execution/MultiStateExecutor.scala b/src/com/workflowfm/pew/execution/MultiStateExecutor.scala index f5a3c244..d3ae1e8d 100644 --- a/src/com/workflowfm/pew/execution/MultiStateExecutor.scala +++ b/src/com/workflowfm/pew/execution/MultiStateExecutor.scala @@ -1,108 +1,105 @@ package com.workflowfm.pew.execution import com.workflowfm.pew._ +import com.workflowfm.pew.stream.SimplePiObservable import scala.concurrent._ import scala.util.{Failure, Success} /** - * Executes any PiProcess asynchronously. - * Only holds a single state, so can only execute one workflow at a time. - * - * Running a second workflow after one has finished executing can be risky because - * promises/futures from the first workflow can trigger changes on the state! - */ + * Executes any PiProcess asynchronously. + * Only holds a single state, so can only execute one workflow at a time. + * + * Running a second workflow after one has finished executing can be risky because + * promises/futures from the first workflow can trigger changes on the state! + */ -class MultiStateExecutor(var store:PiInstanceStore[Int], processes:PiProcessStore) +class MultiStateExecutor(var store: PiInstanceStore[Int]) (override implicit val executionContext: ExecutionContext = ExecutionContext.global) - extends SimulatorExecutor[Int] with SimplePiObservable[Int] { + extends ProcessExecutor[Int] with SimplePiObservable[Int] { - def this(store:PiInstanceStore[Int], l:PiProcess*) = this(store,SimpleProcessStore(l :_*)) - def this(l:PiProcess*) = this(SimpleInstanceStore[Int](),SimpleProcessStore(l :_*)) + def this() = this(SimpleInstanceStore[Int]()) var ctr:Int = 0 - - override protected def init(p:PiProcess,args:Seq[PiObject]) = store.synchronized { - val inst = PiInstance(ctr,p,args:_*) - store = store.put(inst) - ctr = ctr + 1 - Future.successful(ctr-1) + + override protected def init(instance: PiInstance[_]): Future[Int] = store.synchronized { + store = store.put(instance.copy(id = ctr)) + ctr = ctr + 1 + Future.successful(ctr-1) } - override def start(id:Int):Unit = store.get(id) match { + override def start(id: Int): Unit = store.get(id) match { case None => publish(PiFailureNoSuchInstance(id)) case Some(inst) => { publish(PiEventStart(inst)) val ni = inst.reduce if (ni.completed) ni.result match { - case None => { - publish(PiFailureNoResult(ni)) - store = store.del(id) - } - case Some(res) => { - publish(PiEventResult(ni, res)) - store = store.del(id) - } + case None => { + publish(PiFailureNoResult(ni)) + store = store.del(id) + } + case Some(res) => { + publish(PiEventResult(ni, res)) + store = store.del(id) + } } else { - val (_,resi) = ni.handleThreads(handleThread(ni)) - store = store.put(resi) + val (_,resi) = ni.handleThreads(handleThread(ni)) + store = store.put(resi) } } } - - final def run(id:Int,f:PiInstance[Int]=>PiInstance[Int]):Unit = store.synchronized { - store.get(id) match { + + final def run(id: Int, f: PiInstance[Int]=>PiInstance[Int]): Unit = store.synchronized { + store.get(id) match { case None => System.err.println("*** [" + id + "] No running instance! 
***") - case Some(i) => + case Some(i) => if (i.id != id) System.err.println("*** [" + id + "] Different instance ID encountered: " + i.id) else { System.err.println("*** [" + id + "] Running!") val ni = f(i).reduce - if (ni.completed) ni.result match { - case None => { - publish(PiFailureNoResult(ni)) - store = store.del(ni.id) - } - case Some(res) => { - publish(PiEventResult(ni, res)) - store = store.del(ni.id) - } - } else { - store = store.put(ni.handleThreads(handleThread(ni))._2) - } + if (ni.completed) ni.result match { + case None => { + publish(PiFailureNoResult(ni)) + store = store.del(ni.id) + } + case Some(res) => { + publish(PiEventResult(ni, res)) + store = store.del(ni.id) + } + } else { + store = store.put(ni.handleThreads(handleThread(ni))._2) + } } - } + } } - - def handleThread(i:PiInstance[Int])(ref:Int,f:PiFuture):Boolean = { - System.err.println("*** [" + i.id + "] Handling thread: " + ref + " (" + f.fun + ")") + + def handleThread(i: PiInstance[Int])(ref: Int,f: PiFuture): Boolean = { + System.err.println("*** [" + i.id + "] Handling thread: " + ref + " (" + f.fun + ")") f match { - case PiFuture(name, outChan, args) => i.getProc(name) match { - case None => { - System.err.println("*** [" + i.id + "] ERROR *** Unable to find process: " + name) - false - } - case Some(p:MetadataAtomicProcess) => { - val objs = args map (_.obj) - publish(PiEventCall(i.id,ref,p,objs)) - p.runMeta(args map (_.obj)).onComplete{ - case Success(res) => { - publish(PiEventReturn(i.id,ref,PiObject.get(res._1),res._2)) - postResult(i.id,ref,res._1) + case PiFuture(name, outChan, args) => i.getProc(name) match { + case None => { + System.err.println("*** [" + i.id + "] ERROR *** Unable to find process: " + name) + false + } + case Some(p: MetadataAtomicProcess) => { + val objs = args map (_.obj) + publish(PiEventCall(i.id,ref,p,objs)) + p.runMeta(args map (_.obj)).onComplete{ + case Success(res) => { + publish(PiEventReturn(i.id,ref,PiObject.get(res._1),res._2)) + postResult(i.id,ref,res._1) + } + case Failure (ex) => publish(PiFailureAtomicProcessException(i.id,ref,ex)) } - case Failure (ex) => publish(PiFailureAtomicProcessException(i.id,ref,ex)) + System.err.println("*** [" + i.id + "] Called process: " + p.name + " ref:" + ref) + true } - System.err.println("*** [" + i.id + "] Called process: " + p.name + " ref:" + ref) - true + case Some(p: CompositeProcess) => { System.err.println("*** [" + i.id + "] Executor encountered composite process thread: " + name); false } // TODO this should never happen! } - case Some(p:CompositeProcess) => { System.err.println("*** [" + i.id + "] Executor encountered composite process thread: " + name); false } // TODO this should never happen! 
- } - } } - - def postResult(id:Int,ref:Int, res:PiObject):Unit = { + } } + + def postResult(id: Int, ref: Int, res: PiObject): Unit = { System.err.println("*** [" + id + "] Received result for thread " + ref + " : " + res) run(id,{x => x.postResult(ref, res)}) } - - override def simulationReady:Boolean = store.simulationReady } diff --git a/src/com/workflowfm/pew/execution/ProcessExecutor.scala b/src/com/workflowfm/pew/execution/ProcessExecutor.scala index 835688f9..99244a59 100644 --- a/src/com/workflowfm/pew/execution/ProcessExecutor.scala +++ b/src/com/workflowfm/pew/execution/ProcessExecutor.scala @@ -1,6 +1,7 @@ package com.workflowfm.pew.execution import com.workflowfm.pew._ +import com.workflowfm.pew.stream.{ PiObservable, PiEventHandler, PiEventHandlerFactory, ResultHandlerFactory } import scala.concurrent._ import scala.concurrent.duration._ @@ -27,16 +28,27 @@ case class AtomicProcessExecutor(process:AtomicProcess) { /** * Trait representing the ability to execute any PiProcess */ -trait ProcessExecutor[KeyT] { this:PiObservable[KeyT] => +trait ProcessExecutor[KeyT] { this: PiObservable[KeyT] => /** - * Initializes a PiInstance for a process execution. + * Initializes a PiProcess call for a process execution. * This is always and only invoked before a {@code start}, hence why it is protected. * This separation gives a chance to PiEventHandlers to subscribe before execution starts. * @param process The (atomic or composite) PiProcess to be executed * @param args The PiObject arguments to be passed to the process * @return A Future with the new unique ID that was generated */ - protected def init(process:PiProcess,args:Seq[PiObject]):Future[KeyT] + protected def init(process: PiProcess, args: Seq[PiObject]): Future[KeyT] = init(PiInstance(0, process, args: _*)) + + /** + * Initializes a PiInstance for a process execution. + * A new ID will be generated for the PiInstance to ensure freshness. + * This is always and only invoked before a {@code start}, hence why it is protected. + * This separation gives a chance to PiEventHandlers to subscribe before execution starts. + * @param instance The PiInstance to be executed + * @return A Future with the new unique ID that was generated + */ + protected def init(instance: PiInstance[_]): Future[KeyT] + /** * Starts the execution of an initialized PiInstance. @@ -54,10 +66,20 @@ trait ProcessExecutor[KeyT] { this:PiObservable[KeyT] => * @param args The (real) arguments to be passed to the process * @return A Future with the ID corresponding to this execution */ - def call(process:PiProcess,args:Seq[Any]):Future[KeyT] = { + def call(process: PiProcess, args: Seq[Any]): Future[KeyT] = { init(process,args map PiObject.apply) map { id => start(id) ; id } } + /** + * A simple {@code init ; start} sequence when we do not need any event listeners. + * A new ID will be generated for the PiInstance to ensure freshness. + * @param instance The PiInstance to be executed + * @return A Future with the ID corresponding to this execution + */ + def call(instance: PiInstance[_]): Future[KeyT] = { + init(instance) map { id => start(id) ; id } + } + /** * A {@code init ; start} sequence that gives us a chance to subscribe a listener * that is specific to this execution.
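// Illustrative sketch of the typical ProcessExecutor entry points after this change, covering
// both the (process, args) flavour and the pre-built PiInstance flavour. `exec`, `myProcess`,
// `myInstance` and `myHandlerFactory` are hypothetical placeholders for a concrete executor,
// a PiProcess, a PiInstance[_] and a PiEventHandlerFactory respectively.
import scala.concurrent.Future

val result: Future[Any] = exec.execute(myProcess, Seq("some input"))     // init ; start, then wait for the final result
val started: Future[_] = exec.call(myInstance)                           // init ; start, returning only the fresh ID
val handlerF = exec.call(myProcess, Seq("some input"), myHandlerFactory) // subscribe a per-call handler before starting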
@@ -66,7 +88,11 @@ trait ProcessExecutor[KeyT] { this:PiObservable[KeyT] => * @param factory A PiEventHandlerFactory which generates PiEventHandler's for a given ID * @return A Future with the PiEventHandler that was generated */ - def call[H <: PiEventHandler[KeyT]](process:PiProcess,args:Seq[Any],factory:PiEventHandlerFactory[KeyT,H]):Future[H] = { + def call[H <: PiEventHandler[KeyT]] ( + process: PiProcess, + args: Seq[Any], + factory: PiEventHandlerFactory[KeyT,H] + ): Future[H] = { init(process,args map PiObject.apply) flatMap { id => val handler = factory.build(id) subscribe(handler).map { _ => @@ -77,53 +103,46 @@ trait ProcessExecutor[KeyT] { this:PiObservable[KeyT] => } /** - * Executes a process with a PromiseHandler - * @param process The (atomic or composite) PiProcess to be executed - * @param args The (real) arguments to be passed to the process - * @return A Future with the result of the executed process + * A {@code init ; start} sequence that gives us a chance to subscribe a listener + * that is specific to this execution. + * A new ID will be generated for the PiInstance to ensure freshness. + * @param instance The PiInstance to be executed + * @param factory A PiEventHandlerFactory which generates PiEventHandler's for a given ID + * @return A Future with the PiEventHandler that was generated */ - def execute(process:PiProcess,args:Seq[Any]):Future[Any] = - call(process,args,new PromiseHandlerFactory[KeyT]({ id => s"[$id]"})) flatMap (_.future) -} - -object ProcessExecutor { - // def default = SingleBlockingExecutor(Map[String,PiProcess]()) - - final case class AlreadyExecutingException(private val cause: Throwable = None.orNull) - extends Exception("Unable to execute more than one process at a time", cause) - - /* - final case class UnknownProcessException(val process:String, private val cause: Throwable = None.orNull) - extends Exception("Unknown process: " + process, cause) - final case class AtomicProcessIsCompositeException(val process:String, private val cause: Throwable = None.orNull) - extends Exception("Executor encountered composite process thread: " + process + " (this should never happen!)", cause) - final case class NoResultException(val id:String, private val cause: Throwable = None.orNull) - extends Exception("Failed to get result for: " + id, cause) - final case class NoSuchInstanceException(val id:String, private val cause: Throwable = None.orNull) - extends Exception("Failed to find instance with id: " + id, cause) - */ -} + def call[H <: PiEventHandler[KeyT]] + (instance: PiInstance[_], + factory: PiEventHandlerFactory[KeyT,H] + ): Future[H] = { + init(instance) flatMap { id => + val handler = factory.build(id) + subscribe(handler).map { _ => + start(id) + handler + } + } + } -trait SimulatorExecutor[KeyT] extends ProcessExecutor[KeyT] { this:PiObservable[KeyT] => /** - * This should check all executing PiInstances if they are simulationReady. - * This means that all possible execution has been performed and they are all - * waiting for simulation time to pass. 
- * @return true if all PiInstances are simulationReady + * Executes a process with a ResultHandler + * @param process The (atomic or composite) PiProcess to be executed + * @param args The (real) arguments to be passed to the process + * @return A Future with the result of the executed process */ - def simulationReady:Boolean + def execute(process: PiProcess, args: Seq[Any]): Future[Any] = + call(process, args, new ResultHandlerFactory[KeyT]) flatMap (_.future) /** - * Executes a process with a PromiseHandler - * Same as ProcessExecutor.execute but blocks until call has been initiated. - * The simulator needs to ensure this has happened before continuing. + * Executes a PiInstance with a ResultHandler + * A new ID will be generated for the PiInstance to ensure freshness. * @param instance The PiInstance to be executed * @return A Future with the result of the executed process */ - def simulate(process:PiProcess,args:Seq[Any],timeout:FiniteDuration=10.seconds):Future[Any] = { - val f = call(process,args,new PromiseHandlerFactory[KeyT]({ id => s"[$id]"})) - val handler = Await.result(f, timeout) - handler.future - } + def execute(instance: PiInstance[_]): Future[Any] = + call(instance, new ResultHandlerFactory[KeyT]) flatMap (_.future) +} + +object ProcessExecutor { + final case class AlreadyExecutingException(private val cause: Throwable = None.orNull) + extends Exception("Unable to execute more than one process at a time", cause) } diff --git a/src/com/workflowfm/pew/execution/SingleBlockingExecutor.scala b/src/com/workflowfm/pew/execution/SingleBlockingExecutor.scala index 55ffb7ba..595f0d12 100644 --- a/src/com/workflowfm/pew/execution/SingleBlockingExecutor.scala +++ b/src/com/workflowfm/pew/execution/SingleBlockingExecutor.scala @@ -1,6 +1,8 @@ package com.workflowfm.pew.execution import com.workflowfm.pew._ +import com.workflowfm.pew.stream.SimplePiObservable + import scala.concurrent._ import scala.concurrent.duration.Duration import scala.annotation.tailrec @@ -9,8 +11,8 @@ import scala.annotation.tailrec * SingleBlockingExecutor fully executes one PiProcess from a map of given PiProcesses. * It blocks waiting for every atomic call to finish, so has no concurrency.
*/ -case class SingleBlockingExecutor(processes:Map[String,PiProcess])(implicit val context:ExecutionContext) { // extends ProcessExecutor[Int] with SimplePiObservable[Int] { - def call(process:PiProcess,args:Seq[PiObject]) = { +case class SingleBlockingExecutor(implicit val context:ExecutionContext) { // extends ProcessExecutor[Int] with SimplePiObservable[Int] { + def call(process:PiProcess, args:Seq[PiObject]) = { val s = process.execState(args) System.err.println(" === INITIAL STATE === \n" + s + "\n === === === === === === === ===") val fs = run(s) @@ -21,18 +23,18 @@ case class SingleBlockingExecutor(processes:Map[String,PiProcess])(implicit val } @tailrec - final def run(s:PiState):PiState = { + final def run(s: PiState): PiState = { val ns = s.fullReduce() if (ns.threads isEmpty) ns else run((ns /: ns.threads)(handleThread)) } - def handleThread(s:PiState, x:(Int,PiFuture)):PiState = x match { case (ref,f) => + def handleThread(s: PiState, x: (Int,PiFuture)): PiState = x match { case (ref,f) => handleThread(ref,f,s) getOrElse s } def handleThread(ref:Int, f:PiFuture, s:PiState):Option[PiState] = f match { - case PiFuture(name, outChan, args) => processes get name match { + case PiFuture(name, outChan, args) => s.processes.get(name) match { case None => { System.err.println("*** ERROR *** Unable to find process: " + name) Some(s removeThread ref) @@ -46,14 +48,6 @@ case class SingleBlockingExecutor(processes:Map[String,PiProcess])(implicit val } } - def withProc(p:PiProcess):SingleBlockingExecutor = copy(processes = processes + (p.name->p)) withProcs (p.dependencies :_*) - def withProcs(l:PiProcess*):SingleBlockingExecutor = (this /: l)(_ withProc _) - - //override def simulationReady:Boolean = true - def execute(process:PiProcess,args:Seq[Any]):Option[Any] = - this withProc process call(process,args map PiObject.apply) -} -object SingleBlockingExecutor { - def apply()(implicit context:ExecutionContext):SingleBlockingExecutor = SingleBlockingExecutor(Map[String,PiProcess]())(context) + call(process,args map PiObject.apply) } diff --git a/src/com/workflowfm/pew/execution/SingleStateExecutor.scala b/src/com/workflowfm/pew/execution/SingleStateExecutor.scala index c81f766f..eb5fba7d 100644 --- a/src/com/workflowfm/pew/execution/SingleStateExecutor.scala +++ b/src/com/workflowfm/pew/execution/SingleStateExecutor.scala @@ -1,6 +1,7 @@ package com.workflowfm.pew.execution import com.workflowfm.pew._ +import com.workflowfm.pew.stream.SimplePiObservable import scala.concurrent._ import scala.util.{Failure, Success} @@ -13,20 +14,16 @@ import scala.util.{Failure, Success} * promises/futures from the first workflow can trigger changes on the state! 
*/ -class SingleStateExecutor(processes:PiProcessStore) - (override implicit val executionContext: ExecutionContext = ExecutionContext.global) +class SingleStateExecutor(override implicit val executionContext: ExecutionContext = ExecutionContext.global) extends ProcessExecutor[Int] with SimplePiObservable[Int] { - - def this(l:PiProcess*) = this(SimpleProcessStore(l :_*)) var ctr:Int = 0 var instance:Option[PiInstance[Int]] = None - - override protected def init(p:PiProcess,args:Seq[PiObject]):Future[Int] = Future { - if (instance.isDefined) throw new ProcessExecutor.AlreadyExecutingException() + + override protected def init(instance: PiInstance[_]): Future[Int] = Future { + if (this.instance.isDefined) throw new ProcessExecutor.AlreadyExecutingException() else { - val inst = PiInstance(ctr,p,args:_*) - instance = Some(inst) + this.instance = Some(instance.copy(id = ctr)) ctr = ctr + 1 ctr - 1 } @@ -107,10 +104,5 @@ class SingleStateExecutor(processes:PiProcessStore) } } } - - def simulationReady:Boolean = instance match { - case None => true - case Some(i) => i.simulationReady - } } diff --git a/src/com/workflowfm/pew/metrics/Measure.scala b/src/com/workflowfm/pew/metrics/Measure.scala index 1d9288ec..d30ec085 100644 --- a/src/com/workflowfm/pew/metrics/Measure.scala +++ b/src/com/workflowfm/pew/metrics/Measure.scala @@ -1,24 +1,54 @@ package com.workflowfm.pew.metrics import com.workflowfm.pew._ +import com.workflowfm.pew.stream.PiEventHandler +/** Metrics for a particular instance of an atomic process. + * + * @constructor initialize the metrics of a process for a workflow with ID `piID`, unique call reference `ref`, and process name `process`, starting now + * + * @tparam KeyT the type used for workflow IDs + * @param piID the ID of the workflow that executed this process + * @param ref the call reference for that process, i.e. a unique call ID for this particular workflow + * @param process the name of the process + * @param start the system time in milliseconds when the process call started + * @param finish the system time in milliseconds that the process call finished, or [[scala.None]] if it is still running + * @param result a `String` representation of the returned result from the process call, or [[scala.None]] if it is still running. In case of failure, the field is populated with the localized message of the exception thrown + */ case class ProcessMetrics[KeyT] (piID:KeyT, ref:Int, process:String, start:Long=System.currentTimeMillis(), finish:Option[Long]=None, result:Option[String]=None) { def complete(time:Long,result:Any) = copy(finish=Some(time),result=Some(result.toString)) } +/** Metrics for a particular workflow. + * + * @constructor initialize the metrics of a workflow with ID `piID` starting now + * + * @tparam KeyT the type used for workflow IDs + * @param piID the unique ID of the workflow. + * @param start the system time in milliseconds when the workflow started executing + * @param calls the number of calls to atomic processes + * @param finish the system time in milliseconds that this workflow finished, or [[scala.None]] if it is still running + * @param result a `String` representation of the returned result from the workflow, or [[scala.None]] if it is still running.
In case of failure, the field is populated with the localized message of the exception thrown + */ case class WorkflowMetrics[KeyT] (piID:KeyT, start:Long=System.currentTimeMillis(), calls:Int=0, finish:Option[Long]=None, result:Option[String]=None) { def complete(time:Long,result:Any) = copy(finish=Some(time),result=Some(result.toString)) def call = copy(calls=calls+1) } +/** Collects/aggregates metrics across multiple process calls and workflow executions */ class MetricsAggregator[KeyT] { import scala.collection.immutable.Map - + + /** Process metrics indexed by workflow ID, then by call reference ID */ val processMap = scala.collection.mutable.Map[KeyT,Map[Int,ProcessMetrics[KeyT]]]() + /** Workflow metrics indexed by workflow ID */ val workflowMap = scala.collection.mutable.Map[KeyT,WorkflowMetrics[KeyT]]() // Set - + + /** Adds a new [[ProcessMetrics]] instance, taking care of indexing automatically. + * Overwrites a previous instance with the same IDs. + */ def +=(m:ProcessMetrics[KeyT]) = { val old = processMap.get(m.piID) match { case None => Map[Int,ProcessMetrics[KeyT]]() @@ -26,48 +56,146 @@ class MetricsAggregator[KeyT] { } processMap += (m.piID -> (old + (m.ref -> m))) } + /** Adds a new [[WorkflowMetrics]] instance, taking care of indexing automatically. + * Overwrites a previous instance with the same ID. + */ def +=(m:WorkflowMetrics[KeyT]) = workflowMap += (m.piID->m) // Update - + + /** Updates a [[ProcessMetrics]] instance. + * + * @return the updated [[processMap]] or [[scala.None]] if the identified instance does not exist + * + * @param piID the workflow ID of the process + * @param ref the call reference of the process + * @param u a function to update the [[ProcessMetrics]] instance + * + * @example Assume a function that updates the name of a process in [[ProcessMetrics]] to `"x"`: + * {{{ def myUpdate(p: ProcessMetrics[KeyT]): ProcessMetrics[KeyT] = p.copy(process="x") }}} + * We can update the metrics of the process with workflow ID `5` and call reference `2` as follows: + * {{{ metricsAggregator ^ (5,2,myUpdate) }}} + * + */ def ^(piID:KeyT,ref:Int,u:ProcessMetrics[KeyT]=>ProcessMetrics[KeyT]) = processMap.get(piID).flatMap(_.get(ref)).map { m => this += u(m) } + + /** Updates a [[WorkflowMetrics]] instance. + * + * @return the updated [[workflowMap]] or [[scala.None]] if the identified instance does not exist + * + * @param piID the ID of the workflow + * @param u a function to update the [[WorkflowMetrics]] instance + * + * @example Assume a function that increments the number of calls in [[WorkflowMetrics]]: + * {{{ def myUpdate(w: WorkflowMetrics[KeyT]): WorkflowMetrics[KeyT] = w.copy(calls=w.calls+1) }}} + * We can update the metrics of the workflow with ID `5` as follows: + * {{{ metricsAggregator ^ (5,myUpdate) }}} + * + */ def ^(piID:KeyT,u:WorkflowMetrics[KeyT]=>WorkflowMetrics[KeyT]) = workflowMap.get(piID).map { m => this += u(m) } - + // Handle events - + + /** Handles the event of a workflow starting, updating its metrics accordingly. + * + * @param piID the ID of the workflow + * @param time the timestamp to be recorded as the workflow start + */ def workflowStart(piID:KeyT, time:Long=System.currentTimeMillis()):Unit = this += WorkflowMetrics(piID,time) + + /** Handles the event of a workflow finishing successfully, updating its metrics accordingly.
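// Illustrative sketch of recording one small workflow run by hand through the event-recording
// methods of MetricsAggregator, using Int keys and explicit timestamps (they default to
// System.currentTimeMillis() otherwise). The process name "Pa" is a made-up example.
val agg = new MetricsAggregator[Int]
agg.workflowStart(1, time = 0L)
agg.procCall(1, ref = 0, process = "Pa", time = 0L)
agg.procReturn(1, ref = 0, result = "done", time = 10L)
agg.workflowResult(1, result = "done", time = 10L)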
+ * + * @param piID the ID of the workflow + * @param result the return value of the workflow + * @param time the timestamp to be recorded as the workflow finish + */ def workflowResult(piID:KeyT, result:Any, time:Long=System.currentTimeMillis()):Unit = this ^ (piID,_.complete(time,result)) + + /** Handles the event of a workflow failing with an exception, updating its metrics accordingly. + * + * @param piID the ID of the workflow + * @param ex the thrown exception (or other [[scala.Throwable]]) + * @param time the timestamp to be recorded as the workflow finish + */ def workflowException(piID:KeyT, ex:Throwable, time:Long=System.currentTimeMillis()):Unit = this ^ (piID,_.complete(time,"Exception: " + ex.getLocalizedMessage)) - + + /** Handles the event of an atomic process call, updating its metrics accordingly. + * + * @param piID the ID of the workflow + * @param ref the call reference ID + * @param process the name of the atomic process being called + * @param time the timestamp to be recorded as the process start + */ def procCall(piID:KeyT, ref:Int, process:String, time:Long=System.currentTimeMillis()):Unit = { this += ProcessMetrics(piID,ref,process,time) this ^ (piID,_.call) } + + /** Handles the event of an atomic process returning successfully, updating its metrics accordingly. + * + * @param piID the ID of the workflow + * @param ref the call reference ID + * @param result the result returned by the process + * @param time the timestamp to be recorded as the process finish + */ def procReturn(piID:KeyT, ref:Int, result:Any, time:Long=System.currentTimeMillis()):Unit = this ^ (piID,ref,_.complete(time, result)) - def processException(piID:KeyT, ref:Int, ex:Throwable, time:Long=System.currentTimeMillis()):Unit = processFailure(piID,ref,ex.getLocalizedMessage,time) + + /** Handles the event of an atomic process failing with an exception, updating its metrics accordingly. + * + * @param piID the ID of the workflow + * @param ref the call reference ID + * @param ex the thrown exception (or other [[scala.Throwable]]) + * @param time the timestamp to be recorded as the process finish + */ + def processException(piID:KeyT, ref:Int, ex:Throwable, time:Long=System.currentTimeMillis()):Unit = + processFailure(piID,ref,ex.getLocalizedMessage,time) + + /** Handles the event of an atomic process failing in any way, updating its metrics accordingly. + * + * @param piID the ID of the workflow + * @param ref the call reference ID + * @param ex a `String` explanation of what went wrong + * @param time the timestamp to be recorded as the process finish + */ def processFailure(piID:KeyT, ref:Int, ex:String, time:Long=System.currentTimeMillis()):Unit = { this ^ (piID,_.complete(time,"Exception: " + ex)) this ^ (piID,ref,_.complete(time,"Exception: " + ex)) } // Getters - + + /** Returns the collection of workflow IDs that have been tracked. */ def keys = workflowMap.keys + /** Returns all the tracked instances of [[WorkflowMetrics]] sorted by starting time. */ def workflowMetrics = workflowMap.values.toSeq.sortBy(_.start) + /** Returns all the tracked instances of [[ProcessMetrics]] sorted by starting time. */ def processMetrics = processMap.values.flatMap(_.values).toSeq.sortBy(_.start) + /** Returns all the tracked instances of [[ProcessMetrics]] associated with a particular workflow, sorted by starting time. 
+ * @param id the ID of the workflow + */ def processMetricsOf(id:KeyT) = processMap.getOrElse(id,Map[Int,ProcessMetrics[KeyT]]()).values.toSeq.sortBy(_.start) + /** Returns a [[scala.collection.immutable.Set]] of all process names being tracked. + * This is useful when using process names as a category, for example to colour code tasks in the timeline. + */ def processSet = processMap.values.flatMap(_.values.map(_.process)).toSet[String] } -class MetricsHandler[KeyT](override val name: String, timeFn: PiMetadata.Key[Long] = PiMetadata.SystemTime ) +/** A [[MetricsAggregator]] that is also a [[com.workflowfm.pew.stream.PiEventHandler]]. + * Aggregates metrics automatically based on [[PiEvent]]s and system time. + * + * @param timeFn the [[PiMetadata]] key to retrieve timing information (the recorded system time by default) + */ +class MetricsHandler[KeyT](timeFn: PiMetadata.Key[Long] = PiMetadata.SystemTime ) extends MetricsAggregator[KeyT] with PiEventHandler[KeyT] { + /** Converts [[PiEvent]]s to metrics updates. */ override def apply( e: PiEvent[KeyT] ): Boolean = { e match { case PiEventStart(i,t) => workflowStart( i.id, timeFn(t) ) @@ -75,6 +203,7 @@ class MetricsHandler[KeyT](override val name: String, timeFn: PiMetadata.Key[Lon case PiEventCall(i,r,p,_,t) => procCall( i, r, p.iname, timeFn(t) ) case PiEventReturn(i,r,s,t) => procReturn( i, r, s, timeFn(t) ) case PiFailureAtomicProcessException(i,r,m,_,t) => processFailure( i, r, m, timeFn(t) ) + case PiEventIdle(_, _) => Unit case ev: PiFailure[KeyT] => workflowException( ev.id, ev.exception, timeFn(ev.metadata) ) diff --git a/src/com/workflowfm/pew/metrics/Output.scala b/src/com/workflowfm/pew/metrics/Output.scala index ac4a5cf5..7541f471 100644 --- a/src/com/workflowfm/pew/metrics/Output.scala +++ b/src/com/workflowfm/pew/metrics/Output.scala @@ -4,9 +4,14 @@ import scala.collection.immutable.Queue import org.apache.commons.lang3.time.DurationFormatUtils import java.text.SimpleDateFormat +/** Manipulates a [[MetricsAggregator]] to produce some output via side-effects. + * @tparam KeyT the type used for workflow IDs + */ trait MetricsOutput[KeyT] extends (MetricsAggregator[KeyT] => Unit) { + /** Compose with another [[MetricsOutput]] in sequence. */ def and(h:MetricsOutput[KeyT]) = MetricsOutputs(this,h) } +/** Contains helpful formatting shortcut functions. */ object MetricsOutput { def formatOption[T](v:Option[T], nullValue: String, format:T=>String={ x:T => x.toString }) = v.map(format).getOrElse(nullValue) def formatTime(format:String)(time:Long) = new SimpleDateFormat(format).format(time) @@ -22,45 +27,90 @@ object MetricsOutput { } getOrElse(nullValue) } +/** A [[MetricsOutput]] consisting of a [[scala.collection.immutable.Queue]] of [[MetricsOutput]]s + * to be run sequentially. + */ case class MetricsOutputs[KeyT](handlers:Queue[MetricsOutput[KeyT]]) extends MetricsOutput[KeyT] { + /** Call all included [[MetricsOutput]]s. */ override def apply(aggregator:MetricsAggregator[KeyT]) = handlers map (_.apply(aggregator)) + /** Add another [[MetricsOutput]] in sequence. */ override def and(h:MetricsOutput[KeyT]) = MetricsOutputs(handlers :+ h) } object MetricsOutputs { + /** Shorthand constructor for a [[MetricsOutputs]] from a list of [[MetricsOutput]]s.
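// Illustrative sketch of wiring a MetricsHandler into an executor and then running two of the
// outputs defined below over the aggregated data. `exec` is a hypothetical UUID-keyed executor
// (e.g. the reworked AkkaExecutor) and the output path and file prefix are made up.
import java.util.UUID

val metrics = new MetricsHandler[UUID]()
exec.subscribe(metrics)  // returns a Future[PiSwitch] that can later stop the subscription
// ... execute some workflows, then:
val out = MetricsOutputs(new MetricsPrinter[UUID], new MetricsCSVFileOutput[UUID]("/tmp/", "pew"))
out(metrics)             // a MetricsHandler is also a MetricsAggregator, so it can be passed directly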
*/ def apply[KeyT](handlers:MetricsOutput[KeyT]*):MetricsOutputs[KeyT] = MetricsOutputs[KeyT](Queue[MetricsOutput[KeyT]]() ++ handlers) } - +/** Generates a string representation of the metrics using a generalized CSV format. */ trait MetricsStringOutput[KeyT] extends MetricsOutput[KeyT] { + /** A string representing null values. */ val nullValue = "NULL" + /** The field names for [[ProcessMetrics]]. + * @param separator a string (such as a space or comma) to separate the names + */ def procHeader(separator:String) = Seq("ID","PID","Process","Start","Finish","Result").mkString(separator) + + /** String representation of a [[ProcessMetrics]] instance. + * + * @param separator a string (such as a space or comma) to separate the values + * @param timeFormat optional argument to format timestamps using `java.text.SimpleDateFormat` + * @param m the [[ProcessMetrics]] instance to be handled + */ def procCSV(separator:String,timeFormat:Option[String])(m:ProcessMetrics[KeyT]) = m match { case ProcessMetrics(id,r,p,s,f,res) => timeFormat match { case None => Seq(id,r,p,s,MetricsOutput.formatOption(f,nullValue),MetricsOutput.formatOption(res,nullValue)).mkString(separator) case Some(format) => Seq(id,r,p,MetricsOutput.formatTime(format)(s),MetricsOutput.formatTimeOption(f,format,nullValue),MetricsOutput.formatOption(res,nullValue)).mkString(separator) } } - + + /** The field names for [[WorkflowMetrics]]. + * @param separator a string (such as a space or comma) to separate the names + */ def workflowHeader(separator:String) = Seq("ID","PID","Process","Start","Finish","Result").mkString(separator) + + /** String representation of a [[WorkflowMetrics]] instance. + * + * @param separator a string (such as a space or comma) to separate the values + * @param timeFormat optional argument to format timestamps using `java.text.SimpleDateFormat` + * @param m the [[WorkflowMetrics]] instance to be handled + */ def workflowCSV(separator:String,timeFormat:Option[String])(m:WorkflowMetrics[KeyT]) = m match { case WorkflowMetrics(id,s,c,f,res) => timeFormat match { case None => Seq(id,s,c,MetricsOutput.formatOption(f,nullValue),MetricsOutput.formatOption(res,nullValue)).mkString(separator) case Some(format) => Seq(id,MetricsOutput.formatTime(format)(s),c,MetricsOutput.formatTimeOption(f,format,nullValue),MetricsOutput.formatOption(res,nullValue)).mkString(separator) } } - + + /** Formats all [[ProcessMetrics]] in a [[MetricsAggregator]] in a single string. + * + * @param aggregator the [[MetricsAggregator]] to retrieve the metrics to be formatted + * @param separator a string (such as a space or comma) to separate values + * @param lineSep a string (such as a new line) to separate process metrics + * @param timeFormat optional argument to format timestamps using `java.text.SimpleDateFormat` + */ def processes(aggregator:MetricsAggregator[KeyT],separator:String,lineSep:String="\n",timeFormat:Option[String]=None) = aggregator.processMetrics.map(procCSV(separator,timeFormat)).mkString(lineSep) - def workflows(aggregator:MetricsAggregator[KeyT],separator:String,lineSep:String="\n",timeFormat:Option[String]=None) = + + /** Formats all [[WorkflowMetrics]] in a [[MetricsAggregator]] in a single string. 
+ * + * @param aggregator the [[MetricsAggregator]] to retrieve the metrics to be formatted + * @param separator a string (such as a space or comma) to separate values + * @param lineSep a string (such as a new line) to separate workflow metrics + * @param timeFormat optional argument to format timestamps using `java.text.SimpleDateFormat` + */ + def workflows(aggregator:MetricsAggregator[KeyT],separator:String,lineSep:String="\n",timeFormat:Option[String]=None) = aggregator.workflowMetrics.map(workflowCSV(separator,timeFormat)).mkString(lineSep) } - -class MetricsPrinter[KeyT] extends MetricsStringOutput[KeyT] { +/** Prints all metrics to standard output. */ +class MetricsPrinter[KeyT] extends MetricsStringOutput[KeyT] { + /** Separates the values. */ val separator = "\t| " + /** Separates metrics instances. */ val lineSep = "\n" + /** Default time format using `java.text.SimpleDateFormat`. */ val timeFormat = Some("YYYY-MM-dd HH:mm:ss.SSS") override def apply(aggregator:MetricsAggregator[KeyT]) = { @@ -80,6 +130,9 @@ ${workflows(aggregator,separator,lineSep,timeFormat)} } } +/** Helper to write stuff to a file. + * @todo Move to [[com.workflowfm.pew.util]] + */ trait FileOutput { import java.io._ @@ -93,6 +146,15 @@ trait FileOutput { } } +/** Outputs metrics to files using a standard CSV format. + * Generates 2 CSV files: + * 1. one for processes with a "-tasks.csv" suffix, + * 2. and one for workflows with a "-workflows.csv" suffix. + * + * @tparam KeyT the type used for workflow IDs + * @param path path to directory where the files will be placed + * @param name file name prefix + */ class MetricsCSVFileOutput[KeyT](path:String,name:String) extends MetricsStringOutput[KeyT] with FileOutput { val separator = "," @@ -105,6 +167,16 @@ class MetricsCSVFileOutput[KeyT](path:String,name:String) extends MetricsStringO } } +/** Outputs metrics to a file using the d3-timeline format. + * Generates 1 file with a "-data.js" suffix. + * This can then be combined with the resources at + * [[https://github.com/PetrosPapapa/WorkflowFM-PEW/tree/master/resources/d3-timeline]] + * to render the timeline in a browser. + * + * @tparam KeyT the type used for workflow IDs + * @param path path to directory where the files will be placed + * @param file file name prefix + */ class MetricsD3Timeline[KeyT](path:String,file:String) extends MetricsOutput[KeyT] with FileOutput { import java.io._ @@ -114,7 +186,8 @@ class MetricsD3Timeline[KeyT](path:String,file:String) extends MetricsOutput[Key val dataFile = s"$path$file-data.js" writeToFile(dataFile, result) } - + + /** Helps build the output with a static system time. */ def build(aggregator:MetricsAggregator[KeyT], now:Long) = { var buf:StringBuilder = StringBuilder.newBuilder buf.append("var processes = [\n") @@ -125,7 +198,15 @@ class MetricsD3Timeline[KeyT](path:String,file:String) extends MetricsOutput[Key buf.append("];\n") buf.toString } - + + /** Encodes an entire workflow as a timeline. 
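// Illustrative sketch of producing a d3-timeline data file from aggregated metrics; the path
// and file prefix are made up, and `agg` could be any MetricsAggregator[UUID], e.g. a
// MetricsHandler that has been subscribed to an executor.
import java.util.UUID

val agg = new MetricsHandler[UUID]()
val timeline = new MetricsD3Timeline[UUID]("/tmp/", "run")
timeline(agg)  // writes /tmp/run-data.js, to be rendered with the d3-timeline resources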
+ * + * @return the encoded timeline for the workflow + * @param m the [[WorkflowMetrics]] recorded for the particular workflow + * @param agg the [[MetricsAggregator]] containing all the relevant metrics + * @param now the current (real) time to be used as the end time of unfinished processes + * @param prefix a string (usually some whitespace) to prefix the entry + */ def workflowEntry(m:WorkflowMetrics[KeyT], agg:MetricsAggregator[KeyT], now:Long, prefix:String) = { val processes = (Map[String,Queue[ProcessMetrics[KeyT]]]() /: agg.processMetricsOf(m.piID)){ case (m,p) => { val procs = m.getOrElse(p.process, Queue()) :+ p @@ -134,14 +215,29 @@ class MetricsD3Timeline[KeyT](path:String,file:String) extends MetricsOutput[Key val data = processes.map { case (proc,i) => processEntry(proc, i, now, prefix + "\t") } s"""$prefix{id: \"${m.piID}\", data: [\n${data.mkString("")}$prefix]},\n""" } - + + /** Encodes multiple process calls of the same process in a single lane. + * + * @return the encoded timeline lane + * @param proc the name of the process + * @param i the list of [[ProcessMetrics]] recorded for each process call + * @param now the current (real) time to be used as the end time of unfinished processes + * @param prefix a string (usually some whitespace) to prefix the entry + */ def processEntry(proc:String, i:Seq[ProcessMetrics[KeyT]], now:Long, prefix:String) = { if (i.isEmpty) "" else { val times = ("" /: i){ case (s,m) => s"$s${callEntry(now,m,prefix + "\t")}" } s"""$prefix{label: \"$proc\", times: [\n$times$prefix]},\n""" } } - + + /** Encodes a process call as a timeline task. + * + * @return the encoded timeline task + * @param now the current (real) time to be used as the end time of unfinished processes + * @param m the [[ProcessMetrics]] recorded for this process call + * @param prefix a string (usually some whitespace) to prefix the entry + */ def callEntry(now:Long, m:ProcessMetrics[KeyT], prefix:String) = { s"""$prefix{"label":"${m.ref}", "process": "${m.process}", "starting_time": ${m.start}, "ending_time": ${m.finish.getOrElse(now)}, "result":"${MetricsOutput.formatOption(m.result,"NONE")}"},\n""" diff --git a/src/com/workflowfm/pew/mongodb/MongoExecutor.scala b/src/com/workflowfm/pew/mongodb/MongoExecutor.scala index a396552c..ed3d191c 100644 --- a/src/com/workflowfm/pew/mongodb/MongoExecutor.scala +++ b/src/com/workflowfm/pew/mongodb/MongoExecutor.scala @@ -2,6 +2,7 @@ package com.workflowfm.pew.mongodb import com.workflowfm.pew._ import com.workflowfm.pew.execution._ +import com.workflowfm.pew.stream.SimplePiObservable import com.workflowfm.pew.mongodb.bson.PiCodecProvider import scala.concurrent._ @@ -31,11 +32,11 @@ import org.mongodb.scala.ReadConcern import org.mongodb.scala.WriteConcern class MongoExecutor - (client:MongoClient, db:String, collection:String, processes:PiProcessStore) + (client: MongoClient, db: String, collection: String, processes: PiProcessStore) (implicit val executionContext: ExecutionContext = ExecutionContext.global) extends ProcessExecutor[ObjectId] with SimplePiObservable[ObjectId] { - def this(client:MongoClient, db:String, collection:String, l:PiProcess*) = this(client,db,collection,SimpleProcessStore(l :_*)) + def this(client: MongoClient, db: String, collection: String, l: PiProcess*) = this(client,db,collection,SimpleProcessStore(l :_*)) final val CAS_MAX_ATTEMPTS = 10 final val CAS_WAIT_MS = 1 @@ -45,11 +46,11 @@ class MongoExecutor withCodecRegistry(codecRegistry). //withReadConcern(ReadConcern.LINEARIZABLE).
withWriteConcern(WriteConcern.MAJORITY); - val col:MongoCollection[PiInstance[ObjectId]] = database.getCollection(collection) + val col: MongoCollection[PiInstance[ObjectId]] = database.getCollection(collection) - override def init(p:PiProcess,args:Seq[PiObject]):Future[ObjectId] = { + override def init(instance: PiInstance[_]): Future[ObjectId] = { val oid = new ObjectId - val inst = PiInstance(oid,p,args:_*) + val inst = instance.copy(id = oid) col.insertOne(inst).toFuture() map (_=>oid) } diff --git a/src/com/workflowfm/pew/mongodb/bson/events/PiEventIdleCodec.scala b/src/com/workflowfm/pew/mongodb/bson/events/PiEventIdleCodec.scala new file mode 100644 index 00000000..2a5eb285 --- /dev/null +++ b/src/com/workflowfm/pew/mongodb/bson/events/PiEventIdleCodec.scala @@ -0,0 +1,34 @@ +package com.workflowfm.pew.mongodb.bson.events + +import com.workflowfm.pew.PiMetadata.PiMetadataMap +import com.workflowfm.pew.mongodb.bson.auto.ClassCodec +import com.workflowfm.pew.{PiEventIdle, PiInstance} +import org.bson.{BsonReader, BsonWriter} +import org.bson.codecs.{Codec, DecoderContext, EncoderContext} + +class PiEventIdleCodec[T]( piiCodec: Codec[PiInstance[T]], metaCodec: Codec[PiMetadataMap] ) + extends ClassCodec[PiEventIdle[T]] { + + val piiN: String = "pii" + val timeN: String = "timestamp" + + override def encodeBody(writer: BsonWriter, value: PiEventIdle[T], ctx: EncoderContext): Unit = { + + writer.writeName( piiN ) + ctx.encodeWithChildContext( piiCodec, writer, value.i ) + + writer.writeName( timeN ) + ctx.encodeWithChildContext( metaCodec, writer, value.metadata ) + } + + override def decodeBody(reader: BsonReader, ctx: DecoderContext): PiEventIdle[T] = { + + reader.readName( piiN ) + val pii: PiInstance[T] = ctx.decodeWithChildContext( piiCodec, reader ) + + reader.readName( timeN ) + val data: PiMetadataMap = ctx.decodeWithChildContext( metaCodec, reader ) + + PiEventIdle( pii, data ) + } +} diff --git a/src/com/workflowfm/pew/simulation/Coordinator.scala b/src/com/workflowfm/pew/simulation/Coordinator.scala deleted file mode 100644 index 198065a9..00000000 --- a/src/com/workflowfm/pew/simulation/Coordinator.scala +++ /dev/null @@ -1,282 +0,0 @@ -package com.workflowfm.pew.simulation - -import akka.actor._ -import akka.util.Timeout -import akka.pattern.ask -import scala.concurrent.duration._ -import com.workflowfm.pew.simulation.metrics._ -import com.workflowfm.pew.execution._ -import scala.collection.mutable.PriorityQueue -import scala.concurrent.{ Promise, Await, ExecutionContext } -import scala.util.{ Success, Failure } - - - -object Coordinator { - case object Start - //TODO case object Stop - case class Done(time:Long,metrics:SimMetricsAggregator) - case object Ping - case class Time(time:Long) - - case class AddSim(t:Long,sim:Simulation,exe:SimulatorExecutor[_]) - case class AddSims(l:Seq[(Long,Simulation)],exe:SimulatorExecutor[_]) - case class AddSimNow(sim:Simulation,exe:SimulatorExecutor[_]) - case class AddSimsNow(l:Seq[Simulation],exe:SimulatorExecutor[_]) - - case class AddResource(r:TaskResource) - case class AddResources(l:Seq[TaskResource]) - - case class SimDone(name:String,result:String) - - case class AddTask(t:TaskGenerator, promise:Promise[TaskMetrics], resources:Seq[String]) - case object AckTask - - private case object Tick - private case object Tack - private case object Tock - - def props( - scheduler: Scheduler, - startingTime: Long = 0L, - timeoutMillis: Int = 50 - )(implicit system: ActorSystem, executionContext:ExecutionContext - ): Props = Props(new 
Coordinator(scheduler,startingTime,timeoutMillis)(system,executionContext)) -} - -class Coordinator( - scheduler :Scheduler, - startingTime:Long, - timeoutMillis:Int -)( - implicit system: ActorSystem, - implicit val executionContext:ExecutionContext -) extends Actor { - - import scala.collection.mutable.Queue - - sealed trait Event extends Ordered[Event] { - def time:Long - def compare(that:Event) = { - that.time.compare(time) - } - } - case class FinishingTask(override val time:Long,task:Task) extends Event - case class StartingSim(override val time:Long,simulation:Simulation,executor:SimulatorExecutor[_]) extends Event - - var resourceMap :Map[String,TaskResource] = Map[String,TaskResource]() ///: resources){ case (m,r) => m + (r.name -> r)} - var simulations :Map[String,SimulatorExecutor[_]] = Map[String,SimulatorExecutor[_]]() - val tasks :Queue[Task] = Queue() - - val events = new PriorityQueue[Event]() - - var starter:Option[ActorRef] = None - var time = startingTime - var taskID = 0L - - val metrics = new SimMetricsAggregator() - - //implicit val timeout = Timeout(timeoutMillis.millis) - - def addResource(r:TaskResource) = if (!resourceMap.contains(r.name)) { - println(s"[$time] Adding resource ${r.name}") - resourceMap += r.name -> r - metrics += r - } - - protected def handleEvent(event:Event) = event match { - //println("["+time+"] ========= Event! ========= ") - // A task is finished - case FinishingTask(t,task) if (t == time) => { - // Unbind the resources - //resourceMap map { case (n,r) => (n,resourceUpdate(r)) } TODO why was that better originally? - task.taskResources(resourceMap).foreach(_.finishTask(time)) - // Mark the task as completed - // This will cause workflows to reduce and maybe produce more tasks - task.complete(metrics.taskMap.getOrElse(task.id, TaskMetrics(task).start(time - task.duration))) - } - // A simulation (workflow) is starting now - case StartingSim(t,sim,exec) if (t == time)=> startSimulation(time,sim,exec) - case _ => println(s"[$time] <*> <*> <*> Failed to handle event: $event") - } - - protected def startSimulation(t:Long, s:Simulation, e:SimulatorExecutor[_]) :Boolean = { - if (t == time) { - println("["+time+"] Starting simulation: \"" + s.name +"\".") - metrics += (s,t) - s.run(e).onComplete({ - case Success(res) => { - stopSimulation(s.name, res.toString) - println("*** Result of " + s.name + ": " + res) - } - case Failure(ex) => { - stopSimulation(s.name, ex.getLocalizedMessage) - println("*** Exception in " + s.name + ": ") - ex.printStackTrace() - } - }) - - simulations += (s.name -> e) - true - } else false - } - - protected def stopSimulation(name:String, result:String) = { - simulations -= name - (metrics^name) (_.done(result,time)) - println(s"[$time] Simulation $name reported done.") - } - - // protected def updateSimulation(s:String, f:WorkflowMetricTracker=>WorkflowMetricTracker) = simulations = simulations map { - // case (n,m,e) if n.equals(s) => (n,f(m),e) - // case x => x - // } - - protected def resourceIdle(r:TaskResource) = if (r.isIdle) { - (metrics^r)(_.idle(time-r.lastUpdate)) - r.update(time) - } - - protected def startTask(task:Task) { - tasks.dequeueFirst (_.id == task.id) - // Mark the start of the task in the metrics - (metrics^task.id)(_.start(time)) - task.taskResources(resourceMap) map { r => - // Update idle time if resource has been idle - if (r.isIdle) (metrics^r)(_.idle(time-r.lastUpdate)) - // Bind each resource to this task - r.startTask(task, time) - // Add the task and resource cost to the resource metrics 
- (metrics^r)(_.task(task, r.costPerTick)) - } - // Add the task to the simulation metrics - (metrics^task.simulation)(_.task(task).addDelay(time - task.created)) - // Generate a FinishTask event to be triggered at the end of the event - events += FinishingTask(time+task.duration,task) - } - - /** - * Runs all tasks that require no resources - */ - protected def runNoResourceTasks() = tasks.dequeueAll(_.resources.isEmpty).map(startTask) - - protected def workflowsReady = simulations forall (_._2.simulationReady) - - /** - * First half of a clock tick - * We need two halves because we want to give the workflows a chance to reduce - * and register new tasks to the Coordinator. The Coordinator must receive those - * through messages between now and the Tack message. - */ - protected def tick :Unit = { - - // Are events pending? - if (!events.isEmpty) { - // Grab the first event - val event = events.dequeue() - // Did we somehow go past the event time? This should never happen. - if (event.time < time) { - println(s"[$time] *** Unable to handle past event for time: [${event.time}]") - } else { - // Jump ahead to the event time. This is a priority queue so we shouldn't skip any events - time = event.time - handleEvent(event) - - // Handle all events that are supposed to happen now, so that we free resources for the Scheduler - while (events.headOption.map(_.time == time).getOrElse(false)) - handleEvent(events.dequeue) - } - } - // val startedActors = res filter (_._1 == true) map (_._2.executor) - // for (a <- startedActors) - // a ? AkkaExecutor.Ping - - self ! Coordinator.Tack - } - - - protected def tack :Unit = { - println(s"[$time] Waiting for workflow progress...") - Thread.sleep(timeoutMillis) - - if (!workflowsReady) { - self ! Coordinator.Tack - // val simActors = simulations map (_._2.executor) - // for (a <- simActors) - // a ? AkkaExecutor.Ping} - - } else { - //println("################### pass!!! #######################") - self ! Coordinator.Tock - } - } - - - protected def tock :Unit = { - // Make sure we run tasks that need no resources - runNoResourceTasks() - - // Assign the next tasks - scheduler.getNextTasks(tasks, time, resourceMap).foreach(startTask) - // Update idle resources - resourceMap.values.foreach(resourceIdle) - - - // We finish if there are no events, no tasks, and all workflows have reduced - // Actually all workflows must have reduced at this stage, but we check anyway - //println(s"!TOCK! Events:${events.size} Tasks:${tasks.size} Workflows:$workflowsReady Simulations:${simulations.size} ") - if (events.isEmpty && tasks.isEmpty && workflowsReady) { // && simulations.isEmpty) { //&& resources.forall(_.isIdle) - println("["+time+"] All events done. All tasks done. All workflows idle.") // All simulations done..") - metrics.ended - // Tell whoever started us that we are done - starter map { a => a ! Coordinator.Done(time,metrics) } - - } else { - self ! 
Coordinator.Tick - } - } - - def start(a:ActorRef) = if (starter.isEmpty) { - starter = Some(a) - metrics.started - tick - } - - def receive = { - case Coordinator.AddSim(t,s,e) => events += StartingSim(t,s,e) - case Coordinator.AddSims(l,e) => events ++= l map { case (t,s) => StartingSim(t,s,e) } - case Coordinator.AddSimNow(s,e) => events += StartingSim(time,s,e) - case Coordinator.AddSimsNow(l,e) => events ++= l map { s => StartingSim(time,s,e) } - - case Coordinator.AddResource(r) => addResource(r) - case Coordinator.AddResources(r) => r foreach addResource - - case Coordinator.AddTask(gen,promise,resources) => { - val creation = if (gen.createTime >= 0) gen.createTime else time - // Create the task - val t = gen.create(taskID,creation,resources:_*) - // This is ok only if the simulation is running in memory - // Promises cannot be sent over messages otherwise as they are not serializable - promise.completeWith(t.promise.future) - // Make sure the next taskID will be fresh - taskID = taskID + 1L - println(s"[$time] Adding task [$taskID] created at [$creation]: ${t.name} (${t.simulation}).") - - // Calculate the cost of all resource usage. We only know this now! - val resourceCost = (0L /: t.taskResources(resourceMap)) { case (c,r) => c + r.costPerTick * t.duration } - t.addCost(resourceCost) - - metrics += t - tasks += t - - //sender() ! Coordinator.AckTask(t) //uncomment this to acknowledge AddTask - } - - case Coordinator.Start => start(sender) - case Coordinator.Tick => tick - case Coordinator.Tack => tack - case Coordinator.Tock => tock - case Coordinator.Ping => sender() ! Coordinator.Time(time) - - } -} diff --git a/src/com/workflowfm/pew/simulation/Generators.scala b/src/com/workflowfm/pew/simulation/Generators.scala deleted file mode 100644 index a52b4bde..00000000 --- a/src/com/workflowfm/pew/simulation/Generators.scala +++ /dev/null @@ -1,16 +0,0 @@ -package com.workflowfm.pew.simulation - -trait ValueGenerator[T] { - def get :T - def estimate :T -} - -case class ConstantGenerator[T](value :T) extends ValueGenerator[T] { - def get = value - def estimate = value -} - -case class UniformGenerator(min :Int, max:Int) extends ValueGenerator[Int] { - def get = new util.Random().nextInt(max-min) + min - def estimate = (max + min) / 2 -} \ No newline at end of file diff --git a/src/com/workflowfm/pew/simulation/Scheduler.scala b/src/com/workflowfm/pew/simulation/Scheduler.scala deleted file mode 100644 index 1c808186..00000000 --- a/src/com/workflowfm/pew/simulation/Scheduler.scala +++ /dev/null @@ -1,167 +0,0 @@ -package com.workflowfm.pew.simulation - -import scala.annotation.tailrec - -trait Scheduler { - def getNextTasks(tasks:Seq[Task], currentTime:Long, resourceMap:Map[String,TaskResource]) :Seq[Task] - - def isIdleResource(r:String, resourceMap:Map[String,TaskResource]) = resourceMap.get(r) match { - case None => false - case Some(s) => s.isIdle - } - -} - -object DefaultScheduler extends Scheduler { - import scala.collection.immutable.Queue - - override def getNextTasks(tasks:Seq[Task], currentTime:Long, resourceMap:Map[String,TaskResource]) :Seq[Task] = - findNextTasks(currentTime, resourceMap, resourceMap.mapValues(Schedule(_)), tasks.toList, Queue()) - - @tailrec - def findNextTasks( - currentTime:Long, - resourceMap:Map[String,TaskResource], - schedules:Map[String,Schedule], - tasks:List[Task], - result:Queue[Task] - ):Seq[Task] = tasks match { - case Nil => result - case t :: rest => { - val start = Schedule.merge(t.resources.flatMap(schedules.get(_))) ? 
(currentTime,t) - val schedules2 = (schedules /: t.resources) { - case (s,r) => s + (r -> (s.getOrElse(r,Schedule(Nil)) +> (start,t))) - } - val result2 = if (start == currentTime && t.taskResources(resourceMap).forall(_.isIdle)) result :+ t else result - findNextTasks(currentTime, resourceMap, schedules2, rest, result2) - } - } -} - -case class Schedule(tasks:List[(Long,Long)]) { - - def +(start:Long,end:Long):Option[Schedule] = Schedule.add(start,end,tasks) match { - case None => None - case Some(l) => Some(copy(tasks=l)) - } - - def +>(start:Long,end:Long):Schedule = Schedule.add(start,end,tasks) match { - case None => { - System.err.println(s"*** Unable to add ($start,$end) to Schedule: $tasks") - this - } - case Some(l) => copy(tasks=l) - } - - def +>(startTime:Long, t:Task):Schedule = this +> (startTime,startTime+t.estimatedDuration) - - def ?(currentTime:Long, t:Task):Long = Schedule.fit(currentTime,t.estimatedDuration,tasks) - - - def ++(s:Schedule):Schedule = Schedule(Schedule.merge(tasks,s.tasks)) - - def isValid = Schedule.isValid(tasks) -} - -object Schedule { - import scala.collection.immutable.Queue - - def apply(r:TaskResource):Schedule = r.currentTask match { - case None => Schedule(List()) - case Some((s,t)) => Schedule((s,s + t.estimatedDuration) :: Nil) - } - - @tailrec - def add ( - start:Long, end:Long, - tasks:List[(Long,Long)], - result:Queue[(Long,Long)] = Queue[(Long,Long)]() - ):Option[List[(Long,Long)]] = tasks match { - case Nil => Some(result :+ (start,end) toList) - case (l:Long,r:Long) :: t => - if (l > end) Some(result ++ ((start,end) :: (l,r) :: t) toList) - else if (l == end) Some(result ++ ((start,r) :: t) toList) - else if (r < start) add(start,end,t,result :+ ((l,r))) - else if (r == start) add(l,end,t,result) - else /* if (r >= end) */ None - //else None - } - - @tailrec - def fit ( - start:Long, - duration:Long, - tasks:List[(Long,Long)] - ):Long = tasks match { - case Nil => start - case (l,_) :: _ if (l >= start + duration) => start - case (_,r) :: t => fit(r,duration,t) - } - - @tailrec - def merge( - g1:List[(Long,Long)], - g2:List[(Long,Long)], - result:Queue[(Long,Long)] = Queue[(Long,Long)]() - ):List[(Long,Long)] = g1 match { - case Nil => result ++ g2 toList - case (l1,r1) :: t1 => g2 match { - case Nil => result ++ g1 toList - case (l2,r2) :: t2 => { - if (r2 < l1) merge(g1,t2,result :+ (l2,r2)) - else if (r1 < l2) merge(t1,g2,result :+ (l1,r1)) - else if (r1 == r2) merge(t1,t2,result :+ (math.min(l1,l2),r1)) - else if (r2 == l1) merge((l2,r1)::t1,t2,result) - else if (r1 == l2) merge(t1,(l1,r2)::t2,result) - else if (r1 < r2) merge(t1,(math.min(l1,l2),r2)::t2,result) - else /* if (r1 > r2)*/ merge((math.min(l1,l2),r1)::t1,t2,result) - } - } - } - - def merge(schedules:Seq[Schedule]):Schedule = { - (Schedule(List()) /: schedules)(_ ++ _) - } - - @deprecated("No longer using gaps in Schedule","1.2.0") - def fitInGaps ( - start:Long, end:Long, - gaps:List[(Long,Long)], - result:Queue[(Long,Long)] = Queue[(Long,Long)]() - ):Option[List[(Long,Long)]] = gaps match { - case Nil => Some(result.toList) - case (l:Long,r:Long) :: t => - if (l == start && end == r) fitInGaps(start,end,t,result) // event fits exactly - else if (l == start && end <= r) fitInGaps(start,end,t,result :+ ((end,r)) )// add an event at the beginning of the gap - else if (l <= start && end == r) fitInGaps(start,end,t,result :+ ((l,start)) ) // add an event at the end of the gaps - else if (l < start && end < r) fitInGaps(start,end,t,result :+ ((l,start)) :+ ((end,r)) ) // add 
an event within a gap - else if (start > r || end < l) fitInGaps(start,end,t,result :+ ((l,r)) ) - else None - } - - @deprecated("No longer using gaps in Schedule","1.2.0") - // we assume all gap lists finish with a (t,Long.MaxValue) gap - def mergeGaps ( - g1:List[(Long,Long)], - g2:List[(Long,Long)], - result:Queue[(Long,Long)] = Queue[(Long,Long)]() - ):List[(Long,Long)] = g1 match { - case Nil => result toList - case (l1,r1) :: t1 => g2 match { - case Nil => result toList - case (l2,r2) :: t2 => { - if (r2 <= l1) mergeGaps(g1,t2,result) - else if (r1 <= l2) mergeGaps (t1,g2,result) - else if (r1 == Long.MaxValue && r1 == r2) result :+ (math.max(l1,l2),r1) toList - else if (r2 <= r1) mergeGaps(g1,t2,result :+ (math.max(l1,l2),r2)) - else /* if (r1 < r2) */ mergeGaps(t1,g2,result :+ (math.max(l1,l2),r1)) - } - } - } - - def isValid(gaps:List[(Long,Long)], end:Long = Long.MinValue):Boolean = gaps match { - case Nil => true - case (l,r) :: t if end < l && l < r => isValid(t, r) - case _ => false - } -} diff --git a/src/com/workflowfm/pew/simulation/Simulation.scala b/src/com/workflowfm/pew/simulation/Simulation.scala deleted file mode 100644 index a939f4be..00000000 --- a/src/com/workflowfm/pew/simulation/Simulation.scala +++ /dev/null @@ -1,40 +0,0 @@ -package com.workflowfm.pew.simulation - -import akka.actor._ -import scala.concurrent.duration._ -import scala.concurrent.Await -import scala.concurrent.Future -import com.workflowfm.pew.execution.AkkaExecutor -import scala.util.Success -import scala.util.Failure -import scala.concurrent.ExecutionContext -import com.workflowfm.pew.PiProcess -import com.workflowfm.pew.execution.SimulatorExecutor - - -abstract class Simulation(val name:String) { //extends SimulationMetricTracker - def run(executor:SimulatorExecutor[_]):Future[Any] - def getProcesses():Seq[PiProcess] -} - -class TaskSimulation(simulationName:String, coordinator:ActorRef, resources:Seq[String], duration:ValueGenerator[Long], val cost:ValueGenerator[Long]=new ConstantGenerator(0L), interrupt:Int=(-1), priority:Task.Priority=Task.Medium)(implicit system: ActorSystem) extends Simulation(simulationName) { - def run(executor:SimulatorExecutor[_]) = { - TaskGenerator(simulationName + "Task", simulationName, duration, cost, interrupt, priority).addTo(coordinator,resources :_*) - } - override def getProcesses() = Seq() -} - -class MockExecutor extends Actor { - def receive = { - case AkkaExecutor.Ping => sender() ! 
AkkaExecutor.Ping - } -} - -trait SimulatedProcess { this:PiProcess => - def simulationName:String - override def isSimulatedProcess = true - - def simulate[T](gen:TaskGenerator, coordinator:ActorRef, result:T, resources:String*)(implicit system: ActorSystem, context: ExecutionContext = ExecutionContext.global):Future[T] ={ - gen.addTo(coordinator, resources:_*).map(_ => result) - } -} diff --git a/src/com/workflowfm/pew/simulation/Task.scala b/src/com/workflowfm/pew/simulation/Task.scala deleted file mode 100644 index 8d64909a..00000000 --- a/src/com/workflowfm/pew/simulation/Task.scala +++ /dev/null @@ -1,101 +0,0 @@ -package com.workflowfm.pew.simulation - -import akka.actor._ -import akka.pattern.ask -import akka.util.Timeout -import scala.concurrent.Promise -import scala.concurrent.duration._ -import com.workflowfm.pew.simulation.metrics._ - -object Task { - sealed trait Priority extends Ordered[Priority] { - def value:Int - def compare(that:Priority) = this.value - that.value - } - case object Highest extends Priority { val value = 5 } - case object High extends Priority { val value = 4 } - case object Medium extends Priority { val value = 3 } - case object Low extends Priority { val value = 2 } - case object VeryLow extends Priority { val value = 1 } -} - -class Task ( - val id:Long, - val name:String, - val simulation:String, - val created:Long, - val resources:Seq[String], - val duration:Long, - val estimatedDuration:Long, - val initialCost:Long, - val interrupt:Int=Int.MaxValue, - val priority:Task.Priority=Task.Medium - ) extends Ordered[Task] { - - val promise:Promise[TaskMetrics] = Promise() - - var cost:Long = initialCost - - // execute will be called once by each associated TaskResource - def complete(metrics:TaskMetrics) = if (!promise.isCompleted) promise.success(metrics) - - def addCost(extra:Long) = cost += extra - - def nextPossibleStart(currentTime:Long, resourceMap:Map[String,TaskResource]) = { - (currentTime /: resources){ case (i,rN) => resourceMap.get(rN) match { - case None => throw new RuntimeException(s"Resource $rN not found!") - case Some(r) => Math.max(i,r.nextAvailableTimestamp(currentTime)) - }} - } - - def taskResources(resourceMap:Map[String,TaskResource]) = resources flatMap (resourceMap.get(_)) - - - def compare(that:Task) = { - lazy val cPriority = that.priority.compare(this.priority) - lazy val cResources = that.resources.size.compare(this.resources.size) - lazy val cAge = this.created.compare(that.created) - lazy val cDuration = that.estimatedDuration.compare(this.estimatedDuration) - lazy val cInterrupt = this.interrupt.compare(that.interrupt) - lazy val cID = this.id.compare(that.id) - - if (cPriority != 0) cPriority - else if (cAge != 0) cAge - else if (cResources != 0) cResources - else if (cDuration != 0) cDuration - else if (cInterrupt != 0) cInterrupt - else cID - } - - override def toString = { - val res = resources.mkString(",") - s"Task($name)($res)" - } -} - -case class TaskGenerator ( - name :String, - simulation:String, - duration:ValueGenerator[Long], - cost:ValueGenerator[Long], - interrupt:Int=(-1), - priority:Task.Priority=Task.Medium, - createTime:Long=(-1) -) { - def create(id:Long, time:Long, resources:String*) = new Task(id,name,simulation,time,resources,duration.get,duration.estimate,cost.get,interrupt,priority) - def withPriority(p:Task.Priority) = copy(priority = p) - def withInterrupt(int:Int) = copy(interrupt = int) - def withDuration(dur:ValueGenerator[Long]) = copy(duration = dur) - def withName(n:String) = copy(name = n) 
- def withSimulation(s:String) = copy(simulation=s) - def withCreationTime(t:Long) = copy(createTime=t) - - def addTo(coordinator:ActorRef, resources:String*)(implicit system: ActorSystem) = { - //implicit val timeout = Timeout(1.second) - // change this to ? to require an acknowledgement - val promise = Promise[TaskMetrics]() - coordinator ! Coordinator.AddTask(this,promise,resources) - promise.future - } - -} diff --git a/src/com/workflowfm/pew/simulation/TaskResource.scala b/src/com/workflowfm/pew/simulation/TaskResource.scala deleted file mode 100644 index 5ea38c56..00000000 --- a/src/com/workflowfm/pew/simulation/TaskResource.scala +++ /dev/null @@ -1,62 +0,0 @@ -package com.workflowfm.pew.simulation - -import scala.collection.mutable.Queue -import com.workflowfm.pew.simulation.metrics._ - -object TaskResource { - sealed trait State - case object Busy extends State - case class Finished(t:Task) extends State - case object Idle extends State -} - -class TaskResource(val name:String,val costPerTick:Int) { - var currentTask :Option[(Long,Task)] = None - var lastUpdate :Long = 1 - - def isIdle :Boolean = currentTask == None - - def finishTask(currentTime:Long) :Option[Task] = currentTask match { - case None => { - //println("["+currentTime+"] \"" + name + "\" is idle.") - None - } - case Some((startTime,task)) => - if (currentTime >= startTime + task.duration) { - println("["+currentTime+"] \"" + name + "\" detached from task \"" + task.name + " (" + task.simulation +")\".") - currentTask = None - lastUpdate = currentTime - Some(task) - } - else { - //println("["+currentTime+"] \"" + name + "\" is attached to task \"" + task.name + " (" + task.simulation +")\" - " + (startTime + duration - currentTime) + " ticks remaining.") - None - } - } - - def startTask(task:Task,currentTime:Long) = { - currentTask match { - case None => { - println("["+currentTime+"] \"" + name + "\" is NOW attached to task \"" + task.name + " (" + task.simulation +")\" - " + task.duration + " ticks remaining.") - currentTask = Some(currentTime,task) - lastUpdate = currentTime - true - } - case Some((_,currentTask)) => { - println("["+currentTime+"] <*> <*> <*> ERROR <*> <*> <*> \"" + name + "\" tried to attach to \"" + task.name + " (" + task.simulation +")\" but is already attached to \"" + currentTask.name + "\"!") - false - } - } - } - - - def nextAvailableTimestamp(currentTime:Long) :Long = currentTask match { - case None => currentTime - case Some((startTime,t)) => { - startTime + t.estimatedDuration - } - } - - - def update(time:Long) = lastUpdate = time -} \ No newline at end of file diff --git a/src/com/workflowfm/pew/simulation/metrics/Actor.scala b/src/com/workflowfm/pew/simulation/metrics/Actor.scala deleted file mode 100644 index cba860f4..00000000 --- a/src/com/workflowfm/pew/simulation/metrics/Actor.scala +++ /dev/null @@ -1,52 +0,0 @@ -package com.workflowfm.pew.simulation.metrics - -import akka.actor.Props -import akka.actor.Actor -import akka.actor.ActorRef -import akka.actor.ActorSystem -import com.workflowfm.pew.execution.SimulatorExecutor -import com.workflowfm.pew.simulation.Simulation -import com.workflowfm.pew.simulation.Coordinator - - -object SimMetricsActor { - case class Start(coordinator:ActorRef) - case class StartSims(coordinator:ActorRef,sims:Seq[(Long,Simulation)],executor:SimulatorExecutor[_]) - case class StartSimsNow(coordinator:ActorRef,sims:Seq[Simulation],executor:SimulatorExecutor[_]) - - def props(m:SimMetricsOutput, callbackActor:Option[ActorRef]=None)(implicit system: 
ActorSystem): Props = Props(new SimMetricsActor(m,callbackActor)(system)) -} - -// Provide a callbackActor to get a response when we are done. Otherwise we'll shutdown the ActorSystem - -class SimMetricsActor(m:SimMetricsOutput, callbackActor:Option[ActorRef])(implicit system: ActorSystem) extends Actor { - var coordinator:Option[ActorRef] = None - - def receive = { - case SimMetricsActor.Start(coordinator) if this.coordinator.isEmpty => { - this.coordinator = Some(coordinator) - coordinator ! Coordinator.Start - } - - case SimMetricsActor.StartSims(coordinator,sims,executor) if this.coordinator.isEmpty => { - this.coordinator = Some(coordinator) - coordinator ! Coordinator.AddSims(sims,executor) - coordinator ! Coordinator.Start - } - - case SimMetricsActor.StartSimsNow(coordinator,sims,executor) if this.coordinator.isEmpty => { - this.coordinator = Some(coordinator) - coordinator ! Coordinator.AddSimsNow(sims,executor) - coordinator ! Coordinator.Start - } - - case Coordinator.Done(t:Long,ma:SimMetricsAggregator) if this.coordinator == Some(sender) => { - this.coordinator = None - m(t,ma) - callbackActor match { - case None => system.terminate() - case Some(actor) => actor ! Coordinator.Done(t,ma) - } - } - } -} diff --git a/src/com/workflowfm/pew/simulation/metrics/Measure.scala b/src/com/workflowfm/pew/simulation/metrics/Measure.scala deleted file mode 100644 index 7f2a6e3f..00000000 --- a/src/com/workflowfm/pew/simulation/metrics/Measure.scala +++ /dev/null @@ -1,144 +0,0 @@ -package com.workflowfm.pew.simulation.metrics - -import com.workflowfm.pew.simulation._ - - -//trait Metrics { -// def stringValues :List[String] -// def values(sep:String=",") = stringValues.mkString(sep) -//} -// -//class MetricTracker[T <: Metrics](init :T) { -// var metrics :T = init -// def <~(u:T=>T) :this.type = { synchronized { metrics = u(metrics) } ; this } -//} -// -//object TaskMetrics { -// def header(sep:String) = List("Task","Start","Delay","Duration","Cost","Workflow","Resources").mkString(sep) -//} -case class TaskMetrics ( - id:Long, - task:String, - simulation:String, - created:Long, - started:Option[Long], - duration:Long, - cost:Long, - resources:Seq[String] - ) { - //override def stringValues = List(task,start,delay,duration,cost,workflow,"\"" + resources.mkString(",") + "\"") map (_.toString) -// def addDelay(d :Long) = copy(delay = delay + d) -// def addDuration(d :Long) = copy(duration = duration + d) -// def addCost(c:Long) = copy(cost = cost + c) - def start(st:Long) = copy(started=Some(st)) - def delay = started match { - case None => 0L - case Some(s) => s - created - } - def fullName = s"$task($simulation)" -// def done(t:Task, time:Long, cost:Long, costPerTick:Long) = { -// val st = started match { -// case None => time -// case Some(t) => t -// } -// copy( duration = duration + time - st, cost = cost + costPerTick * (time-st), resources = t.resources) -// } -} -object TaskMetrics { - def apply(task:Task):TaskMetrics = TaskMetrics(task.id, task.name, task.simulation, task.created, None, task.duration, task.cost, task.resources) -} - -case class SimulationMetrics( - name:String, - started:Long, - duration:Long, - delay:Long, - tasks:Int, - cost:Long, - result:Option[String] - ) { - def addDuration(d:Long) = copy(duration = duration + d) - def addCost(c:Long) = copy(cost = cost + c) - def addDelay(d:Long) = copy(delay = delay + d) - def task(task:Task) = copy(tasks = tasks + 1, cost = cost + task.cost) - def done(res:String, time:Long) = copy( result = Some(res), duration = duration + 
time - started ) // TODO should this and result be one? -} -object SimulationMetrics { - def apply(name:String, t:Long):SimulationMetrics = SimulationMetrics(name,t,0L,0L,0,0L,None) -} - -case class ResourceMetrics ( - name:String, - busyTime:Long, - idleTime:Long, - tasks:Int, - cost:Long - ) { - def idle(i:Long) = copy(idleTime = idleTime + i) - def task(task:Task, costPerTick:Long) = copy( - tasks = tasks + 1, - cost = cost + task.duration * costPerTick, - busyTime = busyTime + task.duration - ) -} -object ResourceMetrics { - def apply(name:String):ResourceMetrics = ResourceMetrics(name,0L,0L,0,0L) - def apply(r:TaskResource):ResourceMetrics = ResourceMetrics(r.name,0L,0L,0,0L) -} - -class SimMetricsAggregator { - import scala.collection.immutable.Map - - var start:Option[Long] = None - var end:Option[Long] = None - - def started = start match { - case None => start = Some(System.currentTimeMillis()) - case _ => () - } - - def ended = end = Some(System.currentTimeMillis()) - - val taskMap = scala.collection.mutable.Map[Long,TaskMetrics]() - val simMap = scala.collection.mutable.Map[String,SimulationMetrics]() - val resourceMap = scala.collection.mutable.Map[String,ResourceMetrics]() - - - // Set - - def +=(m:TaskMetrics):TaskMetrics = { taskMap += (m.id->m) ; m } - def +=(m:SimulationMetrics):SimulationMetrics = { simMap += (m.name->m) ; m } - def +=(m:ResourceMetrics):ResourceMetrics = { resourceMap += (m.name->m) ; m } - - def +=(task:Task):TaskMetrics = this += TaskMetrics(task) - def +=(s:Simulation,t:Long):SimulationMetrics = this += SimulationMetrics(s.name,t) - def +=(r:TaskResource):ResourceMetrics = this += ResourceMetrics(r) - - - // Update - - def ^(taskID:Long)(u:TaskMetrics=>TaskMetrics):Option[TaskMetrics] = - taskMap.get(taskID).map { m => this += u(m) } - def ^(task:Task)(u:TaskMetrics=>TaskMetrics):Option[TaskMetrics] = - taskMap.get(task.id).map { m => this += u(m) } - def ^(simulation:Simulation)(u:SimulationMetrics=>SimulationMetrics):Option[SimulationMetrics] = - simMap.get(simulation.name).map { m => this += u(m) } - def ^(simulationName:String)(u:SimulationMetrics=>SimulationMetrics):Option[SimulationMetrics] = - simMap.get(simulationName).map { m => this += u(m) } -// def ^(resource:String)(u:ResourceMetrics=>ResourceMetrics):Option[ResourceMetrics] = -// resourceMap.get(resource).map { m => this += u(m) } - def ^(resource:TaskResource)(u:ResourceMetrics=>ResourceMetrics):Option[ResourceMetrics] = - resourceMap.get(resource.name).map { m => this += u(m) } - - - // Getters - - def taskMetrics = taskMap.values.toSeq.sortBy(_.started) - def simulationMetrics = simMap.values.toSeq.sortBy(_.name) - def resourceMetrics = resourceMap.values.toSeq.sortBy(_.name) - def taskSet = taskMap.values.map(_.task).toSet[String] - - // TODO: we used to have 2 levels of sorting! 
- def taskMetricsOf(r:ResourceMetrics) = taskMap.values.toSeq.filter(_.resources.contains(r.name)).sortBy(_.started) - def taskMetricsOf(s:SimulationMetrics) = taskMap.values.toSeq.filter(_.simulation.equals(s.name)).sortBy(_.started) -} diff --git a/src/com/workflowfm/pew/simulation/metrics/Output.scala b/src/com/workflowfm/pew/simulation/metrics/Output.scala deleted file mode 100644 index 07b28ed6..00000000 --- a/src/com/workflowfm/pew/simulation/metrics/Output.scala +++ /dev/null @@ -1,147 +0,0 @@ -package com.workflowfm.pew.simulation.metrics - -import scala.collection.immutable.Queue -import com.workflowfm.pew.metrics.{ FileOutput, MetricsOutput } - -trait SimMetricsOutput extends ((Long,SimMetricsAggregator) => Unit) { - def and(h:SimMetricsOutput) = SimMetricsOutputs(this,h) -} - -case class SimMetricsOutputs(handlers:Queue[SimMetricsOutput]) extends SimMetricsOutput { - override def apply(time:Long,aggregator:SimMetricsAggregator) = handlers map (_.apply(time,aggregator)) - override def and(h:SimMetricsOutput) = SimMetricsOutputs(handlers :+ h) -} -object SimMetricsOutputs { - def apply(handlers:SimMetricsOutput*):SimMetricsOutputs = SimMetricsOutputs(Queue[SimMetricsOutput]() ++ handlers) - def none = SimMetricsOutputs() -} - - -trait SimMetricsStringOutput extends SimMetricsOutput { - val nullValue = "NULL" - - def taskHeader(separator:String) = Seq("ID","Task","Simulation","Created","Start","Delay","Duration","Cost","Resources").mkString(separator) - def taskCSV(separator:String, resSeparator:String)(m:TaskMetrics) = m match { - case TaskMetrics(id,task,sim,ct,st,dur,cost,res) => - Seq(id,task,sim,ct,MetricsOutput.formatOption(st,nullValue),m.delay,dur,cost,res.mkString(resSeparator)).mkString(separator) - } - - def simHeader(separator:String) = Seq("Name","Start","Duration","Delay","Tasks","Cost","Result").mkString(separator) - def simCSV(separator:String)(m:SimulationMetrics) = m match { - case SimulationMetrics(name,st,dur,delay,ts,c,res) => - Seq(name,st,dur,delay,ts,c,res).mkString(separator) - } - - def resHeader(separator:String) = Seq("Name","Busy","Idle","Tasks","Cost").mkString(separator) - def resCSV(separator:String)(m:ResourceMetrics) = m match { - case ResourceMetrics(name,b,i,ts,c) => - Seq(name,b,i,ts,c).mkString(separator) - } - - - def tasks(aggregator:SimMetricsAggregator,separator:String,lineSep:String="\n",resSeparator:String=";") = - aggregator.taskMetrics.map(taskCSV(separator,resSeparator)).mkString(lineSep) - def simulations(aggregator:SimMetricsAggregator,separator:String, lineSep:String="\n") = - aggregator.simulationMetrics.map(simCSV(separator)).mkString(lineSep) - def resources(aggregator:SimMetricsAggregator,separator:String, lineSep:String="\n") = - aggregator.resourceMetrics.map(resCSV(separator)).mkString(lineSep) -} - - -class SimMetricsPrinter extends SimMetricsStringOutput { - def apply(totalTicks:Long,aggregator:SimMetricsAggregator) = { - val sep = "\t| " - val lineSep = "\n" - val timeFormat = "YYYY-MM-dd HH:mm:ss.SSS" - val durFormat = "HH:mm:ss.SSS" - val nullTime = "NONE" - println( -s""" -Tasks ------ -${taskHeader(sep)} -${tasks(aggregator,sep,lineSep)} - -Simulations ------------ -${simHeader(sep)} -${simulations(aggregator,sep,lineSep)} - -Resources ---------- -${resHeader(sep)} -${resources(aggregator,sep,lineSep)} ---------- - -Started: ${MetricsOutput.formatTimeOption(aggregator.start, timeFormat, nullTime)} -Ended: ${MetricsOutput.formatTimeOption(aggregator.end, timeFormat, nullTime)} -Duration: 
${MetricsOutput.formatDuration(aggregator.start, aggregator.end, durFormat, nullTime)} -""" - ) - } -} - -class SimCSVFileOutput(path:String,name:String) extends SimMetricsStringOutput with FileOutput { - import java.io._ - - val separator = "," - val lineSep = "\n" - - def apply(totalTicks:Long,aggregator:SimMetricsAggregator) = { - val taskFile = s"$path$name-tasks.csv" - val simulationFile = s"$path$name-simulations.csv" - val resourceFile = s"$path$name-resources.csv" - writeToFile(taskFile, taskHeader(separator) + "\n" + tasks(aggregator,separator,lineSep)) - writeToFile(simulationFile, simHeader(separator) + "\n" + simulations(aggregator,separator,lineSep)) - writeToFile(resourceFile, resHeader(separator) + "\n" + resources(aggregator,separator,lineSep)) - } -} - - -class SimD3Timeline(path:String,file:String,tick:Int=1) extends SimMetricsOutput with FileOutput { - import java.io._ - - override def apply(totalTicks:Long, aggregator:SimMetricsAggregator) = { - val result = build(aggregator,System.currentTimeMillis()) - println(result) - val dataFile = s"$path$file-simdata.js" - writeToFile(dataFile, result) - } - - def build(aggregator:SimMetricsAggregator, now:Long) = { - var buf:StringBuilder = StringBuilder.newBuilder - buf.append("var tasks = [\n") - for (p <- aggregator.taskSet) buf.append(s"""\t"$p",\n""") - buf.append("];\n\n") - buf.append("var resourceData = [\n") - for (m <- aggregator.resourceMetrics) buf.append(s"""${resourceEntry(m, aggregator)}\n""") - buf.append("];\n\n") - buf.append("var simulationData = [\n") - for (m <- aggregator.simulationMetrics) buf.append(s"""${simulationEntry(m, aggregator)}\n""") - buf.append("];\n") - buf.toString - } - - def simulationEntry(s:SimulationMetrics,agg:SimMetricsAggregator) = { - val times = agg.taskMetricsOf(s).map(taskEntry).mkString(",\n") -s"""{label: "${s.name}", times: [ -$times -]},""" - } - - def resourceEntry(res:ResourceMetrics,agg:SimMetricsAggregator) = { - val times = agg.taskMetricsOf(res).map(taskEntry).mkString(",\n") -s"""{label: "${res.name}", times: [ -$times -]},""" - } - - - def taskEntry(m:TaskMetrics) = { - // we don't want it to be 0 because D3 doesn't deal with it well - val start = m.started.getOrElse(1L) * tick - val finish = (m.started.getOrElse(1L) + m.duration) * tick - val delay = m.delay * tick - s"""\t{"label":"${m.fullName}", task: "${m.task}", "id":${m.id}, "starting_time": $start, "ending_time": $finish, delay: $delay, cost: ${m.cost}}""" - } -} diff --git a/src/com/workflowfm/pew/stateless/StatelessExecutor.scala b/src/com/workflowfm/pew/stateless/StatelessExecutor.scala index 412eabfb..6240a726 100644 --- a/src/com/workflowfm/pew/stateless/StatelessExecutor.scala +++ b/src/com/workflowfm/pew/stateless/StatelessExecutor.scala @@ -1,7 +1,7 @@ package com.workflowfm.pew.stateless import akka.Done -import com.workflowfm.pew.PiObservable +import com.workflowfm.pew.stream.PiObservable import com.workflowfm.pew.execution._ import scala.concurrent.duration.Duration diff --git a/src/com/workflowfm/pew/stateless/components/ResultListener.scala b/src/com/workflowfm/pew/stateless/components/ResultListener.scala index efc90607..5c2d4899 100644 --- a/src/com/workflowfm/pew/stateless/components/ResultListener.scala +++ b/src/com/workflowfm/pew/stateless/components/ResultListener.scala @@ -1,7 +1,7 @@ package com.workflowfm.pew.stateless.components import com.workflowfm.pew.stateless.StatelessMessages.PiiLog -import com.workflowfm.pew._ +import com.workflowfm.pew.stream.{ PiObservable, SimplePiObservable, 
PiEventHandler } import org.bson.types.ObjectId import scala.language.implicitConversions diff --git a/src/com/workflowfm/pew/stateless/instances/kafka/MinimalKafkaExecutor.scala b/src/com/workflowfm/pew/stateless/instances/kafka/MinimalKafkaExecutor.scala index 089fdea4..14846512 100644 --- a/src/com/workflowfm/pew/stateless/instances/kafka/MinimalKafkaExecutor.scala +++ b/src/com/workflowfm/pew/stateless/instances/kafka/MinimalKafkaExecutor.scala @@ -4,6 +4,7 @@ import akka.Done import akka.actor.ActorSystem import akka.stream.Materializer import com.workflowfm.pew._ +import com.workflowfm.pew.stream.{ PiObservable, DelegatedPiObservable } import com.workflowfm.pew.stateless._ import com.workflowfm.pew.stateless.components.ResultListener import com.workflowfm.pew.stateless.instances.kafka.components.KafkaConnectors @@ -33,19 +34,13 @@ class MinimalKafkaExecutor( implicit val environment: KafkaExecutorEnvironment ) protected var piiStore: PiInstanceStore[ObjectId] = SimpleInstanceStore() - /** - * Initializes a PiInstance for a process execution. - * This is always and only invoked before a {@code start}, hence why it is protected. - * This separation gives a chance to PiEventHandlers to subscribe before execution starts. - * @param process The (atomic or composite) PiProcess to be executed - * @param args The PiObject arguments to be passed to the process - * @return A Future with the new unique ID that was generated - */ - override def init( process: PiProcess, args: Seq[PiObject] ): Future[ObjectId] = { + + + override def init( instance: PiInstance[_] ): Future[ObjectId] = { if (isShutdown) throw new ShutdownExecutorException( "`init` was called." ) val piiId = ObjectId.get - piiStore = piiStore.put( PiInstance( piiId, process, args:_* ) ) + piiStore = piiStore.put( instance.copy(id = piiId) ) Future.successful( piiId ) } diff --git a/src/com/workflowfm/pew/stateless/instances/kafka/components/Tracked.scala b/src/com/workflowfm/pew/stateless/instances/kafka/components/Tracked.scala index 4253631f..faac8325 100644 --- a/src/com/workflowfm/pew/stateless/instances/kafka/components/Tracked.scala +++ b/src/com/workflowfm/pew/stateless/instances/kafka/components/Tracked.scala @@ -9,7 +9,6 @@ import akka.{Done, NotUsed} import com.workflowfm.pew.stateless.StatelessMessages.AnyMsg import com.workflowfm.pew.stateless.instances.kafka.settings.KafkaExecutorEnvironment import com.workflowfm.pew.util.ClassLoaderUtil.withClassLoader -import org.apache.kafka.clients.producer.KafkaProducer import org.bson.types.ObjectId import scala.concurrent.{ExecutionContext, Future} @@ -147,9 +146,10 @@ object Tracked { * * (Note: Use `lazyProducer` to minimize the number of new Producers which are created, * this reduces the number of system resources used (such as file handles)) + * Note on the note: lazyProducer is no longer available in the latest version */ - def createProducer[K, V]( settings: ProducerSettings[K, V] ): KafkaProducer[K, V] - = withClassLoader( null ) { settings.lazyProducer } + def createProducer[K, V]( settings: ProducerSettings[K, V] ): org.apache.kafka.clients.producer.Producer[K, V] + = withClassLoader( null ) { settings.createKafkaProducer() } } /** A Tracked type which uses a `Committable` as tracking information. 
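The four new files that follow (`PiEventHandler.scala`, `PiObservable.scala`, `PiStream.scala`, `PromiseHandler.scala`) make up the reworked `com.workflowfm.pew.stream` handler API that the import changes above point to. As a reading aid, here is a rough usage sketch; it is not part of the patch. It mirrors the updated `AkkaExecutorTests` further down and assumes the Rexample fixtures (`ri` built from `pai`, `pbi`, `pci`) plus an `ActorSystem` and `ExecutionContext` in scope, as the test suite provides.

import java.util.UUID
import scala.concurrent.ExecutionContext.Implicits.global
import com.workflowfm.pew.execution.AkkaExecutor
import com.workflowfm.pew.stream.{ PrintEventHandler, ResultHandlerFactory }

// Executors no longer take their processes as constructor arguments.
val ex = new AkkaExecutor()

// subscribe now returns a Future[PiSwitch]; keep it to unsubscribe later.
val printerSwitch = ex.subscribe(new PrintEventHandler[UUID])

// A PromiseHandler (here a ResultHandler) turns the event stream of a single
// workflow into a Future holding its result.
val factory = new ResultHandlerFactory[UUID]
val result = ex.call(ri, Seq(21), factory) flatMap (_.future)
// In the tests, Rexample with Seq(21) eventually yields ("PbISleptFor2s","PcISleptFor1s").

// Stop the printer through its kill switch instead of unsubscribing by name.
printerSwitch.map(_.stop)

The same pattern appears with `CounterHandlerFactory` in the updated tests below, where the counter now sees 11 events per Rexample run instead of the previous 8.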
diff --git a/src/com/workflowfm/pew/stream/PiEventHandler.scala b/src/com/workflowfm/pew/stream/PiEventHandler.scala new file mode 100644 index 00000000..05d5d79f --- /dev/null +++ b/src/com/workflowfm/pew/stream/PiEventHandler.scala @@ -0,0 +1,43 @@ +package com.workflowfm.pew.stream + +import com.workflowfm.pew.{ PiEvent, PiEventResult } + +import java.text.SimpleDateFormat + +import scala.collection.immutable.Queue + +// Return true if the handler is done and needs to be unsubscribed. +/** A listener for [[PiEvent]]s. */ +trait PiEventHandler[KeyT] extends (PiEvent[KeyT]=>Boolean) { + /** Compose with another handler. */ + def and(h:PiEventHandler[KeyT]) = MultiPiEventHandler(this,h) +} + +/** A factory that generates a handler that is related to a particular workflow ID. + * The ID is generated by the [[com.workflowfm.pew.execution.ProcessExecutor]] so we do not know it in advance. + */ +trait PiEventHandlerFactory[T,H <: PiEventHandler[T]] { + def build(id:T):H +} + +/** Example of a [[PiEventHandler]] that simply prints a string representation of the event to `System.err`. */ +class PrintEventHandler[T] extends PiEventHandler[T] { + val formatter = new SimpleDateFormat("YYYY-MM-dd HH:mm:ss.SSS") + override def apply(e:PiEvent[T]) = { + val time = formatter.format(e.rawTime) + System.err.println(s"[$time] ${e.asString}") + false + } +} + +/** A [[PiEventHandler]] consisting of a queue of multiple handlers. */ +case class MultiPiEventHandler[T](handlers:Queue[PiEventHandler[T]]) extends PiEventHandler[T] { + override def apply(e:PiEvent[T]) = handlers map (_(e)) forall (_ == true) + override def and(h:PiEventHandler[T]) = MultiPiEventHandler(handlers :+ h) +} + +object MultiPiEventHandler { + def apply[T](handlers:PiEventHandler[T]*):MultiPiEventHandler[T] = MultiPiEventHandler[T](Queue[PiEventHandler[T]]() ++ handlers) +} + + diff --git a/src/com/workflowfm/pew/stream/PiObservable.scala b/src/com/workflowfm/pew/stream/PiObservable.scala new file mode 100644 index 00000000..b9b328ce --- /dev/null +++ b/src/com/workflowfm/pew/stream/PiObservable.scala @@ -0,0 +1,63 @@ +package com.workflowfm.pew.stream + +import com.workflowfm.pew.PiEvent + +import scala.concurrent.{ Promise, Future, ExecutionContext } + +/** Has the ability to publish [[PiEvent]]s. + * This is separate from [[PiObservable]] as in some cases publishing events + * and handling listeners happens in 2 different places. + */ +trait PiPublisher[T] { + protected def publish(evt:PiEvent[T]):Unit +} + +/** A kill switch allowing us to stop a [[PiEventHandler]]. */ +trait PiSwitch { + def stop:Unit +} + +/** Anything that can be observed by a [[PiEventHandler]]. + * This is separate from [[PiPublisher]] as in some cases publishing events + * and handling listeners happens in 2 different places. + */ +trait PiObservable[T] { + /** Subscribes a [[com.workflowfm.pew.stream.PiEventHandler]] to observe. + * @param handler the handler to subscribe + * @return the [[com.workflowfm.pew.stream.PiSwitch]] that allows us to stop/unsubscribe the subscribed handler + */ + def subscribe(handler:PiEventHandler[T]):Future[PiSwitch] +} + +/** A simple [[PiObservable]] and [[PiPublisher]] with a mutable map of handlers. 
*/ +trait SimplePiObservable[T] extends PiObservable[T] with PiPublisher[T] { + import collection.mutable.Map + + implicit val executionContext:ExecutionContext + + protected val handlers:Map[String,PiEventHandler[T]] = Map[String,PiEventHandler[T]]() + + override def subscribe(handler:PiEventHandler[T]):Future[PiSwitch] = Future { + val name = java.util.UUID.randomUUID.toString + handlers += (name -> handler) + Switch(name) + } + + def unsubscribe(handlerName:String):Future[Boolean] = Future { + handlers.remove(handlerName).isDefined + } + + override def publish(evt:PiEvent[T]) = { + handlers.retain((k,v) => !v(evt)) + } + + case class Switch(name:String) extends PiSwitch { + override def stop:Unit = unsubscribe(name) + } +} + +trait DelegatedPiObservable[T] extends PiObservable[T] { + val worker: PiObservable[T] + + override def subscribe( handler: PiEventHandler[T] ): Future[PiSwitch] = worker.subscribe( handler ) +} diff --git a/src/com/workflowfm/pew/stream/PiStream.scala b/src/com/workflowfm/pew/stream/PiStream.scala new file mode 100644 index 00000000..95e03c21 --- /dev/null +++ b/src/com/workflowfm/pew/stream/PiStream.scala @@ -0,0 +1,38 @@ +package com.workflowfm.pew.stream + +import akka.actor.ActorRef +import akka.stream.KillSwitch +import com.workflowfm.pew.{ PiEvent, PiEventResult } +import scala.concurrent.{ ExecutionContext, Future } +import scala.concurrent.duration.FiniteDuration +import scala.reflect.ClassTag +import uk.ac.ed.inf.ppapapan.subakka.{ HashSetPublisher, Subscriber, SubscriptionSwitch } + +class PiSubscriber[T](handler: PiEventHandler[T]) extends Subscriber[PiEvent[T]] { + var switch: Option[SubscriptionSwitch] = None + + override def onInit(publisher: ActorRef, killSwitch: SubscriptionSwitch): Unit = switch = Some(killSwitch) + override def onEvent(event: PiEvent[T]): Unit = if (handler(event)) switch.map(_.stop()) +} + +/** A [[PiSource]] that also implements [[PiPublisher]] with Akka Streams. + * Uses [[akka.stream.scaladsl.BroadcastHub]], which allows us to clone new sources and attach the + * handlers as sinks. + */ +trait PiStream[T] extends PiPublisher[T] with HashSetPublisher[PiEvent[T]] with PiObservable[T] { + implicit val tag: ClassTag[PiEvent[T]] + implicit val executionContext: ExecutionContext + implicit val timeout: FiniteDuration + + override def subscribe(handler:PiEventHandler[T]):Future[PiSwitch] = { + new PiSubscriber[T](handler).subscribeTo(self, None, timeout)(context.system,tag) map { + i => PiKillSwitch(i.killSwitch) + } + } +} + + +/** A wrapper of [[akka.stream.KillSwitch]] to stop [[PiEventHandler]]s. 
*/ +case class PiKillSwitch(switch: SubscriptionSwitch) extends PiSwitch { + override def stop = switch.stop() +} diff --git a/src/com/workflowfm/pew/stream/PromiseHandler.scala b/src/com/workflowfm/pew/stream/PromiseHandler.scala new file mode 100644 index 00000000..a1c19cf5 --- /dev/null +++ b/src/com/workflowfm/pew/stream/PromiseHandler.scala @@ -0,0 +1,76 @@ +package com.workflowfm.pew.stream + +import com.workflowfm.pew.{ PiEvent, PiException, PiFailure, PiEventResult } + +import scala.concurrent.{ Promise, Future, ExecutionContext } + + +trait PromiseHandler[T,R] extends PiEventHandler[T] { + val id:T + + protected val promise = Promise[R]() + def future = promise.future + + // class PromiseException(message:String) extends Exception(message) + + override def apply(e:PiEvent[T]) = if (e.id == this.id) update(e) match { + case PiEventResult(i,res,_) => promise.success(succeed(res)); true + case ex: PiFailure[T] => fail(ex.exception) match { + case Left(r) => promise.success(r); true + case Right(x) => promise.failure(x); true + } + case _ => false + } else false + + /** + * This should handle the event (if needed) and return it, potentially updated. + * (Default does nothing.) + */ + def update(event:PiEvent[T]) :PiEvent[T] = event + + /** + * This will be executed when the workflow completes successfully. + * @param result the result of the workflow + * @return an object of type R that will complete the promise + */ + def succeed(result:Any) :R + + /** + * This will be executed when the workflow fails due to an exception. + * @param exception the exception that occurred + * @return either an object to complete the promise successfully or an exception to fail the promise with + */ + def fail(exception:PiException[T]) :Either[R,Exception] +} + +class ResultHandler[T](override val id:T) extends PromiseHandler[T,Any] { + + override def succeed(result:Any) = result + override def fail(exception:PiException[T]) = Right(exception) +} + +class ResultHandlerFactory[T] extends PiEventHandlerFactory[T,ResultHandler[T]] { + override def build(id:T) = new ResultHandler[T](id) +} + + + +class CounterHandler[T](override val id:T) extends PromiseHandler[T,Int] { + private var counter:Int = 0 + def count = counter + + override def update(event:PiEvent[T]) = { + counter += 1 + // TODO add metadata for the counter here, because why not? 
+ event + } + + override def succeed(result:Any) = counter + override def fail(exception:PiException[T]) = Left(counter) +} + +class CounterHandlerFactory[T] extends PiEventHandlerFactory[T,CounterHandler[T]] { + override def build(id:T) = new CounterHandler[T](id) +} + + diff --git a/test/com/workflowfm/pew/execution/AkkaExecutorTests.scala b/test/com/workflowfm/pew/execution/AkkaExecutorTests.scala index df0f0e53..76dc4955 100644 --- a/test/com/workflowfm/pew/execution/AkkaExecutorTests.scala +++ b/test/com/workflowfm/pew/execution/AkkaExecutorTests.scala @@ -7,8 +7,10 @@ import org.scalatest.junit.JUnitRunner import scala.concurrent._ import scala.concurrent.duration._ import com.workflowfm.pew._ +import com.workflowfm.pew.stream._ import com.workflowfm.pew.execution._ import RexampleTypes._ +import java.util.UUID @RunWith(classOf[JUnitRunner]) class AkkaExecutorTests extends FlatSpec with Matchers with BeforeAndAfterAll with ProcessExecutorTester { @@ -30,7 +32,7 @@ class AkkaExecutorTests extends FlatSpec with Matchers with BeforeAndAfterAll wi } it should "execute atomic PbI once" in { - val ex = new AkkaExecutor(pai,pbi,pci,ri) + val ex = new AkkaExecutor() val f1 = ex.execute(pbi,Seq(1)) val r1 = await(f1) @@ -38,7 +40,7 @@ class AkkaExecutorTests extends FlatSpec with Matchers with BeforeAndAfterAll wi } it should "execute atomic PbI twice concurrently" in { - val ex = new AkkaExecutor(pai,pbi,pci,ri) + val ex = new AkkaExecutor() val f1 = ex.execute(pbi,Seq(2)) val f2 = ex.execute(pbi,Seq(1)) @@ -49,7 +51,7 @@ class AkkaExecutorTests extends FlatSpec with Matchers with BeforeAndAfterAll wi } it should "execute Rexample once" in { - val ex = new AkkaExecutor(pai,pbi,pci,ri) + val ex = new AkkaExecutor() val f1 = ex.execute(ri,Seq(21)) val r1 = await(f1) @@ -57,7 +59,7 @@ class AkkaExecutorTests extends FlatSpec with Matchers with BeforeAndAfterAll wi } it should "execute Rexample once with same timings" in { - val ex = new AkkaExecutor(pai,pbi,pci,ri) + val ex = new AkkaExecutor() val f1 = ex.execute(ri,Seq(11)) val r1 = await(f1) @@ -65,7 +67,7 @@ class AkkaExecutorTests extends FlatSpec with Matchers with BeforeAndAfterAll wi } it should "execute Rexample twice concurrently" in { - val ex = new AkkaExecutor(pai,pbi,pci,ri) + val ex = new AkkaExecutor() val f1 = ex.execute(ri,Seq(31)) val f2 = ex.execute(ri,Seq(12)) @@ -76,7 +78,7 @@ class AkkaExecutorTests extends FlatSpec with Matchers with BeforeAndAfterAll wi } it should "execute Rexample twice with same timings concurrently" in { - val ex = new AkkaExecutor(pai,pbi,pci,ri) + val ex = new AkkaExecutor() val f1 = ex.execute(ri,Seq(11)) val f2 = ex.execute(ri,Seq(11)) @@ -87,7 +89,7 @@ class AkkaExecutorTests extends FlatSpec with Matchers with BeforeAndAfterAll wi } it should "execute Rexample thrice concurrently" in { - val ex = new AkkaExecutor(pai,pbi,pci,ri) + val ex = new AkkaExecutor() val f1 = ex.execute(ri,Seq(11)) val f2 = ex.execute(ri,Seq(11)) val f3 = ex.execute(ri,Seq(11)) @@ -100,8 +102,8 @@ class AkkaExecutorTests extends FlatSpec with Matchers with BeforeAndAfterAll wi r3 should be (("PbISleptFor1s","PcISleptFor1s")) } - it should "execute Rexample twice, each with a differnt component" in { - val ex = new AkkaExecutor(pai,pbi,pci,pci2,ri,ri2) + it should "execute Rexample twice, each with a different component" in { + val ex = new AkkaExecutor() val f1 = ex.execute(ri,Seq(11)) val f2 = ex.execute(ri2,Seq(11)) @@ -113,7 +115,7 @@ class AkkaExecutorTests extends FlatSpec with Matchers with BeforeAndAfterAll wi 
it should "handle a failing atomic process" in { val p = new FailP - val ex = new AkkaExecutor(p) + val ex = new AkkaExecutor() val f1 = ex.execute(p,Seq(1)) try { @@ -127,7 +129,7 @@ class AkkaExecutorTests extends FlatSpec with Matchers with BeforeAndAfterAll wi } it should "handle a failing composite process" in { - val ex = new AkkaExecutor(pai,pbi,pcif,rif) + val ex = new AkkaExecutor() val f1 = rif(21)(ex)//ex.execute(rif,Seq(21)) try { @@ -143,23 +145,23 @@ class AkkaExecutorTests extends FlatSpec with Matchers with BeforeAndAfterAll wi } it should "execute Rexample with a CounterHandler" in { - val ex = new AkkaExecutor(pai,pbi,pci,ri) - val factory = new CounterHandlerFactory[Int]("counter") - ex.subscribe(new PrintEventHandler("printer")) + val ex = new AkkaExecutor() + val factory = new CounterHandlerFactory[UUID] + val kill = ex.subscribe(new PrintEventHandler) val f1 = ex.call(ri,Seq(21),factory) flatMap(_.future) val r1 = await(f1) - r1 should be (8) - ex.unsubscribe("printer") + r1 should be (11) + kill.map(_.stop) } it should "allow separate handlers per executor" in { - val ex = new AkkaExecutor(pai,pbi,pci,ri) - val ex2 = new AkkaExecutor(pai,pbi,pci,ri) - val factory = new CounterHandlerFactory[Int]("counter") - ex.subscribe(new PrintEventHandler("printer")) - ex2.subscribe(new PrintEventHandler("printer")) + val ex = new AkkaExecutor() + val ex2 = new AkkaExecutor() + val factory = new CounterHandlerFactory[UUID] + val k1 = ex.subscribe(new PrintEventHandler) + val k2 = ex2.subscribe(new PrintEventHandler) val f1 = ex.call(ri,Seq(99),factory) flatMap(_.future) val f2 = ex2.execute(ri,Seq(11)) @@ -168,15 +170,15 @@ class AkkaExecutorTests extends FlatSpec with Matchers with BeforeAndAfterAll wi // r2 should be (("PbISleptFor2s","PcISleptFor1s")) val r1 = await(f1) - r1 should be (8) + r1 should be (11) - ex.unsubscribe("printer") - ex2.unsubscribe("printer") + k1.map(_.stop) + k2.map(_.stop) } it should "allow separate handlers for separate workflows" in { - val ex = new AkkaExecutor(pai,pbi,pci,ri) - val factory = new CounterHandlerFactory[Int]("counter" + _) + val ex = new AkkaExecutor() + val factory = new CounterHandlerFactory[UUID] val f1 = ex.call(ri,Seq(55),factory) flatMap(_.future) val f2 = ex.call(ri,Seq(11),factory) flatMap(_.future) @@ -185,17 +187,17 @@ class AkkaExecutorTests extends FlatSpec with Matchers with BeforeAndAfterAll wi // r2 should be (("PbISleptFor2s","PcISleptFor1s")) val r1 = await(f1) - r1 should be (8) + r1 should be (11) val r2 = await(f2) - r2 should be (8) + r2 should be (11) } it should "unsubscribe handlers successfully" in { - val ex = new AkkaExecutor(pai,pbi,pci,ri) - val factory = new CounterHandlerFactory[Int]("counter" + _) - ex.subscribe(new PrintEventHandler("printerX")) - ex.unsubscribe("printerX") + val ex = new AkkaExecutor() + val factory = new CounterHandlerFactory[UUID] + val kill = ex.subscribe(new PrintEventHandler) + kill.map(_.stop) val f1 = ex.call(ri,Seq(55),factory) flatMap(_.future) val f2 = ex.call(ri,Seq(11),factory) flatMap(_.future) @@ -204,9 +206,20 @@ class AkkaExecutorTests extends FlatSpec with Matchers with BeforeAndAfterAll wi // r2 should be (("PbISleptFor2s","PcISleptFor1s")) val r1 = await(f1) - r1 should be (8) + r1 should be (11) val r2 = await(f2) - r2 should be (8) + r2 should be (11) + } + + + it should "execute a reduced Rexample instance" in { + val ex = new AkkaExecutor() + + val instance = PiInstance(0,ri,PiObject(11)).reduce.postResult(0, PiObject((1,2))).reduce + val f1 = 
ex.execute(instance) + + val r1 = await(f1) + r1 should be (("PbISleptFor1s","PcISleptFor2s")) } } diff --git a/test/com/workflowfm/pew/execution/CompositeExecutionTests.scala b/test/com/workflowfm/pew/execution/CompositeExecutionTests.scala index 3f4994c6..18e9718a 100644 --- a/test/com/workflowfm/pew/execution/CompositeExecutionTests.scala +++ b/test/com/workflowfm/pew/execution/CompositeExecutionTests.scala @@ -13,7 +13,7 @@ class CompositeExecutionTests extends FlatSpec with Matchers { implicit val context:ExecutionContext = ExecutionContext.global "SingleBlockingExecutor" should "execute C1" in { - SingleBlockingExecutor() withProcs (P1,C1) call(C1,Seq(PiPair(PiItem("OH"),PiItem("HAI!")))) should be( Some("OH++HAI!") ) + SingleBlockingExecutor() call(C1,Seq(PiPair(PiItem("OH"),PiItem("HAI!")))) should be( Some("OH++HAI!") ) } object P1 extends AtomicProcess { // X,Y -> (X++Y) @@ -36,7 +36,7 @@ class CompositeExecutionTests extends FlatSpec with Matchers { "SingleBlockingExecutor" should "execute C2" in { - SingleBlockingExecutor() withProcs (P2A,P2B,C2) call(C2,Seq(PiItem("HI:"))) should be( Some("HI:AABB") ) + SingleBlockingExecutor() call(C2,Seq(PiItem("HI:"))) should be( Some("HI:AABB") ) } object P2A extends AtomicProcess { // X -> XAA @@ -69,7 +69,7 @@ class CompositeExecutionTests extends FlatSpec with Matchers { "SingleBlockingExecutor" should "execute C3" in { - SingleBlockingExecutor() withProcs (P3A,P3B,C3) call(C3,Seq(PiItem("HI:"))) should be( Some(("HI:AARR","HI:BB")) ) + SingleBlockingExecutor() call(C3,Seq(PiItem("HI:"))) should be( Some(("HI:AARR","HI:BB")) ) } object P3A extends AtomicProcess { // X -> (XAA,XBB) diff --git a/test/com/workflowfm/pew/execution/MultiStateExecutorTests.scala b/test/com/workflowfm/pew/execution/MultiStateExecutorTests.scala index fe8736ec..f5e6d786 100644 --- a/test/com/workflowfm/pew/execution/MultiStateExecutorTests.scala +++ b/test/com/workflowfm/pew/execution/MultiStateExecutorTests.scala @@ -25,7 +25,7 @@ class MultiStateExecutorTests extends FlatSpec with Matchers with ProcessExecuto val ri = new R(pai,pbi,pci) "MultiStateExecutor" should "execute atomic PbI once" in { - val ex = new MultiStateExecutor(pai,pbi,pci,ri) + val ex = new MultiStateExecutor() val f1 = ex.execute(pbi,Seq(2)) val r1 = await(f1) @@ -33,7 +33,7 @@ class MultiStateExecutorTests extends FlatSpec with Matchers with ProcessExecuto } "MultiStateExecutor" should "execute atomic PbI twice concurrently" in { - val ex = new MultiStateExecutor(pai,pbi,pci,ri) + val ex = new MultiStateExecutor() val f1 = ex.execute(pbi,Seq(2)) val f2 = ex.execute(pbi,Seq(1)) @@ -44,7 +44,7 @@ class MultiStateExecutorTests extends FlatSpec with Matchers with ProcessExecuto } "MultiStateExecutor" should "execute Rexample once" in { - val ex = new MultiStateExecutor(pai,pbi,pci,ri) + val ex = new MultiStateExecutor() val f1 = ex.execute(ri,Seq(21)) val r1 = await(f1) @@ -52,7 +52,7 @@ class MultiStateExecutorTests extends FlatSpec with Matchers with ProcessExecuto } "MultiStateExecutor" should "execute Rexample twice concurrently" in { - val ex = new MultiStateExecutor(pai,pbi,pci,ri) + val ex = new MultiStateExecutor() val f1 = ex.execute(ri,Seq(31)) val f2 = ex.execute(ri,Seq(12)) @@ -63,7 +63,7 @@ class MultiStateExecutorTests extends FlatSpec with Matchers with ProcessExecuto } "MultiStateExecutor" should "execute Rexample twice with same timings concurrently" in { - val ex = new MultiStateExecutor(pai,pbi,pci,ri) + val ex = new MultiStateExecutor() val f1 = ex.execute(ri,Seq(11)) val 
f2 = ex.execute(ri,Seq(11)) @@ -74,7 +74,7 @@ class MultiStateExecutorTests extends FlatSpec with Matchers with ProcessExecuto } "MultiStateExecutor" should "execute Rexample thrice concurrently" in { - val ex = new MultiStateExecutor(pai,pbi,pci,ri) + val ex = new MultiStateExecutor() val f1 = ex.execute(ri,Seq(11)) val f2 = ex.execute(ri,Seq(11)) val f3 = ex.execute(ri,Seq(11)) diff --git a/test/com/workflowfm/pew/execution/SingleStateExecutorTests.scala b/test/com/workflowfm/pew/execution/SingleStateExecutorTests.scala index bc042c7a..1e1c0867 100644 --- a/test/com/workflowfm/pew/execution/SingleStateExecutorTests.scala +++ b/test/com/workflowfm/pew/execution/SingleStateExecutorTests.scala @@ -12,7 +12,7 @@ import com.typesafe.config.ConfigFactory import scala.concurrent.Await import scala.concurrent.duration.Duration import com.workflowfm.pew._ - +import com.workflowfm.pew.stream._ @RunWith(classOf[JUnitRunner]) @@ -28,20 +28,20 @@ class SingleStateExecutorTests extends FlatSpec with Matchers with ProcessExecut val rif = new R(pai,pbi,pcif) "SingleStateExecutor" should "execute Rexample concurrently" in { - val executor = new SingleStateExecutor(pai,pbi,pci,ri) - executor.subscribe(new PrintEventHandler("printer")) + val executor = new SingleStateExecutor() + executor.subscribe(new PrintEventHandler) exe(executor,ri,13)//.isEmpty should be( false ) - //exe(new SingleStateExecutor(pai,pbi,pci,ri),ri,31)//.isEmpty should be( false ) + //exe(new SingleStateExecutor(),ri,31)//.isEmpty should be( false ) } "SingleStateExecutor" should "handle a failing component process" in { - val ex = new SingleStateExecutor(pai,pbi,pcif,rif) + val ex = new SingleStateExecutor() val f1 = rif(21)(ex)//ex.execute(rif,Seq(21)) try { await(f1) } catch { - case (e:Exception) => e.getMessage.contains("Exception: Fail") should be (true) + case (e:Exception) => e.getMessage.contains("Fail") should be (true) } } @@ -54,7 +54,7 @@ class SingleStateExecutorTests extends FlatSpec with Matchers with ProcessExecut // } "SingleStateExecutor" should "execute C1" in { - val executor = new SingleStateExecutor(P1,C1) + val executor = new SingleStateExecutor() exe(executor,C1,("OH","HAI!")) should be( "OH++HAI!" 
     )
   }

@@ -78,7 +78,7 @@ class SingleStateExecutorTests extends FlatSpec with Matchers with ProcessExecut

   "SingleStateExecutor" should "execute C2" in {
-    exe(new SingleStateExecutor(P2A,P2B,C2),C2,"HI:") should be( "HI:AABB" )
+    exe(new SingleStateExecutor(),C2,"HI:") should be( "HI:AABB" )
   }

   object P2A extends AtomicProcess { // X -> XAA
@@ -111,7 +111,7 @@ class SingleStateExecutorTests extends FlatSpec with Matchers with ProcessExecut

   "SingleStateExecutor" should "execute C3" in {
-    exe(new SingleStateExecutor(P3A,P3B,C3),C3,"HI:") should be( ("HI:AARR","HI:BB") )
+    exe(new SingleStateExecutor(),C3,"HI:") should be( ("HI:AARR","HI:BB") )
   }

   object P3A extends AtomicProcess { // X -> (XAA,XBB)
diff --git a/test/com/workflowfm/pew/metrics/MetricsTests.scala b/test/com/workflowfm/pew/metrics/MetricsTests.scala
index abf06953..00af521f 100644
--- a/test/com/workflowfm/pew/metrics/MetricsTests.scala
+++ b/test/com/workflowfm/pew/metrics/MetricsTests.scala
@@ -10,8 +10,10 @@ import scala.concurrent._
 import scala.concurrent.Await
 import scala.concurrent.duration._
 import com.workflowfm.pew._
+import com.workflowfm.pew.stream._
 import com.workflowfm.pew.execution._
 import RexampleTypes._
+import java.util.UUID

 @RunWith(classOf[JUnitRunner])
 class MetricsTests extends FlatSpec with Matchers with BeforeAndAfterAll with ProcessExecutorTester {
@@ -33,27 +35,27 @@ class MetricsTests extends FlatSpec with Matchers with BeforeAndAfterAll with Pr
   }

   it should "measure things" in {
-    val handler = new MetricsHandler[Int]("metrics")
+    val handler = new MetricsHandler[UUID]

-    val ex = new AkkaExecutor(pai,pbi,pci,ri)
-    ex.subscribe(handler)
+    val ex = new AkkaExecutor()
+    val k1 = ex.subscribe(handler)

     val f1 = ex.execute(ri,Seq(11))

     await(f1)
-    ex.unsubscribe("metrics")
+    k1.map(_.stop)

     handler.keys.size shouldBe 1
     handler.processMetrics.size shouldBe 3
     handler.workflowMetrics.size shouldBe 1
-    handler.processMetricsOf(0).size shouldBe 3
+    //handler.processMetricsOf(0).size shouldBe 3 // TODO need to find a way to test this
   }

   it should "output a D3 timeline of 3 Rexample workflows" in {
-    val handler = new MetricsHandler[Int]("metrics")
+    val handler = new MetricsHandler[UUID]

-    val ex = new AkkaExecutor(pai,pbi,pci,ri)
-    ex.subscribe(handler)
+    val ex = new AkkaExecutor()
+    val k1 = ex.subscribe(handler)

     val f1 = ex.execute(ri,Seq(11))
     val f2 = ex.execute(ri,Seq(11))
@@ -66,10 +68,10 @@ class MetricsTests extends FlatSpec with Matchers with BeforeAndAfterAll with Pr
     val r3 = await(f3)
     r3 should be (("PbISleptFor1s","PcISleptFor1s"))

-    ex.unsubscribe("metrics")
+    k1.map(_.stop)

-    new MetricsPrinter[Int]()(handler)
-    new MetricsD3Timeline[Int]("resources/d3-timeline","Rexample3")(handler)
+    new MetricsPrinter[UUID]()(handler)
+    new MetricsD3Timeline[UUID]("resources/d3-timeline","Rexample3")(handler)
   }
 }
diff --git a/test/com/workflowfm/pew/mongodb/MongoExecutorTests.scala b/test/com/workflowfm/pew/mongodb/MongoExecutorTests.scala
index f40f32a1..661477b5 100644
--- a/test/com/workflowfm/pew/mongodb/MongoExecutorTests.scala
+++ b/test/com/workflowfm/pew/mongodb/MongoExecutorTests.scala
@@ -2,6 +2,7 @@ package com.workflowfm.pew.mongodb

 import akka.actor.ActorSystem
 import com.workflowfm.pew._
+import com.workflowfm.pew.stream._
 import com.workflowfm.pew.execution.RexampleTypes._
 import com.workflowfm.pew.execution._
 import org.bson.types.ObjectId
@@ -115,7 +116,7 @@ class MongoExecutorTests extends FlatSpec with Matchers with BeforeAndAfterAll w
   it should "fail properly when a workflow doesn't exist" in {
     val ex = new MongoExecutor(client, "pew", "test_exec_insts",pai,pbi,pci,pci2,ri,ri2)
     val id = new ObjectId()
-    val handler = new PromiseHandler("unitHandler",id)
+    val handler = new ResultHandler(id)
     ex.subscribe(handler)

     ex.postResult(id, 0, MetadataAtomicProcess.result(PiItem(0)))
diff --git a/test/com/workflowfm/pew/simulation/CoordinatorTests.scala b/test/com/workflowfm/pew/simulation/CoordinatorTests.scala
deleted file mode 100644
index 9925889c..00000000
--- a/test/com/workflowfm/pew/simulation/CoordinatorTests.scala
+++ /dev/null
@@ -1,204 +0,0 @@
-package com.workflowfm.pew.simulation
-
-import akka.actor.ActorSystem
-import akka.testkit.{ ImplicitSender, TestActors, TestKit }
-import org.scalatest.{ BeforeAndAfterAll, Matchers, WordSpecLike }
-import org.junit.runner.RunWith
-import org.scalatest.junit.JUnitRunner
-import scala.concurrent._
-import scala.concurrent.Await
-import scala.concurrent.duration._
-import com.workflowfm.pew._
-import com.workflowfm.pew.execution._
-import com.workflowfm.pew.simulation.metrics._
-
-
-@RunWith(classOf[JUnitRunner])
-class CoordinatorTests extends TestKit(ActorSystem("CoordinatorTests")) with WordSpecLike with Matchers with BeforeAndAfterAll with ImplicitSender {
-  implicit val executionContext = ExecutionContext.global //system.dispatchers.lookup("akka.my-dispatcher")
-  implicit val timeout:FiniteDuration = 10.seconds
-
-  override def afterAll:Unit = {
-    TestKit.shutdownActorSystem(system)
-  }
-
-  "The Coordinator" must {
-
-    val executor = new AkkaExecutor()
-    val handler = SimMetricsOutputs(new SimMetricsPrinter())
-
-    //expectNoMessage(200.millis)
-    "execute a simple task" in {
-      val resA = new TaskResource("A",1)
-      val resB = new TaskResource("B",1)
-
-      val coordinator = system.actorOf(Coordinator.props(DefaultScheduler))
-      val s = new TaskSimulation("S", coordinator, Seq("A","B"), new ConstantGenerator(2), new ConstantGenerator(2), -1, Task.Highest)
-
-      coordinator ! Coordinator.AddResources(Seq(resA,resB))
-      coordinator ! Coordinator.AddSim(1,s,executor)
-      coordinator ! Coordinator.Start
-
-      val done = expectMsgType[Coordinator.Done](20.seconds)
-      done.time should be (3L)
-    }
-
-    "execute two independent tasks in parallel" in {
-      val resA = new TaskResource("A",1)
-      val resB = new TaskResource("B",1)
-
-      val coordinator = system.actorOf(Coordinator.props(DefaultScheduler))
-      val s1 = new TaskSimulation("S1", coordinator, Seq("A"), new ConstantGenerator(2), new ConstantGenerator(2), -1, Task.Highest)
-      val s2 = new TaskSimulation("S2", coordinator, Seq("B"), new ConstantGenerator(2), new ConstantGenerator(2), -1, Task.Highest)
-
-      coordinator ! Coordinator.AddResources(Seq(resA,resB))
-      coordinator ! Coordinator.AddSim(1,s1,executor)
-      coordinator ! Coordinator.AddSim(1,s2,executor)
-      coordinator ! Coordinator.Start
-
-      val done = expectMsgType[Coordinator.Done](20.seconds)
-      done.time should be (3L)
-    }
-
-    "queue two tasks with the same resource" in {
-      val resA = new TaskResource("A",1)
-
-      val coordinator = system.actorOf(Coordinator.props(DefaultScheduler))
-      val s1 = new TaskSimulation("S1", coordinator, Seq("A"), new ConstantGenerator(2), new ConstantGenerator(2), -1, Task.Highest)
-      val s2 = new TaskSimulation("S2", coordinator, Seq("A"), new ConstantGenerator(2), new ConstantGenerator(2), -1, Task.Highest)
-
-      coordinator ! Coordinator.AddResources(Seq(resA))
-      coordinator ! Coordinator.AddSim(1,s1,executor)
-      coordinator ! Coordinator.AddSim(1,s2,executor)
-      coordinator ! Coordinator.Start
-
-      val done = expectMsgType[Coordinator.Done](20.seconds)
-      done.time should be (5L)
-    }
-
-    "measure delays and idling appropriately" in {
-      val resA = new TaskResource("A",1)
-      val resB = new TaskResource("B",1)
-
-      val coordinator = system.actorOf(Coordinator.props(DefaultScheduler))
-      val s1 = new TaskSimulation("S1", coordinator, Seq("A"), new ConstantGenerator(2), new ConstantGenerator(2), -1, Task.Highest)
-      val s2 = new TaskSimulation("S2", coordinator, Seq("A","B"), new ConstantGenerator(2), new ConstantGenerator(2), -1, Task.Medium)
-
-      coordinator ! Coordinator.AddResources(Seq(resA,resB))
-      coordinator ! Coordinator.AddSim(1,s1,executor)
-      coordinator ! Coordinator.AddSim(1,s2,executor)
-      coordinator ! Coordinator.Start
-
-      val done = expectMsgType[Coordinator.Done](20.seconds)
-      done.time should be (5L)
-      val metricB = done.metrics.resourceMetrics.find { x => x.name.equals("B") }
-      metricB should not be empty
-      metricB map { x => x.idleTime should be (2L) }
-      val metricS2T = done.metrics.taskMetrics.find { x => x.fullName.equals("S2Task(S2)") }
-      metricS2T should not be empty
-      metricS2T map { x => x.delay should be (2L) }
-    }
-
-    "measure intermediate delays and idling appropriately" in {
-      val resA = new TaskResource("A",1)
-      val resB = new TaskResource("B",1)
-
-      val coordinator = system.actorOf(Coordinator.props(DefaultScheduler))
-      val s1 = new TaskSimulation("S1", coordinator, Seq("A"), new ConstantGenerator(2), new ConstantGenerator(2), -1, Task.Highest)
-      val s2 = new TaskSimulation("S2", coordinator, Seq("B"), new ConstantGenerator(3), new ConstantGenerator(2), -1, Task.Highest)
-      val s3 = new TaskSimulation("S3", coordinator, Seq("A","B"), new ConstantGenerator(2), new ConstantGenerator(2), -1, Task.Medium)
-
-      coordinator ! Coordinator.AddResources(Seq(resA,resB))
-      coordinator ! Coordinator.AddSim(1,s1,executor)
-      coordinator ! Coordinator.AddSim(1,s2,executor)
-      coordinator ! Coordinator.AddSim(2,s3,executor)
-      coordinator ! Coordinator.Start
-
-      val done = expectMsgType[Coordinator.Done](20.seconds)
-
-      done.time should be (6L)
-      val metricA = done.metrics.resourceMetrics.find { x => x.name.equals("A") }
-      metricA should not be empty
-      metricA map { x => x.idleTime should be (1L) }
-      val metricS3T = done.metrics.taskMetrics.find { x => x.fullName.equals("S3Task(S3)") }
-      metricS3T should not be empty
-      metricS3T map { x => x.delay should be (2L) }
-    }
-
-    "run a task with no resources" in {
-      val coordinator = system.actorOf(Coordinator.props(DefaultScheduler))
-      val s1 = new TaskSimulation("S1", coordinator, Seq(), new ConstantGenerator(2), new ConstantGenerator(2), -1, Task.Highest)
-
-      coordinator ! Coordinator.AddSim(1,s1,executor)
-      coordinator ! Coordinator.Start
-
-      val done = expectMsgType[Coordinator.Done](20.seconds)
-      handler(done.time,done.metrics)
-
-      done.time should be (3)
-      done.metrics.resourceMetrics.isEmpty should be (true)
-      done.metrics.simulationMetrics.size should be (1)
-      done.metrics.taskMetrics.size should be (1)
-    }
-
-    "run multiple tasks with no resources" in {
-      val coordinator = system.actorOf(Coordinator.props(DefaultScheduler))
-      val s1 = new TaskSimulation("S1", coordinator, Seq(), new ConstantGenerator(2), new ConstantGenerator(2), -1, Task.Highest)
-      val s2 = new TaskSimulation("S2", coordinator, Seq(), new ConstantGenerator(2), new ConstantGenerator(2), -1, Task.Highest)
-
-      coordinator ! Coordinator.AddSim(1,s1,executor)
-      coordinator ! Coordinator.AddSim(1,s2,executor)
-      coordinator ! Coordinator.Start
-
-      val done = expectMsgType[Coordinator.Done](20.seconds)
-      handler(done.time,done.metrics)
-
-      done.time should be (3L)
-      done.metrics.resourceMetrics.isEmpty should be (true)
-      done.metrics.simulationMetrics.size should be (2)
-      done.metrics.taskMetrics.size should be (2)
-    }
-  }
-
-  "The MetricsActor" must {
-
-    val executor = new AkkaExecutor()
-    val handler = SimMetricsOutputs(new SimMetricsPrinter())
-
-    //expectNoMessage(200.millis)
-    "work properly" in {
-      println ("*** MetricsActor results should appear here:")
-
-      val resA = new TaskResource("A",1)
-
-      val coordinator = system.actorOf(Coordinator.props(DefaultScheduler))
-      val s = new TaskSimulation("S", coordinator, Seq("A"), new ConstantGenerator(1), new ConstantGenerator(2), -1, Task.Highest)
-
-      coordinator ! Coordinator.AddResources(Seq(resA))
-      coordinator ! Coordinator.AddSim(1,s,executor)
-
-      val metricsActor = system.actorOf(SimMetricsActor.props(handler,Some(self)))
-      metricsActor ! SimMetricsActor.Start(coordinator)
-
-      expectMsgType[Coordinator.Done](20.seconds)
-    }
-
-    "start multiple simulations with one message" in {
-      println ("*** MetricsActor multiple results should appear here:")
-
-      val resA = new TaskResource("A",1)
-
-      val coordinator = system.actorOf(Coordinator.props(DefaultScheduler))
-      val s1 = new TaskSimulation("S1", coordinator, Seq("A"), new ConstantGenerator(1), new ConstantGenerator(2), -1, Task.Highest)
-      val s2 = new TaskSimulation("S2", coordinator, Seq("A"), new ConstantGenerator(1), new ConstantGenerator(2), -1, Task.Highest)
-
-      coordinator ! Coordinator.AddResources(Seq(resA))
-
-      val metricsActor = system.actorOf(SimMetricsActor.props(handler,Some(self)))
-      metricsActor ! SimMetricsActor.StartSims(coordinator,Seq((1,s1),(1,s2)),executor)
-
-      expectMsgType[Coordinator.Done](20.seconds)
-    }
-  }
-}
-
diff --git a/test/com/workflowfm/pew/simulation/Scheduler.scala b/test/com/workflowfm/pew/simulation/Scheduler.scala
deleted file mode 100644
index fe224470..00000000
--- a/test/com/workflowfm/pew/simulation/Scheduler.scala
+++ /dev/null
@@ -1,191 +0,0 @@
-package com.workflowfm.pew.simulation
-
-import org.scalatest.{ BeforeAndAfterAll, Matchers, WordSpecLike }
-import org.junit.runner.RunWith
-import org.scalatest.junit.JUnitRunner
-
-@RunWith(classOf[JUnitRunner])
-class SchedulerTests extends WordSpecLike with Matchers with BeforeAndAfterAll
-  with TaskTester with ScheduleTester {
-
-  "The Schedule" must {
-
-    "fit a task at the edge of another" in {
-      s((1,2)) + (2,3) should be (Some(s((1,3))))
-      s((3,4)) + (2,3) should be (Some(s((2,4))))
-    }
-
-    "fit a task at the edge of two others" in {
-      s((1,2),(3,4)) + (2,3) should be (Some(s((1,4))))
-      s((1,2),(4,5)) + (2,3) should be (Some(s((1,3),(4,5))))
-      s((1,2),(4,5)) + (3,4) should be (Some(s((1,2),(3,5))))
-    }
-
-    "fit a task in gaps" in {
-      s((1,2)) + (3,4) should be (Some(s((1,2),(3,4))))
-      s((3,4)) + (1,2) should be (Some(s((1,2),(3,4))))
-      s((1,2),(3,4)) + (5,6) should be (Some(s((1,2),(3,4),(5,6))))
-      s((1,2),(5,6)) + (3,4) should be (Some(s((1,2),(3,4),(5,6))))
-      s((3,4),(5,6)) + (1,2) should be (Some(s((1,2),(3,4),(5,6))))
-    }
-
-    "not fit tasks that clash with the start of another task" in {
-      s((1,2)) + (1,3) should be (None)
-      s((1,4)) + (1,3) should be (None)
-      s((2,3)) + (1,3) should be (None)
-      s((2,4)) + (1,3) should be (None)
-    }
-
-    "not fit tasks that clash with the end of another task" in {
-      s((2,4)) + (3,4) should be (None)
-      s((3,4)) + (2,4) should be (None)
-      s((1,4)) + (1,5) should be (None)
-      s((1,5)) + (1,4) should be (None)
-      s((2,3)) + (1,3) should be (None)
-    }
-
-    "not fit tasks that overlap with another task" in {
-      s((1,3)) + (1,3) should be (None)
-      s((1,4)) + (2,3) should be (None)
-      s((2,3)) + (1,4) should be (None)
-    }
-
-    "not fit tasks that clash with two other tasks" in {
-      s((1,2),(4,6)) + (2,5) should be (None)
-      s((1,2),(4,6)) + (3,5) should be (None)
-      s((1,2),(4,6)) + (2,6) should be (None)
-      s((1,2),(4,6)) + (3,6) should be (None)
-      s((1,2),(4,6)) + (2,7) should be (None)
-      s((1,2),(4,6)) + (3,7) should be (None)
-      s((1,3),(4,6)) + (2,4) should be (None)
-    }
-
-    "merge single tasks with common start" in {
-      s((1,2)) ++ s((1,2)) should be (s((1,2)))
-      s((1,2)) ++ s((1,3)) should be (s((1,3)))
-      s((1,3)) ++ s((1,2)) should be (s((1,3)))
-    }
-
-    "merge single tasks with common finish" in {
-      s((1,3)) ++ s((2,3)) should be (s((1,3)))
-      s((2,3)) ++ s((1,3)) should be (s((1,3)))
-    }
-
-    "merge single tasks that don't overlap" in {
-      s((1,2)) ++ s((2,3)) should be (s((1,3)))
-      s((1,2)) ++ s((3,4)) should be (s((1,2),(3,4)))
-      s((1,2),(5,6)) ++ s((3,4)) should be (s((1,2),(3,4),(5,6)))
-      s((2,3)) ++ s((1,2)) should be (s((1,3)))
-      s((3,4)) ++ s((1,2)) should be (s((1,2),(3,4)))
-      s((3,4)) ++ s((1,2),(5,6)) should be (s((1,2),(3,4),(5,6)))
-    }
-
-    "merge multiple tasks with one overlapping task" in {
-      s((1,2),(3,4),(5,6)) ++ s((1,6)) should be (s((1,6)))
-      s((1,2),(3,4),(5,6)) ++ s((0,6)) should be (s((0,6)))
-      s((1,2),(3,4),(5,6)) ++ s((1,7)) should be (s((1,7)))
-      s((1,2),(3,4),(5,6)) ++ s((0,7)) should be (s((0,7)))
-      s((1,6)) ++ s((1,2),(3,4),(5,6)) should be (s((1,6)))
-      s((0,6)) ++ s((1,2),(3,4),(5,6)) should be (s((0,6)))
-      s((1,7)) ++ s((1,2),(3,4),(5,6)) should be (s((1,7)))
-      s((0,7)) ++ s((1,2),(3,4),(5,6)) should be (s((0,7)))
-    }
-
-    "merge multiple overlapping tasks" in {
-      s((1,2),(3,4),(5,6)) ++ s((2,3),(4,5),(6,7)) should be (s((1,7)))
-      s((1,2),(3,4),(5,6)) ++ s((2,3),(4,5)) should be (s((1,6)))
-    }
-  }
-
-  "The DefaultScheduler" must {
-
-    "select a single task" in {
-      val m = new TestResourceMap("A")
-      m.s(t(1L,Seq("A"))) should be (Seq(1L))
-    }
-
-    "select multiple tasks" in {
-      val m = new TestResourceMap("A","B")
-      m.s(
-        t(1L,Seq("A")),
-        t(2L,Seq("B"))
-      ) should be (Seq(1L,2L))
-    }
-
-    "select an earlier task" in {
-      val m = new TestResourceMap("A")
-      m.s(
-        t(1L,Seq("A"),Task.Medium,2L),
-        t(2L,Seq("A"),Task.Medium,1L)) should be (Seq(2L))
-    }
-
-    "not select a blocked task" in {
-      val m = new TestResourceMap("A","B") + ("B",1L)
-      m.s(
-        t(1L,Seq("A","B"),Task.Highest),
-        t(2L,Seq("A"),Task.VeryLow,0L,2L)) should be (Nil)
-    }
-
-    "select a lower priority task if it will finish on time" in {
-      val m = new TestResourceMap("A","B") + ("B",1L)
-      m.s(
-        t(1L,Seq("A","B"),Task.Highest),
-        t(2L,Seq("A"),Task.VeryLow)) should be (List(2L))
-    }
-
-    "not block higher priority tasks" in {
-      val m = new TestResourceMap("A","B") + ("B",1L)
-      //DefaultScheduler.nextEstimatedTaskStart(t(1L,Seq("A","B"),Task.Highest), 0L, m.m,Seq( t(1L,Seq("A","B"),Task.Highest),t(2L,Seq("A"),Task.VeryLow,0L,100L))) should be (1L)
-      m.s(
-        t(1L,Seq("A","B"),Task.Highest),
-        t(2L,Seq("A"),Task.VeryLow,0L,100L)) should be (Nil)
-    }
-
-    "not block higher priority tasks based on ordering" in {
-      val m = new TestResourceMap("A","B") + ("B",1L)
-      m.s(
-        t(1L,Seq("A","B"),Task.Medium,0L),
-        t(2L,Seq("A"),Task.Medium,2L,100L)) should be (Nil)
-    }
-
-    "not block higher priority tasks of other resources" in {
-      val m = new TestResourceMap("A","B") //+ ("B",1L)
-      m.s(
-        t(1L,Seq("B"),Task.Highest),
-        t(2L,Seq("A","B"),Task.VeryLow,0L,100L)) should be (Seq(1L))
-    }
-
-    "consider all higher priority tasks for availability" in {
-      val m = new TestResourceMap("A","B") + ("B",1L)
-      m.s(
-        t(1L,Seq("B"),Task.Highest),
-        t(2L,Seq("A","B"),Task.Medium),
-        t(3L,Seq("A"),Task.VeryLow,0L,2L)) should be (List(3L))
-    }
-
-  }
-
-}
-
-trait ScheduleTester {
-  def s(l:(Long,Long)*) = Schedule(l.toList)
-}
-
-class TestResourceMap(names:String*) {
-  // create a resource map
-  val m:Map[String,TaskResource] = Map[String,TaskResource]() ++ (names map { n => (n,r(n)) })
-
-  // create a resource
-  def r(name:String) = new TaskResource(name,0)
-
-  // pre-attach Tasks to resources
-  def +(r:String,duration:Long):TestResourceMap = {
-    m.get(r).map { _.startTask(new Task(0,"_","_",0L,Seq(r),duration,duration,0L), 0L) }
-    this
-  }
-
-  // test DefaultScheduler
-  def s(tasks:Task*):Seq[Long] =
-    DefaultScheduler.getNextTasks(tasks.sorted, 0L, m) map (_.id)
-
-}
diff --git a/test/com/workflowfm/pew/simulation/Task.scala b/test/com/workflowfm/pew/simulation/Task.scala
deleted file mode 100644
index 3226e66a..00000000
--- a/test/com/workflowfm/pew/simulation/Task.scala
+++ /dev/null
@@ -1,52 +0,0 @@
-package com.workflowfm.pew.simulation
-
-import org.scalatest.{ BeforeAndAfterAll, Matchers, WordSpecLike }
-import org.junit.runner.RunWith
-import org.scalatest.junit.JUnitRunner
-
-@RunWith(classOf[JUnitRunner])
-class TaskTests extends WordSpecLike with Matchers with BeforeAndAfterAll with TaskTester {
-
-  "Task priority" must {
-
-    "prioritize higher priority" in {
-      t(2L,Seq("A"),Task.High,2L,1L,0) < t(1L,Seq("A","B"),Task.Medium,1L,2L,1) should be (true)
-    }
-
-    "prioritize old age" in {
-      t(2L,Seq("A","B"),Task.Medium,2L,1L,0) > t(1L,Seq("A"),Task.Medium,1L,2L,1) should be (true)
-    }
-
-    "prioritize more resources" in {
-      t(2L,Seq("A","B"),Task.Medium,0L,1L,0) < t(1L,Seq("A"),Task.Medium,0L,2L,1) should be (true)
-    }
-
-    "prioritize longer duration" in {
-      t(2L,Seq("A"),Task.Medium,0L,1L,0) > t(1L,Seq("A"),Task.Medium,0L,2L,1) should be (true)
-    }
-
-    "prioritize lower interrupt" in {
-      t(2L,Seq("A"),Task.Medium,0L,1L,0) < t(1L,Seq("A"),Task.Medium,0L,1L,1) should be (true)
-    }
-
-    "prioritize lower ID if all else fails" in {
-      t(2L,Seq("A"),Task.Medium,0L,1L,0) > t(1L,Seq("A"),Task.Medium,0L,1L,0) should be (true)
-    }
-
-  }
-}
-
-
-trait TaskTester {
-  // create a Task
-  def t(
-    id:Long,
-    resources:Seq[String],
-    priority:Task.Priority=Task.Medium,
-    created:Long = 0L,
-    duration:Long = 1L,
-    interrupt:Int = 0,
-    name:String="X"
-  ) =
-    new Task(id,name,"Test",created,resources,duration,duration,0L,interrupt,priority)
-}
diff --git a/test/com/workflowfm/pew/stateless/KafkaExecutorTests.scala b/test/com/workflowfm/pew/stateless/KafkaExecutorTests.scala
index 3d8f2633..fe5cd3f5 100644
--- a/test/com/workflowfm/pew/stateless/KafkaExecutorTests.scala
+++ b/test/com/workflowfm/pew/stateless/KafkaExecutorTests.scala
@@ -9,7 +9,8 @@ import com.workflowfm.pew.stateless.instances.kafka.CustomKafkaExecutor
 import com.workflowfm.pew.stateless.instances.kafka.components.KafkaConnectors
 import com.workflowfm.pew.stateless.instances.kafka.components.KafkaConnectors.{DrainControl, sendMessages}
 import com.workflowfm.pew.stateless.instances.kafka.settings.{KafkaExecutorEnvironment, KafkaExecutorSettings}
-import com.workflowfm.pew.{PiEventFinish, PromiseHandler, _}
+import com.workflowfm.pew.{PiEventFinish, _}
+import com.workflowfm.pew.stream.ResultHandler
 import org.apache.kafka.common.utils.Utils
 import org.bson.types.ObjectId
 import org.junit.runner.RunWith
@@ -238,11 +239,11 @@ class KafkaExecutorTests

   def call( p: PiProcess, args: PiObject* ): Future[Any] = {
     val pii = PiInstance(ObjectId.get, p, args: _*)
-    val handler = new PromiseHandler("test", pii.id)
+    val handler = new ResultHandler(pii.id)
     listener.subscribe(handler)

     sendMessages(ReduceRequest(pii, Seq()), PiiLog(PiEventStart(pii)))
-    handler.promise.future
+    handler.future
   }

   def shutdown(): Unit = {
@@ -288,12 +289,12 @@ class KafkaExecutorTests
   }

   def baremetalCall( ex: CustomKafkaExecutor, p: PiProcess, args: PiObject* ): Future[Any] = {
-    val piiId = await( ex.init( p, args.toSeq ) )
-    val handler = new PromiseHandler("test", piiId)
+    val piiId = await( ex.init( PiInstance(0, p, args:_* ) ) )
+    val handler = new ResultHandler(piiId)
     ex.subscribe(handler)

     ex.start(piiId)
-    handler.promise.future
+    handler.future
   }

   it should "call an atomic PbI (baremetal interface)" in {
@@ -681,7 +682,7 @@ class KafkaExecutorTests

     val onCompletion: MessageMap = {

-      val handler = new PromiseHandler[ObjectId]("testhandler", ourPiiId)
+      val handler = new ResultHandler[ObjectId](ourPiiId)
       val ex2 = makeExecutor(completeProcess.settings)

       tryBut {