code
stringlengths
5
1M
repo_name
stringlengths
5
109
path
stringlengths
6
208
language
stringclasses
1 value
license
stringclasses
15 values
size
int64
5
1M
package mesosphere.marathon.tasks import java.io._ import javax.inject.Inject import mesosphere.marathon.Protos._ import mesosphere.marathon.state.{ PathId, StateMetrics, Timestamp } import mesosphere.marathon.{ Main, MarathonConf } import com.codahale.metrics.MetricRegistry import org.apache.log4j.Logger import org.apache.mesos.Protos.TaskStatus import org.apache.mesos.state.{ State, Variable } import scala.collection.JavaConverters._ import scala.collection._ import scala.collection.concurrent.TrieMap import scala.collection.immutable.Set import scala.concurrent.Future class TaskTracker @Inject() ( state: State, config: MarathonConf, val registry: MetricRegistry) extends StateMetrics { import mesosphere.marathon.tasks.TaskTracker._ import mesosphere.util.BackToTheFuture.futureToFuture import mesosphere.util.ThreadPoolContext.context implicit val timeout = config.zkFutureTimeout private[this] val log = Logger.getLogger(getClass.getName) val PREFIX = "task:" val ID_DELIMITER = ":" private[this] val apps = TrieMap[PathId, InternalApp]() private[tasks] def fetchFromState(id: String): Variable = timedRead { state.fetch(id).get(timeout.duration.length, timeout.duration.unit) } private[tasks] def getKey(appId: PathId, taskId: String): String = { PREFIX + appId.safePath + ID_DELIMITER + taskId } def get(appId: PathId): Set[MarathonTask] = getInternal(appId).values.toSet def getVersion(appId: PathId, taskId: String): Option[Timestamp] = get(appId).collectFirst { case mt: MarathonTask if mt.getId == taskId => Timestamp(mt.getVersion) } private def getInternal(appId: PathId): TrieMap[String, MarathonTask] = apps.getOrElseUpdate(appId, fetchApp(appId)).tasks def list: Map[PathId, App] = apps.mapValues(_.toApp).toMap def count(appId: PathId): Int = getInternal(appId).size def contains(appId: PathId): Boolean = apps.contains(appId) def take(appId: PathId, n: Int): Set[MarathonTask] = get(appId).take(n) def created(appId: PathId, task: MarathonTask): Unit = { // Keep this here so running() can pick it up getInternal(appId) += (task.getId -> task) } def running(appId: PathId, status: TaskStatus): Future[MarathonTask] = { val taskId = status.getTaskId.getValue get(appId).find(_.getId == taskId) match { case Some(oldTask) if !oldTask.hasStartedAt => // staged val task = oldTask.toBuilder .setStartedAt(System.currentTimeMillis) .setStatus(status) .build getInternal(appId) += (task.getId -> task) store(appId, task).map(_ => task) case Some(oldTask) => // running val msg = s"Task for ID $taskId already running, ignoring" log.warn(msg) Future.failed(new Exception(msg)) case _ => val msg = s"No staged task for ID $taskId, ignoring" log.warn(msg) Future.failed(new Exception(msg)) } } def terminated(appId: PathId, status: TaskStatus): Future[Option[MarathonTask]] = { val appTasks = getInternal(appId) val app = apps(appId) val taskId = status.getTaskId.getValue appTasks.get(taskId) match { case Some(task) => app.tasks.remove(task.getId) val variable = fetchFromState(getKey(appId, taskId)) timedWrite { state.expunge(variable) } log.info(s"Task $taskId expunged and removed from TaskTracker") if (app.shutdown && app.tasks.isEmpty) { // Are we shutting down this app? If so, remove it remove(appId) } Future.successful(Some(task)) case None => if (app.shutdown && app.tasks.isEmpty) { // Are we shutting down this app? If so, remove it remove(appId) } Future.successful(None) } } def shutdown(appId: PathId): Unit = { apps.getOrElseUpdate(appId, fetchApp(appId)).shutdown = true if (apps(appId).tasks.isEmpty) remove(appId) } private[this] def remove(appId: PathId): Unit = { apps.remove(appId) log.warn(s"App $appId removed from TaskTracker") } def statusUpdate(appId: PathId, status: TaskStatus): Future[Option[MarathonTask]] = { val taskId = status.getTaskId.getValue getInternal(appId).get(taskId) match { case Some(task) if statusDidChange(task.getStatus, status) => val updatedTask = task.toBuilder .setStatus(status) .build getInternal(appId) += (task.getId -> updatedTask) store(appId, updatedTask).map(_ => Some(updatedTask)) case Some(task) => log.debug(s"Ignoring status update for ${task.getId}. Status did not change.") Future.successful(Some(task)) case _ => log.warn(s"No task for ID $taskId") Future.successful(None) } } def stagedTasks(): Iterable[MarathonTask] = apps.values.flatMap(_.tasks.values.filter(_.getStartedAt == 0)) def checkStagedTasks: Iterable[MarathonTask] = { // stagedAt is set when the task is created by the scheduler val now = System.currentTimeMillis val expires = now - Main.conf.taskLaunchTimeout() val toKill = stagedTasks.filter(_.getStagedAt < expires) toKill.foreach(t => { log.warn(s"Task '${t.getId}' was staged ${(now - t.getStagedAt) / 1000}s ago and has not yet started") }) toKill } def expungeOrphanedTasks(): Unit = { // Remove tasks that don't have any tasks associated with them. Expensive! log.info("Expunging orphaned tasks from store") val stateTaskKeys = timedRead { state.names.get.asScala.filter(_.startsWith(PREFIX)) } val appsTaskKeys = apps.values.flatMap { app => app.tasks.keys.map(taskId => getKey(app.appName, taskId)) }.toSet for (stateTaskKey <- stateTaskKeys) { if (!appsTaskKeys.contains(stateTaskKey)) { log.info(s"Expunging orphaned task with key $stateTaskKey") val variable = timedRead { state.fetch(stateTaskKey).get(timeout.duration.length, timeout.duration.unit) } timedWrite { state.expunge(variable) } } } } private[tasks] def fetchApp(appId: PathId): InternalApp = { log.debug(s"Fetching app from store $appId") val names = timedRead { state.names().get.asScala.toSet } val tasks = TrieMap[String, MarathonTask]() val taskKeys = names.filter(name => name.startsWith(PREFIX + appId.safePath + ID_DELIMITER)) for { taskKey <- taskKeys task <- fetchTask(taskKey) } tasks += (task.getId -> task) new InternalApp(appId, tasks, false) } def fetchTask(appId: PathId, taskId: String): Option[MarathonTask] = fetchTask(getKey(appId, taskId)) private[tasks] def fetchTask(taskKey: String): Option[MarathonTask] = { val bytes = fetchFromState(taskKey).value if (bytes.length > 0) { val source = new ObjectInputStream(new ByteArrayInputStream(bytes)) deserialize(taskKey, source) } else None } def deserialize(taskKey: String, source: ObjectInputStream): Option[MarathonTask] = { if (source.available > 0) { try { val size = source.readInt val bytes = new Array[Byte](size) source.readFully(bytes) Some(MarathonTask.parseFrom(bytes)) } catch { case e: com.google.protobuf.InvalidProtocolBufferException => log.warn(s"Unable to deserialize task state for $taskKey", e) None } } else { log.warn(s"Unable to deserialize task state for $taskKey") None } } def legacyDeserialize(appId: PathId, source: ObjectInputStream): TrieMap[String, MarathonTask] = { var results = TrieMap[String, MarathonTask]() if (source.available > 0) { try { val size = source.readInt val bytes = new Array[Byte](size) source.readFully(bytes) val app = MarathonApp.parseFrom(bytes) if (app.getName != appId.toString) { log.warn(s"App name from task state for $appId is wrong! Got '${app.getName}' Continuing anyway...") } results ++= app.getTasksList.asScala.map(x => x.getId -> x) } catch { case e: com.google.protobuf.InvalidProtocolBufferException => log.warn(s"Unable to deserialize task state for $appId", e) } } else { log.warn(s"Unable to deserialize task state for $appId") } results } def serialize(task: MarathonTask, sink: ObjectOutputStream): Unit = { val size = task.getSerializedSize sink.writeInt(size) sink.write(task.toByteArray) sink.flush() } def store(appId: PathId, task: MarathonTask): Future[Variable] = { val oldVar = fetchFromState(getKey(appId, task.getId)) val bytes = new ByteArrayOutputStream() val output = new ObjectOutputStream(bytes) serialize(task, output) val newVar = oldVar.mutate(bytes.toByteArray) timedWrite { state.store(newVar) } } private[tasks] def statusDidChange(statusA: TaskStatus, statusB: TaskStatus): Boolean = { val healthy = statusB.hasHealthy && (!statusA.hasHealthy || statusA.getHealthy != statusB.getHealthy) healthy || statusA.getState != statusB.getState } } object TaskTracker { private[marathon] class InternalApp( val appName: PathId, var tasks: TrieMap[String, MarathonTask], var shutdown: Boolean) { def toApp: App = App(appName, tasks.values.toSet, shutdown) } case class App(appName: PathId, tasks: Set[MarathonTask], shutdown: Boolean) }
14Zen/marathon
src/main/scala/mesosphere/marathon/tasks/TaskTracker.scala
Scala
apache-2.0
9,521
package com.sutol.scalgen.proj2 // Created by sutol on 14/04/2016. Part of scalgen. object proj2 { def main(args: Array[String]) { val pop = new Pop2() var num1: Int = 0 var num2: Int = 0 while (pop.step()) { num1 = pop.getBest.genes & 1111 num2 = pop.getBest.genes >> 4 println(num1.toString + " " + num2.toString + " : " + pop.getBest.getFitness.toString) } } }
sutolll/scalgen
proj2/proj2.scala
Scala
mit
450
package parser.json.detail import parser.json.GenericJsonParser import play.api.libs.json.JsValue import models.Skimbo import parser.json.providers.FacebookWallParser object FacebookPostDetails extends GenericJsonParser { override def asSkimbo(json: JsValue): Option[Skimbo] = FacebookWallParser.asSkimbo(json) override def cut(json: JsValue) = List(json) }
Froggies/Skimbo
app/parser/json/detail/FacebookPostDetails.scala
Scala
agpl-3.0
370
package contege.seqgen import scala.collection.JavaConversions._ import scala.collection.mutable.Set import scala.collection.mutable.Map import java.util.ArrayList import contege.ClassReader import contege.Random import contege.Atom import contege.ConstructorAtom import contege.MethodAtom import contege.Stats import contege.Config import contege.GlobalState /** * Finds a variable of the given type. * If necessary, appends calls to the sequence to create such a variable. * Possibly uses some variable already in the given sequence. */ class GetParamTask[CallSequence <: AbstractCallSequence[CallSequence]](seqBefore: CallSequence, typ: String, nullAllowed: Boolean, global: GlobalState) extends Task[CallSequence](global) { var param: Option[Variable] = None private val maxRecursion = 50 // getting one param may require getting another; to avoid infinite recursion/very long computation, stop at some point private var currentRecursion = 0 override def run = { global.stats.paramTasksStarted.add("GetParamTask for "+typ) val ret = super.run if (!ret.isDefined) global.stats.paramTasksFailed.add("GetParamTask for "+typ) ret } def computeSequenceCandidate = { val newSequence = seqBefore.copy param = findVarOfType(typ, newSequence, nullAllowed) if (param.isDefined) { Some(newSequence) } else { None } } private def findVarOfType(typ: String, sequence: CallSequence, nullAllowed: Boolean): Option[Variable] = { if (currentRecursion > maxRecursion) { return None } currentRecursion += 1 if (sequence.types.contains(typ) && global.random.nextBool) { // reuse existing var of this type val vars = sequence.varsOfType(typ) val selectedVar = vars(global.random.nextInt(vars.size)) return Some(selectedVar) } else if (!global.typeProvider.primitiveProvider.isNonRefType(typ) && nullAllowed && global.random.nextBool) { return Some(NullConstant) // occasionally, use null (reduce probability?) } else { if (global.typeProvider.primitiveProvider.isPrimitiveOrWrapper(typ)) { return Some(new Constant(global.typeProvider.primitiveProvider.next(typ))) } else { // append calls to the sequence to create a new var of this type var atomOption = global.typeProvider.atomGivingType(typ) var downcast = false if (!atomOption.isDefined) { if (nullAllowed && global.random.nextBool) { global.stats.nullParams.add(typ) return Some(NullConstant) } else { // try to call a method where we downcast the return value val atomWithDowncastOption = global.typeProvider.atomGivingTypeWithDowncast(typ) if (atomWithDowncastOption.isDefined) { atomOption = atomWithDowncastOption downcast = true } else { return None } } } val atom = atomOption.get val receiver = if (atom.isStatic || atom.isConstructor) None else { // recursively try to find a variable we can use as receiver findVarOfType(atom.declaringType, sequence, false) match { case Some(r) => { // if the receiver is the OUT, we should only use CUT methods (only important for subclass testing) if (seqBefore.getCutVariable != null && seqBefore.getCutVariable == r && !global.typeProvider.cutMethods.contains(atom)) { return None } Some(r) } case None => return None // cannot find any receiver, stop searching this path } } val args = new ArrayList[Variable]() atom.paramTypes.foreach(t => { val arg = findVarOfType(t, sequence, true) match { case Some(a) => { args.add(a) } case None => return None // cannot find any argument, stop searching this path } }) assert(atom.returnType.isDefined) val retVal = Some(new ObjectVariable) var downcastType = if (downcast) Some(typ) else None sequence.appendCall(atom, receiver, args, retVal, downcastType) return retVal } } } }
michaelpradel/ConTeGe
src/contege/seqgen/GetParamTask.scala
Scala
gpl-2.0
4,110
/* * Copyright 2007-2010 WorldWide Conferencing, LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package net.liftweb { package example { package lib { import _root_.net.liftweb._ import http._ import http.rest._ import common._ import json._ import util._ import _root_.net.liftweb.example.model._ object WebServices extends RestHelper { // a JSON-able class that holds a User case class UserInfo(firstName: String, lastName: String, email: String) { def toXml = <user firstname={firstName} lastName={lastName} email={email}/> def toJson = Extraction.decompose(this) } // a JSON-able class that holds all the users case class AllUsers(users: List[UserInfo]) { def toJson = Extraction.decompose(this) def toXml = <users>{users.map(_.toXml)}</users> } // define a REST handler for an XML request serve { case "webservices" :: "all_users" :: _ XmlGet _ => AllUsers(User.findAll()).toXml } // define a REST handler for a JSON reqest serve { case "webservices" :: "all_users" :: _ JsonGet _ => AllUsers(User.findAll()).toJson } /* * While many on the Web use GET requests in this way, a client shouldn't * be given the expectation of resource state change or creation * through a GET. GET should be idempotent and safe. This doesn't mean * that a service couldn't create or modify state as as result * (e.g. logging, counting the number of requests, creating business * objects). It's just that any such state-related operations should * not be visible through GET. In the above example, it is implied * that a client could send a GET request in order to create a user. * * AKA -- don't do it this way in the real world, this is an example * of using Scala's guards */ serveJx { case Req("webservices" :: "add_user" :: _, _, rt) if rt.post_? || rt.get_? => addUser() } { // How do we convert a UserInfo to either XML or JSON? case (JsonSelect, u, _) => u.toJson case (XmlSelect, u, _) => u.toXml } // a couple of helpful conversion rules implicit def userToInfo(u: User): UserInfo = UserInfo(u.firstName, u.lastName, u.email) implicit def uLstToInfo(ul: List[User]): List[UserInfo] = ul.map(userToInfo) // extract the parameters, create a user // return the appropriate response def addUser(): Box[UserInfo] = for { firstname <- S.param("firstname") ?~ "firstname parameter missing" ~> 400 lastname <- S.param("lastname") ?~ "lastname parameter missing" email <- S.param("email") ?~ "email parameter missing" } yield { val u = User.create.firstName(firstname). lastName(lastname).email(email) S.param("password") foreach u.password.set u.saveMe } } } } }
wsaccaco/lift
examples/example/src/main/scala/net/liftweb/example/lib/WebServices.scala
Scala
apache-2.0
3,312
package lore.compiler.feedback import lore.compiler.core.Position import lore.compiler.semantics.NamePath import lore.compiler.types.{TupleType, Type} object CoreFeedback { object Trait { case class NotFound(name: NamePath) extends Feedback.Error(Position.unknown) { override def message: String = s"The core trait $name is not defined. Please include Pyramid in your project" + s" dependencies or write your own trait definition." } case class TraitExpected(name: NamePath) extends Feedback.Error(Position.unknown) { override def message: String = s"The type $name is not a trait. Please include Pyramid in your project" + s" dependencies or write your own proper trait definition." } } object MultiFunction { case class NotFound(name: NamePath, inputType: TupleType) extends Feedback.Error(Position.unknown) { override def message: String = s"The core multi-function $name is not defined for the argument types $inputType." + s" Please include Pyramid in your project dependencies or write your own function definition." } case class IllegalOutputType(name: NamePath, inputType: TupleType, outputType: Type) extends Feedback.Error(Position.unknown) { override def message: String = s"The core multi-function $name for argument types $inputType has the wrong output" + s" type. Please include Pyramid in your project dependencies or ensure that the function has the following" + s" output type: $outputType." } } }
marcopennekamp/lore
compiler/src/lore/compiler/feedback/CoreFeedback.scala
Scala
mit
1,525
package com.greencatsoft.d3.selection import scala.scalajs.js import scala.scalajs.js.UndefOr import org.scalajs.dom.Node trait DataDriven[A <: Node, B <: Selection[A, B]] extends js.Object { import DataDriven._ def data[T](): js.Array[T] = js.native def data[T](values: js.Array[T]): BoundSelection[A, B] = js.native def data[T](values: js.Array[T], key: KeyFunction[T]): BoundSelection[A, B] = js.native def data(provider: js.Function1[Any, Any]): BoundSelection[A, B] = js.native def data[T](provider: js.Function1[Any, T], key: KeyFunction[T]): BoundSelection[A, B] = js.native def datum[T](): UndefOr[T] = js.native def datum(value: Any): B = js.native def datum[T](value: ElementIterator[A, T]): B = js.native def filter(selector: String): B = js.native def filter[T](filter: ElementIterator[A, T]): B = js.native def sort[T](comparator: js.Function2[T, T, Int]): B = js.native def order(): B = js.native } object DataDriven { type KeyFunction[A] = js.ThisFunction2[Any, A, Int, Any] trait BoundSelection[A <: Node, B <: Selection[A, B]] extends js.Object { this: B => def enter(): SelectionBuilder[A, B] = js.native def exit(): B = js.native } trait SelectionBuilder[A <: Node, B <: Selection[A, B]] extends Container[A, B] }
sid-kap/scalajs-d3
src/main/scala/com/greencatsoft/d3/selection/DataDriven.scala
Scala
apache-2.0
1,301
package com.twitter.inject.thrift.integration.http_server import com.google.inject.{Provides, Singleton} import com.twitter.finagle.thrift.ClientId import com.twitter.inject.thrift.ThriftClientModule import com.twitter.test.thriftscala.EchoService import com.twitter.util.Future import com.twitter.conversions.time._ object EchoThriftClientModule extends ThriftClientModule[EchoService[Future]] { @Provides @Singleton def clientId: ClientId = ClientId("echo-http-service") override val label = "echo-service" override val dest = "flag!thrift-echo-service" override val connectTimeout = 1L.seconds override val requestTimeout = 1L.seconds }
tom-chan/finatra
inject/inject-thrift-client/src/test/scala/com/twitter/inject/thrift/integration/http_server/EchoThriftClientModule.scala
Scala
apache-2.0
658
package org.woodyalen202 /** * Created by lichuansun on 14-6-24. */ trait TestTrait { }
woodyalen202/based-scala
src/main/java/org/woodyalen202/TestTrait.scala
Scala
mit
92
/* Copyright 2009-2021 EPFL, Lausanne */ package stainless package extraction package object inlining { object trees extends Trees with inox.ast.SimpleSymbols { case class Symbols( functions: Map[Identifier, FunDef], sorts: Map[Identifier, ADTSort] ) extends SimpleSymbols with StainlessAbstractSymbols { override val symbols: this.type = this } override def mkSymbols(functions: Map[Identifier, FunDef], sorts: Map[Identifier, ADTSort]): Symbols = { Symbols(functions, sorts) } object printer extends Printer { val trees: inlining.trees.type = inlining.trees } } def extractor(using inox.Context) = { utils.DebugPipeline("FunctionSpecialization", FunctionSpecialization(trees)) andThen utils.DebugPipeline("UnfoldOpaque", UnfoldOpaque(trees)) andThen utils.DebugPipeline("CallSiteInline", CallSiteInline(trees)) andThen utils.DebugPipeline("ChooseInjector", ChooseInjector(trees)) andThen utils.DebugPipeline("ChooseEncoder", ChooseEncoder(trees)) andThen utils.DebugPipeline("FunctionInlining", FunctionInlining(trees, trace.trees)) } def fullExtractor(using inox.Context) = extractor andThen nextExtractor def nextExtractor(using inox.Context) = trace.fullExtractor def phaseSemantics(using inox.Context): inox.SemanticsProvider { val trees: inlining.trees.type } = { extraction.phaseSemantics(inlining.trees)(fullExtractor) } def nextPhaseSemantics(using inox.Context): inox.SemanticsProvider { val trees: trace.trees.type } = { trace.phaseSemantics } }
epfl-lara/stainless
core/src/main/scala/stainless/extraction/inlining/package.scala
Scala
apache-2.0
1,567
package de.htwg.zeta.server.model.modelValidator.validator.rules.metaModelIndependent import de.htwg.zeta.common.models.project.instance.elements.NodeInstance import de.htwg.zeta.server.model.modelValidator.validator.rules.SingleNodeRule /** * This file was created by Tobias Droth as part of his master thesis at HTWG Konstanz (03/2017 - 09/2017). */ class NodesAttributesNamesNotEmpty extends SingleNodeRule { override val name: String = getClass.getSimpleName override val description: String = "Attribute names of nodes attributes must not be empty." override val possibleFix: String = "Add name to every attribute." override def isValid(node: NodeInstance): Option[Boolean] = Some(!node.attributeValues.keys.toSeq.contains("")) }
Zeta-Project/zeta
api/server/app/de/htwg/zeta/server/model/modelValidator/validator/rules/metaModelIndependent/NodesAttributesNamesNotEmpty.scala
Scala
bsd-2-clause
748
/* * Copyright (c) 2014-2021 by The Monix Project Developers. * See the project homepage at: https://monix.io * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package monix.execution import java.util.concurrent.atomic.AtomicLong import minitest.TestSuite import monix.execution.BufferCapacity.{Bounded, Unbounded} import monix.execution.ChannelType.{MPMC, MPSC, SPMC, SPSC} import monix.execution.internal.Platform import monix.execution.schedulers.TestScheduler import scala.collection.immutable.Queue import scala.concurrent.Future import scala.concurrent.duration._ object AsyncQueueFakeSuite extends BaseAsyncQueueSuite[TestScheduler] { def setup() = TestScheduler() def tearDown(env: TestScheduler): Unit = assert(env.state.tasks.isEmpty, "should not have tasks left to execute") def testFuture(name: String, times: Int)(f: Scheduler => Future[Unit]): Unit = { def repeatTest(test: Future[Unit], n: Int)(implicit ec: Scheduler): Future[Unit] = if (n > 0) test.flatMap(_ => repeatTest(test, n - 1)) else Future.successful(()) test(name) { implicit ec => repeatTest(f(ec), times) ec.tick(1.day) } } } object AsyncQueueGlobalSuite extends BaseAsyncQueueSuite[Scheduler] { def setup() = Scheduler.global def tearDown(env: Scheduler): Unit = () def testFuture(name: String, times: Int)(f: Scheduler => Future[Unit]): Unit = { def repeatTest(test: Future[Unit], n: Int)(implicit ec: Scheduler): Future[Unit] = if (n > 0) FutureUtils .timeout(test, 60.seconds) .flatMap(_ => repeatTest(test, n - 1)) else Future.successful(()) testAsync(name) { implicit ec => repeatTest(f(ec), times) } } } abstract class BaseAsyncQueueSuite[S <: Scheduler] extends TestSuite[S] { val repeatForFastTests = { if (Platform.isJVM) 1000 else 100 } val repeatForSlowTests = { if (Platform.isJVM) 50 else 1 } /** TO IMPLEMENT ... */ def testFuture(name: String, times: Int = 1)(f: Scheduler => Future[Unit]): Unit testFuture("simple offer and poll", times = repeatForFastTests) { implicit s => val queue = AsyncQueue.bounded[Int](10) for { _ <- queue.offer(1) _ <- queue.offer(2) _ <- queue.offer(3) r1 <- queue.poll() r2 <- queue.poll() r3 <- queue.poll() } yield { assertEquals(r1, 1) assertEquals(r2, 2) assertEquals(r3, 3) } } testFuture("async poll", times = repeatForFastTests) { implicit s => val queue = AsyncQueue.bounded[Int](10) for { _ <- queue.offer(1) r1 <- queue.poll() _ <- Future(assertEquals(r1, 1)) f <- Future(queue.poll()) _ <- Future(assertEquals(f.value, None)) _ <- queue.offer(2) r2 <- f } yield { assertEquals(r2, 2) } } testFuture("offer/poll over capacity", times = repeatForFastTests) { implicit s => val queue = AsyncQueue.bounded[Long](10) val count = 1000L def producer(n: Long): Future[Unit] = if (n > 0) queue.offer(count - n).flatMap(_ => producer(n - 1)) else Future.successful(()) def consumer(n: Long, acc: Queue[Long] = Queue.empty): Future[Long] = if (n > 0) queue.poll().flatMap { a => consumer(n - 1, acc.enqueue(a)) } else Future.successful(acc.foldLeft(0L)(_ + _)) val p = producer(count) val c = consumer(count) for { _ <- p r <- c } yield { assertEquals(r, count * (count - 1) / 2) } } testFuture("tryOffer / tryPoll", times = repeatForFastTests) { implicit ec => val queue = AsyncQueue.bounded[Long](16) val count = 1000L def producer(n: Long): Future[Unit] = if (n > 0) Future(queue.tryOffer(count - n)).flatMap { case true => producer(n - 1) case false => FutureUtils.delayedResult(10.millis)(()).flatMap(_ => producer(n)) } else { Future.successful(()) } def consumer(n: Long, acc: Queue[Long] = Queue.empty): Future[Long] = if (n > 0) Future(queue.tryPoll()).flatMap { case Some(a) => consumer(n - 1, acc.enqueue(a)) case None => FutureUtils.delayedResult(10.millis)(()).flatMap(_ => consumer(n, acc)) } else Future.successful(acc.foldLeft(0L)(_ + _)) val c = consumer(count) val p = producer(count) for { _ <- p r <- c } yield { assertEquals(r, count * (count - 1) / 2) } } testFuture("drain; MPMC; unbounded", times = repeatForFastTests) { implicit ec => testDrain(Unbounded(), MPMC) } testFuture("drain; MPSC; unbounded", times = repeatForFastTests) { implicit ec => testDrain(Unbounded(), MPSC) } testFuture("drain; SPMC; unbounded", times = repeatForFastTests) { implicit ec => testDrain(Unbounded(), SPMC) } testFuture("drain; SPMC; unbounded", times = repeatForFastTests) { implicit ec => testDrain(Unbounded(), SPSC) } testFuture("drain; MPMC; bounded", times = repeatForFastTests) { implicit ec => testDrain(Bounded(32), MPMC) } testFuture("drain; MPSC; bounded", times = repeatForFastTests) { implicit ec => testDrain(Bounded(32), MPSC) } testFuture("drain; SPMC; bounded", times = repeatForFastTests) { implicit ec => testDrain(Bounded(32), SPMC) } testFuture("drain; SPMC; bounded", times = repeatForFastTests) { implicit ec => testDrain(Bounded(32), SPSC) } def testDrain(bc: BufferCapacity, ct: ChannelType)(implicit ec: Scheduler): Future[Unit] = { val count = 1000 val elems = for (i <- 0 until count) yield i val queue = AsyncQueue.withConfig[Int](bc, ct) val f1 = queue.drain(1000, 1000) val f2 = queue.offerMany(elems) for { _ <- f2 r <- f1 } yield { assertEquals(r.sum, count * (count - 1) / 2) } } testFuture("clear") { implicit s => val queue = AsyncQueue.bounded[Int](10) for { _ <- queue.offer(1) _ <- Future(queue.clear()) r <- Future(queue.tryPoll()) } yield { assertEquals(r, None) } } testFuture("clear after overflow") { implicit s => val queue = AsyncQueue.bounded[Int](512) val fiber = queue.offerMany(0 until 1000) for { _ <- FutureUtils.timeoutTo(fiber, 3.millis, Future.successful(())) _ <- Future(queue.clear()) _ <- fiber } yield () } testFuture("concurrent producer - consumer; MPMC; bounded") { implicit ec => val count = if (Platform.isJVM) 10000 else 1000 val queue = AsyncQueue.withConfig[Int](Bounded(128), MPMC) testConcurrency(queue, count, 3) } testFuture("concurrent producer - consumer; MPMC; unbounded") { implicit ec => val count = if (Platform.isJVM) 10000 else 1000 val queue = AsyncQueue.withConfig[Int](Unbounded(), MPMC) testConcurrency(queue, count, 3) } testFuture("concurrent producer - consumer; MPSC; bounded") { implicit ec => val count = if (Platform.isJVM) 10000 else 1000 val queue = AsyncQueue.withConfig[Int](Bounded(128), MPSC) testConcurrency(queue, count, 1) } testFuture("concurrent producer - consumer; MPSC; unbounded") { implicit ec => val count = if (Platform.isJVM) 10000 else 1000 val queue = AsyncQueue.withConfig[Int](Unbounded(), MPSC) testConcurrency(queue, count, 1) } testFuture("concurrent producer - consumer; SPMC; bounded") { implicit ec => val count = if (Platform.isJVM) 10000 else 1000 val queue = AsyncQueue.withConfig[Int](Bounded(128), SPMC) testConcurrency(queue, count, 3) } testFuture("concurrent producer - consumer; SPMC; unbounded") { implicit ec => val count = if (Platform.isJVM) 10000 else 1000 val queue = AsyncQueue.withConfig[Int](Unbounded(), SPMC) testConcurrency(queue, count, 3) } testFuture("concurrent producer - consumer; SPSC; bounded") { implicit ec => val count = if (Platform.isJVM) 10000 else 1000 val queue = AsyncQueue.withConfig[Int](Bounded(128), SPSC) testConcurrency(queue, count, 1) } testFuture("concurrent producer - consumer; SPSC; unbounded") { implicit ec => val count = if (Platform.isJVM) 10000 else 1000 val queue = AsyncQueue.withConfig[Int](Unbounded(), SPSC) testConcurrency(queue, count, 1) } def testConcurrency(queue: AsyncQueue[Int], n: Int, workers: Int)(implicit s: Scheduler): Future[Unit] = { def producer(n: Int): Future[Unit] = { def offerViaTry(n: Int): Future[Unit] = Future(queue.tryOffer(n)).flatMap { case true => Future.successful(()) case false => FutureUtils.delayedResult(10.millis)(()).flatMap(_ => offerViaTry(n)) } if (n > 0) { val offer = if (n % 2 == 0) queue.offer(n) else offerViaTry(n) offer.flatMap(_ => producer(n - 1)) } else { queue.offerMany(for (_ <- 0 until workers) yield 0) } } val atomic = new AtomicLong(0) def consumer(idx: Int = 0): Future[Unit] = { def pollViaTry(): Future[Int] = Future(queue.tryPoll()).flatMap { case Some(v) => Future.successful(v) case None => FutureUtils.delayedResult(10.millis)(()).flatMap(_ => pollViaTry()) } val poll = if (idx % 2 == 0) queue.poll() else pollViaTry() poll.flatMap { i => if (i > 0) { atomic.addAndGet(i.toLong) consumer(idx + 1) } else { Future.successful(()) } } } val tasks = (producer(n) +: (0 until workers).map(_ => consumer())).toList for (_ <- Future.sequence(tasks)) yield { assertEquals(atomic.get(), n.toLong * (n + 1) / 2) } } }
monix/monix
monix-execution/shared/src/test/scala/monix/execution/AsyncQueueSuite.scala
Scala
apache-2.0
10,251
import leon.instrumentation._ import leon.collection._ import leon.lang._ import ListSpecs._ import leon.annotation._ import conctrees.ConcTrees._ object Conqueue { def max(x: BigInt, y: BigInt): BigInt = if (x >= y) x else y def abs(x: BigInt): BigInt = if (x < 0) -x else x sealed abstract class ConQ[T] { val isLazy: Boolean = this match { case PushLazy(_, _) => true case _ => false } val isSpine: Boolean = this match { case Spine(_, _) => true case _ => false } val pushLazyInv: Boolean = this match { case PushLazy(ys, xs) => !ys.isEmpty && (xs match { case Spine(h, rear) => !h.isEmpty && rear.pushLazyInv //note: head cannot be empty for a lazy closure //h.level == ys.level (omitting this for now) case _ => false }) case Spine(_, rear) => rear.pushLazyInv case _ => true } val zeroPreceedsLazy: Boolean = { this match { case Spine(h, PushLazy(_, q)) => (h == Empty[T]()) && q.zeroPreceedsLazy // the position before pushlazy is Empty case Spine(Empty(), rear) => rear.weakZeroPreceedsLazy // here we have seen a zero case Spine(h, rear) => rear.zeroPreceedsLazy //here we have not seen a zero case Tip(_) => true case _ => false // this implies that a ConQ cannot start with a lazy closure } } ensuring (res => !res || weakZeroPreceedsLazy) //zeroPreceedsLazy is a stronger property val weakZeroPreceedsLazy: Boolean = { this match { case Spine(h, PushLazy(_, q)) => q.zeroPreceedsLazy case Spine(_, rear) => rear.weakZeroPreceedsLazy case Tip(_) => true case _ => false // this implies that a ConQ cannot start with a lazy closure } } val valid = { zeroPreceedsLazy && pushLazyInv } val weakValid = { weakZeroPreceedsLazy && pushLazyInv } val isConcrete: Boolean = { this match { case Spine(_, rear) => rear.isConcrete case Tip(_) => true case _ => false } } ensuring (res => !res || valid) val firstLazyClosure: ConQ[T] = { require(this.pushLazyInv) this match { case Spine(_, pl: PushLazy[T]) => pl case Spine(_, tail) => tail.firstLazyClosure case _ => this } } ensuring (res => !res.isSpine && res.pushLazyInv) def suffix(sch: ConQ[T]): Boolean = { //checks if sch is a suffix of 'this' (this == sch) || { this match { case Spine(_, rear) => rear.suffix(sch) case _ => false } } } ensuring (res => sch match { case Spine(_, rear) => !res || suffix(rear) case _ => true }) } case class Tip[T](t: Conc[T]) extends ConQ[T] case class Spine[T](head: Conc[T], rear: ConQ[T]) extends ConQ[T] // a closure corresponding to 'push' operations case class PushLazy[T](ys: Conc[T], xs: Spine[T]) extends ConQ[T] def queueScheduleProperty[T](xs: ConQ[T], sch: PushLazy[T]) = { sch match { case PushLazy(_, _) => xs.valid && xs.firstLazyClosure == sch //sch is the first lazy closure of 's' case _ => false } } def weakScheduleProperty[T](xs: ConQ[T], sch: PushLazy[T]) = { sch match { case PushLazy(_, _) => xs.weakValid && xs.firstLazyClosure == sch //sch is the first lazy closure of 's' case _ => false } } def schedulesProperty[T](q: ConQ[T], schs: List[ConQ[T]]): Boolean = { schs match { case Cons(pl @ PushLazy(_, nestq), tail) => queueScheduleProperty(q, pl) && schedulesProperty(nestq, tail) case Nil() => //q.valid // here, for now we do not enforce that q should not have any closures. q.isConcrete case _ => false // other cases are not valid } } def weakSchedulesProperty[T](q: ConQ[T], schs: List[ConQ[T]]): Boolean = { schs match { case Cons(pl @ PushLazy(_, nestq), tail) => weakScheduleProperty(q, pl) && schedulesProperty(nestq, tail) case Nil() => //q.valid q.isConcrete case _ => false } } case class Wrapper[T](queue: ConQ[T], schedule: List[ConQ[T]]) { val valid: Boolean = { schedulesProperty(queue, schedule) } } def pushLeft[T](ys: Single[T], xs: ConQ[T]): (ConQ[T], BigInt) = { require(xs.valid) xs match { case Tip(CC(_, _)) => (Spine(ys, xs), 1) case Tip(Empty()) => (Tip(ys), 1) case Tip(t @ Single(_)) => (Tip(CC(ys, t)), 1) case s @ Spine(_, _) => val (r, t) = pushLeftLazy(ys, s) //ensure precondition here (r, t + 1) } } ensuring (res => !res._1.isLazy && res._2 <= 2) def pushLeftLazy[T](ys: Conc[T], xs: Spine[T]): (Spine[T], BigInt) = { require(!ys.isEmpty && xs.valid) // && //(xs.head.isEmpty || xs.head.level == ys.level)) xs match { case Spine(Empty(), rear) => //note: 'rear' is not materialized here (Spine(ys, rear), 1) // if rear was 'PushLazy', this would temporarily break the 'zeroPreceedsLazy' invariant case Spine(head, rear) => val carry = CC(head, ys) //here, head and ys are of the same level rear match { //here, rear cannot be 'PushLazy' by the 'zeroPreceedsLazy' invariant case s @ Spine(Empty(), srear) => (Spine(Empty(), Spine(carry, srear)), 1) case s @ Spine(_, _) => (Spine(Empty(), PushLazy(carry, s)), 1) case t @ Tip(tree) if tree.level > carry.level => // can this happen ? this means tree is of level at least two greater than rear ? (Spine(Empty(), Spine(carry, t)), 1) case Tip(tree) => // here tree level and carry level are equal (Spine(Empty(), Spine(Empty(), Tip(CC(tree, carry)))), 1) } } } ensuring (res => res._1.isSpine && res._1.weakValid && res._2 <= 1) /** * Materialize will evaluate ps and update the references to * ps in xs. Ideally, the second argument should include every * structure that may contain 'pl'. */ def materialize[T](mat: ConQ[T], xs: ConQ[T], schs: Cons[ConQ[T]]): (Spine[T], ConQ[T], BigInt) = { require(weakSchedulesProperty(xs, schs) && schs.head == mat) mat match { case PushLazy(elem, q) => val (nr, t) = pushLeftLazy(elem, q) (nr, updateReferences(xs, mat, schs), t + 1) } } ensuring (res => (res._1 match { case Spine(_, pl @ PushLazy(_, _)) => schedulesProperty(res._2, Cons(pl, schs.tail)) case _ => schedulesProperty(res._2, schs.tail) }) && res._3 <= 2) /** * This does not take any time, by the definition of laziness */ def updateReferences[T](xs: ConQ[T], mat: ConQ[T], schs: Cons[ConQ[T]]): ConQ[T] = { require(weakSchedulesProperty(xs, schs) && schs.head == mat) xs match { case Spine(h, pl @ PushLazy(elem, q)) if (pl == mat) => //ADT property implies that we need not search in the sub-structure 'q'. Spine(h, pushLeftLazy(elem, q)._1) //here, we can ignore the time, this only captures the semantics case Spine(h, rear) => //here mat and xs cannot be equal, so look in the substructures Spine(h, updateReferences(rear, mat, schs)) } } ensuring (res => mat match { case PushLazy(elem, q) => pushLeftLazy(elem, q)._1 match { case Spine(_, pl @ PushLazy(_, _)) => schedulesProperty(res, Cons(pl, schs.tail)) case _ => schedulesProperty(res, schs.tail) } }) def pushLeftAndPay[T](ys: Single[T], w: Wrapper[T]): (Wrapper[T], BigInt) = { require(w.valid) val (nq, t1) = pushLeft(ys, w.queue) // the queue invariant could be temporarily broken // update the schedule val nschs = nq match { case Spine(_, pl @ PushLazy(_, nest)) => w.queue match { case Spine(head, rear) if !head.isEmpty => Cons[ConQ[T]](pl, w.schedule) case _ => w.schedule } case Tip(_) => w.schedule case Spine(_, rear) => w.schedule } val (fschs, fq, t2) = pay(nschs, nq) (Wrapper(fq, fschs), t1 + t2 + 1) } ensuring (res => res._1.valid && res._2 <= 6) def pay[T](schs: List[ConQ[T]], xs: ConQ[T]): (List[ConQ[T]], ConQ[T], BigInt) = { require(weakSchedulesProperty(xs, schs)) schs match { case c @ Cons(pl @ PushLazy(_, nestq), rest) => val (matr, nxs, matt) = materialize(pl, xs, c) matr match { case Spine(_, pl @ PushLazy(_, _)) => (Cons(pl, rest), nxs, matt + 1) case _ => (rest, nxs, matt + 1) } case Nil() => (Nil(), xs, 1) // here every thing is concretized } } ensuring (res => schedulesProperty(res._2, res._1) && res._3 <= 3) }
epfl-lara/leon
testcases/lazy-datastructures/ManualnOutdated/Conqueue-Manual.scala
Scala
gpl-3.0
9,047
/* * This file is part of Kiama. * * Copyright (C) 2014-2015 Anthony M Sloane, Macquarie University. * * Kiama is free software: you can redistribute it and/or modify it under * the terms of the GNU Lesser General Public License as published by the * Free Software Foundation, either version 3 of the License, or (at your * option) any later version. * * Kiama is distributed in the hope that it will be useful, but WITHOUT ANY * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS * FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for * more details. * * You should have received a copy of the GNU Lesser General Public License * along with Kiama. (See files COPYING and COPYING.LESSER.) If not, see * <http://www.gnu.org/licenses/>. */ package org.kiama package relation import scala.language.higherKinds /** * A template trait for Relation-like types. `T` and `U` are the domain * and range types of the relation, respectively. `Repr` is the type * constructor for the concrete representation of a particular relation * type. */ trait RelationLike[T,U,Repr[_,_]] { import org.kiama.util.Comparison.{contains, distinct, same} /** * A companion object that provides factory methods for this kind of * relation. */ def companion : RelationFactory[Repr] /** * The graph of this relation. */ def graph : List[(T,U)] /** * Apply this relation (same as `image`). */ def apply (t : T) : List[U] = image (t) /** * Build a new relation by collecting pairs produced by the partial * function `f` wherever it is defined on pairs of this relation. */ def collect[V,W] (f : ((T,U)) ==> (V,W)) : Repr[V,W] = companion.fromGraph (graph.collect (f)) /** * Compose this relation with `st`. */ def compose[S] (st : RelationLike[S,T,Repr]) : Repr[S,U] = companion.fromGraph ( for ((s, t1) <- st.graph; (t2, u) <- graph; if same (t1, t2)) yield (s, u) ) /** * Does the domain of this relation contain the value `t`? */ def containsInDomain (t : T) : Boolean = contains (domain, t) /** * Does the range of this relation contain the value `u`? */ def containsInRange (u : U) : Boolean = contains (range, u) /** * The domain of this relation. */ lazy val domain : List[T] = distinct (graph.map (_._1)) /** * The image of a value of the relation's domain is a set of the * values in the range that are related to that domain value. */ def image (t : T) : List[U] = graph.collect { case (t1, u) if same (t, t1) => u } /** * A relation that maps each element of the range to its position * (starting counting at zero). */ lazy val index : Repr[U,Int] = companion.fromGraph (graph.map (_._2).zipWithIndex) /** * Invert this relation. In other words, if `(t,u)` is in the relation, * then `(u,t)` is in the inverted relation. */ lazy val inverse : Repr[U,T] = companion.fromGraph (graph.map (_.swap)) /** * Is this relation empty (i.e., contains no pairs)? */ lazy val isEmpty : Boolean = graph.isEmpty /** * An auxiliary extractor for this relation that matches pairs. The * match succeeds if and only if the matched value `t` has a unique * image in the relation. Both `t` and its unique image value are * returned for a successful match. */ object pair { def unapply (t : T) : Option[(T,U)] = image (t) match { case List (u) => Some ((t, u)) case _ => None } } /** * The preImage of a value of the relation's range is a set of the * values in the domain that are related to that range value. */ def preImage (u : U) : List[T] = graph.collect { case (t, u1) if same (u, u1) => t } /** * A relation that maps each element of the domain to its position * starting at zero. */ lazy val preIndex : Repr[T,Int] = companion.fromGraph (graph.map (_._1).zipWithIndex) /** * Domain projection, i.e., form a relation that relates each * value in the domain to all of the related values in the range. */ lazy val projDomain : Repr[T,List[U]] = companion.fromGraph (domain.map (t => (t, image (t)))) /** * Range projection, i.e., form a relation that relates each * value in the range to all of the related values in the domain. */ lazy val projRange : Repr[U,List[T]] = companion.fromGraph (range.map (u => (u, preImage (u)))) /** * The range of this relation. */ lazy val range : List[U] = distinct (graph.map (_._2)) /** * A relation can be used as an extractor that matches if and only if * the matched value `t` has a unique image in the relation. The unique * image value is returned for a successful match. */ def unapply (t : T) : Option[U] = image (t) match { case List (u) => Some (u) case _ => None } /** * A relation can be used as an extractor that returns the image for a * given domain value `t`. Fails if `t` is not in the domain. */ def unapplySeq (t : T) : Option[List[U]] = { val ti = image (t) if (ti.isEmpty) None else Some (ti) } /** * Union this relation with `r`. */ def union (r : RelationLike[T,U,Repr]) : Repr[T,U] = companion.fromGraph (graph ++ r.graph) /** * Return the sub-relation of this relation that contains just those * pairs that have `t` as their domain element. */ def withDomain (t : T) : Repr[T,U] = companion.fromGraph (graph.filter { case (t1, _) => same (t, t1) }) /** * Return the sub-relation of this relation that contains just those * pairs that have `u` as their range element. */ def withRange (u : U) : Repr[T,U] = companion.fromGraph (graph.filter { case (_, u1) => same (u, u1) }) }
solomono/kiama
library/src/org/kiama/relation/RelationLike.scala
Scala
gpl-3.0
6,257
package chrome.utils import chrome.app.runtime.Runtime import chrome.app.runtime.bindings.{LaunchData, Request} trait ChromeApp { def main(args: Array[String]): Unit = { Runtime.onLaunched.listen(onLaunched) Runtime.onRestarted.listen((_) => onRestart) Runtime.onEmbedRequested.listen(onEmbedRequested) } def onLaunched(launchData: LaunchData): Unit = {} def onRestart(): Unit = {} def onEmbedRequested(request: Request): Unit = {} }
lucidd/scala-js-chrome
bindings/src/main/scala/chrome/utils/ChromeApp.scala
Scala
mit
463
/* scala-stm - (c) 2009-2011, Stanford University, PPL */ package scala.concurrent.stm package skel import scala.collection.mutable private[stm] object HashTrieTMap { def empty[A, B]: TMap[A, B] = new HashTrieTMap(Ref(TxnHashTrie.emptyMapNode[A, B]).single) def newBuilder[A, B]: mutable.Builder[(A, B), TMap[A, B]] = new mutable.Builder[(A, B), TMap[A, B]] { var root: TxnHashTrie.BuildingNode[A, B] = TxnHashTrie.emptyMapBuildingNode[A, B] def clear(): Unit = { root = TxnHashTrie.emptyMapBuildingNode[A, B] } def += (kv: (A, B)): this.type = { root = TxnHashTrie.buildingPut(root, kv._1, kv._2) ; this } def result(): TMap[A, B] = { val r = root root = null new HashTrieTMap(Ref(r.endBuild).single) } } } private[skel] class HashTrieTMap[A, B] private (root0: Ref.View[TxnHashTrie.Node[A, B]] ) extends TxnHashTrie[A, B](root0) with TMapViaClone[A, B] { //// construction override def empty: TMap.View[A, B] = new HashTrieTMap(Ref(TxnHashTrie.emptyMapNode[A, B]).single) override def clone: HashTrieTMap[A, B] = new HashTrieTMap(cloneRoot) //// TMap.View aggregates override def isEmpty: Boolean = singleIsEmpty override def size: Int = singleSize override def iterator: Iterator[(A, B)] = mapIterator override def keysIterator: Iterator[A] = mapKeyIterator override def valuesIterator: Iterator[B] = mapValueIterator override def foreach[U](f: ((A, B)) => U): Unit = singleMapForeach(f) override def clear(): Unit = { root() = TxnHashTrie.emptyMapNode[A, B] } //// TMap.View per-element override def contains(key: A): Boolean = singleContains(key) override def apply(key: A): B = singleGetOrThrow(key) def get(key: A): Option[B] = singleGet(key) override def put(key: A, value: B): Option[B] = singlePut(key, value) override def update(key: A, value: B): Unit = singlePut(key, value) override def += (kv: (A, B)): this.type = { singlePut(kv._1, kv._2) ; this } override def remove(key: A): Option[B] = singleRemove(key) override def -= (key: A): this.type = { singleRemove(key) ; this } //// optimized TMap versions def isEmpty(implicit txn: InTxn): Boolean = txnIsEmpty def size(implicit txn: InTxn): Int = singleSize def foreach[U](f: ((A, B)) => U)(implicit txn: InTxn): Unit = txnMapForeach(f) def contains(key: A)(implicit txn: InTxn): Boolean = txnContains(key) def apply(key: A)(implicit txn: InTxn): B = txnGetOrThrow(key) def get(key: A)(implicit txn: InTxn): Option[B] = txnGet(key) def put(key: A, value: B)(implicit txn: InTxn): Option[B] = txnPut(key, value) def remove(key: A)(implicit txn: InTxn): Option[B] = txnRemove(key) def transform(f: (A, B) => B)(implicit txn: InTxn): this.type = { single transform f ; this } def retain(p: (A, B) => Boolean)(implicit txn: InTxn): this.type = { single retain p ; this } }
nbronson/scala-stm
src/main/scala/scala/concurrent/stm/skel/HashTrieTMap.scala
Scala
bsd-3-clause
2,913
package im.mange.shoreditch import im.mange.shoreditch.api._ import im.mange.shoreditch.handler.HttpMethodPartialFunctions._ import im.mange.shoreditch.handler.{Request, Route, ShoreditchHandler} case class Shoreditch(base: String = "shoreditch", version: String, longName: String, alias: String, checksEnabled: Boolean = true, actionsEnabled: Boolean = true, debug: Boolean = false, routes: Seq[Route[Service]]) { private val handler = new ShoreditchHandler(this) def handle(request: Request) = { val theHandler = handler.handler(request) // println(theHandler) theHandler.map(_()) } val actions = handler.actions val checks = handler.checks if (debug) println( s"""\\nShoreditch: /$base => $longName ($alias) V$version, checksEnabled: $checksEnabled, actionsEnabled: $actionsEnabled | (${checks.size}) checks:\\n${describe(checks.toSeq)} | (${actions.size}) actions:\\n${describe(actions.toSeq)} """.stripMargin ) private def describe(x: Seq[(String, Service)]) = x.map(c => s" - ${c._1 + " -> " + c._2}").mkString("\\n") } object Shoreditch { implicit class CheckRouteBuildingString(val path: String) extends AnyVal { def action(f: ⇒ Action): Route[Service] = POST0("action/" + path)(f) def check(f: ⇒ Check): Route[Service] = GET0("check/" + path)(f) def check(f: (String) ⇒ Check): Route[Service] = GET1("check/" + path)(f) def check(f: (String,String) ⇒ Check): Route[Service] = GET2("check/" + path)(f) } }
alltonp/shoreditch
src/main/scala/im/mange/shoreditch/Shoreditch.scala
Scala
apache-2.0
1,688
package assets.mustache.overseas import uk.gov.gds.ier.transaction.overseas.lastUkAddress.LastUkAddressLookupMustache import uk.gov.gds.ier.test._ class LastUkAddressLookupTemplateTest extends TemplateTestSuite with LastUkAddressLookupMustache { it should "properly render" in { running(FakeApplication()) { val data = new LookupModel( question = Question(), postcode = Field( id = "postcodeId", name = "postcodeName", classes = "postcodeClasses", value = "postcodeValue" ) ) val html = Mustache.render("overseas/lastUkAddressLookup", data) val doc = Jsoup.parse(html.toString) val fieldset = doc.select("fieldset").first() val label = fieldset.select("label").first() label.attr("for") should be("postcodeId") val divWrapper = fieldset.select("div").first() divWrapper.attr("class") should include("postcodeClasses") val input = divWrapper.select("input").first() input.attr("id") should be("postcodeId") input.attr("name") should be("postcodeName") input.attr("value") should be("postcodeValue") input.attr("class") should include("postcodeClasses") } } }
michaeldfallen/ier-frontend
test/assets/mustache/overseas/LastUkAddressLookupTemplateTest.scala
Scala
mit
1,230
/** * Copyright 2010-2013 Artima, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package examples object FunSpecExamples extends StyleTraitExamples { val name: String = "FunSpec" val description: String = """For teams coming from Ruby's RSpec tool, FunSpec will feel very familiar; More generally, for any team that prefers BDD, FunSpec's nesting and gentle guide to structuring text (with describe and it) provides an excellent general-purpose choice for writing specification-style tests.""" /* val exampleUsage: String = """<span class="stImport">import org.scalatest.FunSpec</span> |<span class="stReserved">class</span> <span class="stType">SetSpec</span> <span class="stReserved">extends</span> <span class="stType">FunSpec</span> { | describe(<span class="stLiteral">"A Set"</span>) { | describe(<span class="stLiteral">"when empty"</span>) { | it(<span class="stLiteral">"should have size 0"</span>) { assert(<span class="stType">Set</span>.empty.size === <span class="stLiteral">0</span>) } | it(<span class="stLiteral">"should produce NoSuchElementException when head is invoked"</span>) { | intercept[<span class="stType">NoSuchElementException]</span> { <span class="stType">Set</span>.empty.head } | } | } | } |} """.stripMargin */ val exampleUsage: String = """<span class="stImport">import org.scalatest._</span> | |<span class="stReserved">class</span> <span class="stType">SetSpec</span> <span class="stReserved">extends</span> <span class="stType">FunSpec</span> { | <span class="stReserved">override</span> <span class="stReserved">def</span> withFixture(test: <span class="stType">NoArgTest</span>) = { <span class="stExplain">// Define a shared fixture</span> | <span class="stExplain">// Shared setup (run at beginning of each test)</span> | <span class="stReserved">try</span> test() | <span class="stReserved">finally</span> { | <span class="stExplain">// Shared cleanup (run at end of each test)</span> | } | } | | <span class="stExplain">// Describe a <em>scope</em> for a <em>subject</em>, in this case: "A Set"</span> | describe(<span class="stLiteral">"A Set"</span>) { <span class="stExplain">// All tests within these curly braces are about "A Set"</span> | | <span class="stExplain">// Can describe nested scopes that "narrow" its outer scopes</span> | describe(<span class="stLiteral">"(when empty)"</span>) { <span class="stExplain">// All tests within these curly braces are about "A Set (when empty)"</span> | | it(<span class="stLiteral">"should have size 0"</span>) { <span class="stExplain">// Here, 'it' refers to "A Set (when empty)". The full name</span> | assert(<span class="stType">Set</span>.empty.size == <span class="stLiteral">0</span>) <span class="stExplain">// of this test is: "A Set (when empty) should have size 0"</span> | } | it(<span class="stLiteral">"should produce NoSuchElementException when head is invoked"</span>) { // <span class="stExplain">Define another test</span> | intercept[<span class="stType">NoSuchElementException</span>] { | <span class="stType">Set</span>.empty.head | } | } | ignore(<span class="stLiteral">"should be empty"</span>) { <span class="stExplain">// To ignore a test, change 'it' to 'ignore'...</span> | assert(<span class="stType">Set</span>.empty.isEmpty) | } | } | | <span class="stExplain">// Describe a second nested scope that narrows "A Set" in a different way</span> | describe(<span class="stLiteral">"(when non-empty)"</span>) { <span class="stExplain">// All tests within these curly braces are about "A Set (when non-empty)"</span> | | it(<span class="stLiteral">"should have the correct size"</span>) { <span class="stExplain">// Here, 'it' refers to "A Set (when non-empty)". This test's full</span> | assert(<span class="stType">Set</span>(<span class="stLiteral">1</span>, <span class="stLiteral">2</span>, <span class="stLiteral">3</span>).size == <span class="stLiteral">3</span>) <span class="stExplain">// name is: "A Set (when non-empty) should have the correct size"</span> | } | <span class="stExplain">// Define a pending test by using (pending) for the body</span> | it(<span class="stLiteral">"should return a contained value when head is invoked"</span>) (pending) | <span class="stImport">import tagobjects.Slow</span> | it(<span class="stLiteral">"should be non-empty"</span>, <span class="stType">Slow</span>) { <span class="stExplain">// Tag a test by placing a tag object after the test name</span> | assert(<span class="stType">Set</span>(<span class="stLiteral">1</span>, <span class="stLiteral">2</span>, <span class="stLiteral">3</span>).nonEmpty) | } | } | } |} | |<span class="stExplain">// Can also pass fixtures into tests with fixture.FunSpec</span> |<span class="stReserved">class</span> <span class="stType">StringSpec</span> <span class="stReserved">extends</span> <span class="stType">fixture.FunSpec</span> { | <span class="stReserved">type</span> FixtureParam = <span class="stType">String</span> <span class="stExplain">// Define the type of the passed fixture object</span> | <span class="stReserved">override</span> <span class="stReserved">def</span> withFixture(test: <span class="stType">OneArgTest</span>) = { | <span class="stExplain">// Shared setup (run before each test), including...</span> | <span class="stReserved">val</span> fixture = <span class="stLiteral">"a fixture object"</span> <span class="stExplain">// ...creating a fixture object</span> | <span class="stReserved">try</span> test(fixture) <span class="stExplain">// Pass the fixture into the test</span> | <span class="stReserved">finally</span> { | <span class="stExplain">// Shared cleanup (run at end of each test)</span> | } | } | describe(<span class="stLiteral">"The passed fixture"</span>) { | it(<span class="stLiteral">"can be used in the test"</span>) { s =&gt; <span class="stExplain">// Fixture passed in as s</span> | assert(s == <span class="stLiteral">"a fixture object"</span>) | } | } |} | |@DoNotDiscover <span class="stExplain">// Disable discovery of a test class</span> |<span class="stReserved">class</span> <span class="stType">InvisibleSpec</span> <span class="stReserved">extends</span> <span class="stType">FunSpec</span> { <span class="stBlockComment">/*code omitted*/</span> } | |@Ignore <span class="stExplain">// Ignore all tests in a test class</span> |<span class="stReserved">class</span> <span class="stType">IgnoredSpec</span> <span class="stReserved">extends</span> <span class="stType">FunSpec</span> { <span class="stBlockComment">/*code omitted*/</span> } | |<span class="stImport">import tags.Slow</span> |@Slow <span class="stExplain">// Mark all tests in a test class with a tag</span> |<span class="stReserved">class</span> <span class="stType">SlowSpec</span> <span class="stReserved">extends</span> <span class="stType">FunSpec</span> { <span class="stBlockComment">/*code omitted*/</span> } |""".stripMargin val play2Example: String = """<span class="stImport">import org.scalatest._</span> |<span class="stImport">import play.api.test._</span> |<span class="stImport">import play.api.test.Helpers._</span> | |<span class="stReserved">class</span> <span class="stType">ExampleSpec</span> <span class="stReserved">extends</span> <span class="stType">FunSpec</span> <span class="stReserved">with</span> <span class="stType">Matchers</span> { | describe(<span class="stLiteral">"Application should"</span>) { | it(<span class="stLiteral">"send 404 on a bad request"</span>) { | running(<span class="stType">FakeApplication</span>()) { | route(<span class="stType">FakeRequest</span>(GET, <span class="stLiteral">"/boum"</span>)) shouldBe <span class="stType">None</span> | } | } | it(<span class="stLiteral">"render the index page"</span>) { | running(<span class="stType">FakeApplication</span>()) { | <span class="stReserved">val</span> home = route(<span class="stType">FakeRequest</span>(GET, <span class="stLiteral">"/"</span>)).get | status(home) shouldBe OK | contentType(home) shouldBe <span class="stType">Some(<span class="stLiteral">"text/html"</span>)</span> | contentAsString(home) should include (<span class="stLiteral">"ScalaTest"</span>) | } | } | } |}""".stripMargin val doNotDiscover: String = """<span class="stImport">import org.scalatest._</span> |@DoNotDiscover |<span class="stReserved">class</span> <span class="stType">SetSpec</span> <span class="stReserved">extends</span> <span class="stType">FunSpec</span> { <span class="stBlockComment">/*code omitted*/</span> } """.stripMargin val ignoreTest: String = """<span class="stImport">import org.scalatest._</span> |<span class="stReserved">class</span> <span class="stType">SetSpec</span> <span class="stReserved">extends</span> <span class="stType">FunSpec</span> { | ignore(<span class="stLiteral">"should have size 0"</span>) { <span class="stBlockComment">/*code omitted*/</span> } |}""".stripMargin val pendingTest: String = """<span class="stImport">import org.scalatest._</span> |<span class="stReserved">class</span> <span class="stType">SetSpec</span> <span class="stReserved">extends</span> <span class="stType">FunSpec</span> { | it(<span class="stLiteral">"should have size 0"</span>) (pending) |}""".stripMargin val taggingTest: String = """<span class="stImport">import org.scalatest._</span> |<span class="stReserved">object</span> <span class="stType">SlowTest</span> <span class="stReserved">extends</span> <span class="stType">Tag</span>(<span class="stLiteral">"com.mycompany.tags.SlowTest"</span>) |<span class="stReserved">class</span> <span class="stType">SetSpec</span> <span class="stReserved">extends</span> <span class="stType">FunSpec</span> { | it(<span class="stLiteral">"should have size 0"</span>, <span class="stType">SlowTest</span>) { | <span class="stBlockComment">/*code omitted*/</span> | } |}""".stripMargin val infoTest: String = """<span class="stImport">import org.scalatest._</span> |<span class="stReserved">class</span> <span class="stType">SetSpec</span> <span class="stReserved">extends</span> <span class="stType">FunSpec</span> { | it(<span class="stLiteral">"should have size 0"</span>) { | info(<span class="stLiteral">"Some information."</span>) | <span class="stBlockComment">/*code omitted*/</span> | } |}""".stripMargin val fixtureNoArgTest: String = """<span class="stImport">import org.scalatest._</span> |<span class="stReserved">class</span> <span class="stType">SetSpec</span> <span class="stReserved">extends</span> <span class="stType">FunSpec</span> { | <span class="stReserved">def</span> setup() { <span class="stBlockComment">/*code omitted*/</span> } | <span class="stReserved">def</span> cleanup() { <span class="stBlockComment">/*code omitted*/</span> } | <span class="stReserved">override</span> <span class="stReserved">protected</span> <span class="stReserved">def</span> withFixture(test: <span class="stType">NoArgTest</span>) = { | setup() | <span class="stReserved">try</span> test() <span class="stReserved">finally</span> cleanup() | } |}""".stripMargin val fixtureOneArgTest: String = """<span class="stImport">import org.scalatest._</span> |<span class="stReserved">class</span> <span class="stType">SetSpec</span> <span class="stReserved">extends</span> <span class="stType">fixture.FunSpec</span> { | <span class="stReserved">def</span> setup() { <span class="stBlockComment">/*code omitted*/</span> } | <span class="stReserved">def</span> cleanup() { <span class="stBlockComment">/*code omitted*/</span> } | <span class="stReserved">type</span> FixtureParam = <span class="stType">String</span> | <span class="stReserved">override</span> <span class="stReserved">protected</span> <span class="stReserved">def</span> withFixture(test: <span class="stType">OneArgTest</span>) = { | setup() | <span class="stReserved">try</span> test(<span class="stLiteral">"this is a fixture param"</span>) <span class="stReserved">finally</span> cleanup() | } |}""".stripMargin val seleniumExample: String = """<span class="stImport">import org.scalatest._ |import selenium._</span> |<span class="stReserved">class</span> <span class="stType">BlogSpec</span> <span class="stReserved">extends</span> <span class="stType">FunSpec</span> <span class="stReserved">with</span> <span class="stType">WebBrowser</span> <span class="stReserved">with</span> <span class="stType">HtmlUnit</span> { | <span class="stReserved">val</span> host = <span class="stLiteral">"http://localhost:9000/"</span> | it(<span class="stLiteral">"should have the correct title"</span>) { | go to (host + <span class="stLiteral">"index.html"</span>) | pageTitle should be (<span class="stLiteral">"Awesome Blog"</span>) | } |}""".stripMargin }
jedesah/scalatest-website
app/examples/FunSpecExamples.scala
Scala
apache-2.0
14,454
package io.buoyant.namerd.iface import com.twitter.conversions.DurationOps._ import com.twitter.finagle._ import com.twitter.finagle.naming.NameInterpreter import com.twitter.logging.Level import com.twitter.util._ import io.buoyant.namer.{ConfiguredDtabNamer, DelegateTree, Metadata, RichActivity} import io.buoyant.namerd.NullDtabStore import io.buoyant.test.{Awaits, FunSuite} import org.scalatest.concurrent.{Eventually, IntegrationPatience} import org.scalatest.time._ class HttpNamerEndToEndTest extends FunSuite with Eventually with IntegrationPatience with Awaits { implicit override val patienceConfig = PatienceConfig( timeout = scaled(Span(5, Seconds)), interval = scaled(Span(100, Milliseconds)) ) def retryIn() = 1.second val clientId = Path.empty val ns = "testns" test("service resurrection") { val serverState = Var[Activity.State[NameTree[Name.Bound]]](Activity.Pending) @volatile var clientState: Activity.State[NameTree[Name.Bound]] = Activity.Pending val reqDtab = Dtab.read("/woop => /w00t") val reqPath = Path.read("/woop/woop") val id = Path.read("/io.l5d.w00t/woop") val namer = new Namer { def lookup(path: Path) = path match { case Path.Utf8("woop") => Activity(serverState) case _ => Activity.exception(new Exception) } } def interpreter(ns: String) = new NameInterpreter { def bind(dtab: Dtab, path: Path) = if (dtab == reqDtab && path == reqPath) Activity(serverState) else Activity.exception(new Exception) } val namers = Map(Path.read("/io.l5d.w00t") -> namer) val service = new HttpControlService(NullDtabStore, interpreter, namers) val client = new StreamingNamerClient(service, ns) val act = client.bind(reqDtab, reqPath) val obs = act.states.respond { s => clientState = s } assert(clientState == Activity.Pending) val serverAddr0 = Var[Addr](Addr.Bound()) serverState() = Activity.Ok(NameTree.Leaf(Name.Bound(serverAddr0, id))) eventually { assert(clientState == serverState.sample()) } val Activity.Ok(NameTree.Leaf(bound0)) = clientState assert(bound0.id == id) @volatile var clientAddr0: Addr = Addr.Pending bound0.addr.changes.respond(clientAddr0 = _) assert(clientAddr0 == Addr.Bound()) serverAddr0() = Addr.Bound( Set(Address("127.1", 4321)), Addr.Metadata(Metadata.authority -> "acme.co") ) eventually { assert(clientAddr0 == Addr.Bound(Set(Address("127.1", 4321)), Addr.Metadata(Metadata.authority -> "acme.co"))) } serverAddr0() = Addr.Bound( Set(Address("127.1", 5432)), Addr.Metadata(Metadata.authority -> "acme.co") ) eventually { assert(clientAddr0 == Addr.Bound(Set(Address("127.1", 5432)), Addr.Metadata(Metadata.authority -> "acme.co"))) } serverState() = Activity.Ok(NameTree.Neg) eventually { assert(clientState == serverState.sample()) } eventually { assert(clientAddr0 == Addr.Neg) } val serverAddr1 = Var[Addr](Addr.Bound()) serverState() = Activity.Ok(NameTree.Leaf(Name.Bound(serverAddr1, id))) eventually { assert(clientState == serverState.sample()) } val Activity.Ok(NameTree.Leaf(bound1)) = clientState assert(bound1.id == id) @volatile var clientAddr1: Addr = Addr.Pending bound1.addr.changes.respond(clientAddr1 = _) serverAddr1() = Addr.Bound(Address("127.1", 5432)) eventually { assert(clientAddr1 == serverAddr1.sample()) } serverAddr1() = Addr.Bound(Address("127.1", 6543)) eventually { assert(clientAddr1 == serverAddr1.sample()) } } test("delegation") { val id = Path.read("/io.l5d.w00t") val namer = new Namer { def lookup(path: Path) = { path match { case Path.Utf8("woop") => Activity.value(NameTree.Leaf(Name.Bound( Var( Addr.Bound( Set(Address("localhost", 9000)), Addr.Metadata(Metadata.authority -> "acme.co") ) ), Path.read("/io.l5d.w00t/woop"), Path.empty ))) case _ => Activity.value(NameTree.Neg) } } } val namers = Seq(id -> namer) def interpreter(ns: String) = new ConfiguredDtabNamer( Activity.value(Dtab.read("/srv => /io.l5d.w00t; /host => /srv; /svc => /host")), namers ) val service = new HttpControlService(NullDtabStore, interpreter, namers.toMap) val client = new StreamingNamerClient(service, ns) val tree = await(client.delegate( Dtab.read("/host/poop => /srv/woop"), Path.read("/svc/poop") )) assert(tree == DelegateTree.Delegate( Path.read("/svc/poop"), Dentry.nop, DelegateTree.Alt( Path.read("/host/poop"), Dentry.read("/svc=>/host"), List( DelegateTree.Delegate( Path.read("/srv/woop"), Dentry.read("/host/poop=>/srv/woop"), DelegateTree.Leaf( Path.read("/io.l5d.w00t/woop"), Dentry.read("/srv=>/io.l5d.w00t"), Path.read("/io.l5d.w00t/woop") ) ), DelegateTree.Delegate( Path.read("/srv/poop"), Dentry.read("/host=>/srv"), DelegateTree.Neg( Path.read("/io.l5d.w00t/poop"), Dentry.read("/srv=>/io.l5d.w00t") ) ) ): _* ) )) } test("use last good bind data") { val id = Path.read("/io.l5d.w00t") val (act, witness) = Activity[NameTree[Name]]() val namer = new Namer { def lookup(path: Path) = act } val namers = Seq(id -> namer) def interpreter(ns: String) = new ConfiguredDtabNamer( Activity.value(Dtab.read("/svc => /io.l5d.w00t")), namers ) val service = new HttpControlService(NullDtabStore, interpreter, namers.toMap) val client = new StreamingNamerClient(service, ns) witness.notify(Return(NameTree.Leaf(Name.Bound( Var(Addr.Bound(Address("localhost", 9000))), Path.read("/io.l5d.w00t/foo"), Path.empty )))) val bindAct = client.bind(Dtab.empty, Path.read("/svc/foo")) var bound: NameTree[Name.Bound] = null // hold activity open so that it doesn't get restarted and lose state val bindObs = bindAct.values.respond(_ => ()) try { val NameTree.Leaf(bound0) = await(bindAct.toFuture) // hold var open so that it doesn't get restarted and lose state val bound0Obs = bound0.addr.changes.respond(_ => ()) try { assert(bound0.id == Path.read("/io.l5d.w00t/foo")) assert(bound0.addr.sample == Addr.Bound(Address("localhost", 9000))) witness.notify(Throw(new Exception("bind failure"))) val NameTree.Leaf(bound1) = await(bindAct.toFuture) assert(bound1.id == Path.read("/io.l5d.w00t/foo")) assert(bound1.addr.sample == Addr.Bound(Address("localhost", 9000))) } finally await(bound0Obs.close()) } finally await(bindObs.close()) } }
linkerd/linkerd
namerd/iface/control-http/src/test/scala/io/buoyant/namerd/iface/HttpNamerEndToEndTest.scala
Scala
apache-2.0
7,095
/* * The MIT License * * Copyright (c) 2016 Fulcrum Genomics * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. * */ package com.fulcrumgenomics.vcf import com.fulcrumgenomics.FgBioDef._ import com.fulcrumgenomics.commons.io.PathUtil import com.fulcrumgenomics.testing.UnitSpec import com.fulcrumgenomics.util.Io import com.fulcrumgenomics.vcf.HapCutType.{HapCut1, HapCut2, HapCutType} import htsjdk.variant.variantcontext.VariantContext import htsjdk.variant.vcf.VCFFileReader import org.scalatest.ParallelTestExecution /** * Tests for HapCutToVcf. */ class HapCutToVcfTest extends UnitSpec with ParallelTestExecution { private val dir = PathUtil.pathTo("src/test/resources/com/fulcrumgenomics/vcf/testdata") private val originalVcf = dir.resolve("NA12878.GIABPedigreev0.2.17.41100000.41300000.vcf") private val hapCut1Out = dir.resolve("NA12878.GIABPedigreev0.2.17.41100000.41300000.hapcut") private val hapCut1Vcf = dir.resolve("NA12878.GIABPedigreev0.2.17.41100000.41300000.hapcut.vcf") private val hapCut1GatkVcf = dir.resolve("NA12878.GIABPedigreev0.2.17.41100000.41300000.hapcut.gatk.vcf") private val hapCut2Out = dir.resolve("NA12878.GIABPedigreev0.2.17.41100000.41300000.hapcut2") private val hapCut2Vcf = dir.resolve("NA12878.GIABPedigreev0.2.17.41100000.41300000.hapcut2.vcf") private val hapCut2GatkVcf = dir.resolve("NA12878.GIABPedigreev0.2.17.41100000.41300000.hapcut2.gatk.vcf") // For testing HapCut2 producing phased blocks overlapping other phased blocks. private val outOfOrderIn = dir.resolve("blocks_out_of_order.vcf") private val outOfOrderOut = dir.resolve("blocks_out_of_order.hapcut2") private val outOfOrderOutVcf = dir.resolve("blocks_out_of_order.hapcut2.vcf") // For testing HapCut2 with missing variants in the input VCF private val missingVariantsIn = dir.resolve("missing_leading_variants.vcf") private val missingVariantsOut = dir.resolve("missing_leading_variants.hapcut2") // For testing HapCut2 with missing genotype info private val missingGenotyeInfoIn = dir.resolve("hapcut2_for_missing_genotype_info.vcf") private val noSwitchErrorsIn = dir.resolve("no_switch_errors.hapcut2") private val skipPruneIn = dir.resolve("skip_prune.hapcut2") private val withSwitchErrors = dir.resolve("with_switch_errors.hapcut2") // For testing HapCutToVcf with IUPAC codes private val withIupacIn = dir.resolve("with_iupac.vcf") private val withIupacOut = dir.resolve("with_iupac.hapcut") private val withIupacOutVcf = dir.resolve("with_iupac.hapcut.vcf") private def countVcfRecords(vcf: PathToVcf): Int = { val vcfReader = new VCFFileReader(vcf.toFile, false) yieldAndThen(vcfReader.iterator().length)(vcfReader.close()) } private def compareVcfs(newVcf: PathToVcf, originalVcf: PathToVcf): Unit = { val newVcfReader = new VCFFileReader(newVcf.toFile, false) val originalVcfReader = new VCFFileReader(originalVcf.toFile, false) for (newVariantCtx <- newVcfReader) { originalVcfReader.exists { originalVariantCtx => originalVariantCtx.getContig == newVariantCtx.getContig && originalVariantCtx.getStart == newVariantCtx.getStart && originalVariantCtx.getEnd == newVariantCtx.getEnd } shouldBe true } } private def isPhased(ctx: VariantContext, gatkPhasingFormat: Boolean): Boolean = { if (gatkPhasingFormat) ctx.isNotFiltered // are marked as passed filter else ctx.getGenotypes.exists(_.isPhased) // are marked as phased } private def getNumPhasedFromVcf(path: PathToVcf, gatkPhasingFormat: Boolean): Int = { val vcfReader = new VCFFileReader(path.toFile, false) val numPhased = vcfReader.iterator().count { ctx => isPhased(ctx, gatkPhasingFormat) } vcfReader.close() numPhased } private def hasPhasingSetFormatTagButUnphased(path: PathToVcf, gatkPhasingFormat: Boolean): Boolean = { val vcfReader = new VCFFileReader(path.toFile, false) val hasPhasingSetTag = vcfReader .iterator() .filterNot { ctx => isPhased(ctx, gatkPhasingFormat) } .exists { ctx => ctx.getGenotypes.exists(_.hasExtendedAttribute(HapCut1VcfHeaderLines.PhaseSetFormatTag)) } vcfReader.close() hasPhasingSetTag } private def checkHapCutReader(path: FilePath, hapCutType: HapCutType): Unit = { val reader = HapCutReader(path) reader.hapCutType shouldBe hapCutType val allCalls = reader.toSeq val calls = allCalls.flatMap(_.call) allCalls.length shouldBe 342 // 342 total variants calls.length shouldBe 237 // 237 phased variants calls.map(_.phaseSet).distinct.length shouldBe 8 // eight phased blocks // Check the second block (two variants). The first variants is 1/0 while the second is 0/1. { val call = calls(3) call.phaseSet shouldBe 41106449 call.hap1Allele shouldBe 1 call.hap2Allele shouldBe 0 val ctx = call.toVariantContext("Sample") ctx.getGenotype(0).isPhased shouldBe true ctx.getGenotype(0).getAlleles.map(_.getBaseString).toList should contain theSameElementsInOrderAs Seq("CT", "C") } { val call = calls(4) call.phaseSet shouldBe 41106449 call.hap1Allele shouldBe 0 call.hap2Allele shouldBe 1 val ctx = call.toVariantContext("Sample") ctx.getGenotype(0).isPhased shouldBe true ctx.getGenotype(0).getAlleles.map(_.getBaseString).toList should contain theSameElementsInOrderAs Seq("T", "G") } } "HapCutReader" should "read in a HAPCUT1 file" in { checkHapCutReader(hapCut1Out, HapCut1) } it should "read in a HAPCUT2 file" in { checkHapCutReader(hapCut2Out, HapCut2) } it should "read in a HAPCUT1 file that has phased genotypes" in { val input = dir.resolve("block_has_phased_genotypes.hapcut") val reader = HapCutReader(input) val allCalls = reader.toSeq val calls = allCalls.flatMap(_.call) allCalls.length shouldBe 8 // 8 total variants calls.length shouldBe 3 // 3 phased variants calls.map(_.phaseSet).distinct.length shouldBe 1 // a single phased block reader.close() } "HapCutToVcf" should "convert a HAPCUT1 file to VCF in both GATK and VCF-spec phasing format" in { Iterator(true, false).foreach { gatkPhasingFormat => val expectedOutput = if (gatkPhasingFormat) hapCut1GatkVcf else hapCut1Vcf val out = makeTempFile("hap_cut_to_vcf.hapcut", ".vcf") new HapCutToVcf( vcf = originalVcf, input = hapCut1Out, output = out, gatkPhasingFormat = gatkPhasingFormat ).execute() // check that we have the same # of records in the output as the input countVcfRecords(out) shouldBe countVcfRecords(originalVcf) // check that all records in the output are found in the input compareVcfs(out, originalVcf) // get the # of phased variants from the output val numPhasedFromOut = getNumPhasedFromVcf(out, gatkPhasingFormat) // check that the # of variants phased in the output agrees with the # of phased calls produced by HapCut val hapCutReader = HapCutReader(hapCut1Out) val numPhasedFromHapCut = hapCutReader.flatMap(_.call).length numPhasedFromOut shouldBe numPhasedFromHapCut hapCutReader.close() // check that the # of variants phased in the output agrees with the # of phased calls in the expected output numPhasedFromOut shouldBe getNumPhasedFromVcf(expectedOutput, gatkPhasingFormat) // check that if a variant is not phased it does not have a PS tag hasPhasingSetFormatTagButUnphased(out, gatkPhasingFormat) shouldBe false } } it should "convert a HAPCUT2 file to VCF in both GATK and VCF-spec phasing format" in { Iterator(true, false).foreach { gatkPhasingFormat => val expectedOutput = if (gatkPhasingFormat) hapCut2GatkVcf else hapCut2Vcf val out = makeTempFile("hap_cut_to_vcf.hapcut2", ".vcf") new HapCutToVcf( vcf = originalVcf, input = hapCut2Out, output = out, gatkPhasingFormat = gatkPhasingFormat ).execute() // check that we have the same # of records in the output as the input countVcfRecords(out) shouldBe countVcfRecords(originalVcf) // check that all records in the output are found in the input compareVcfs(out, originalVcf) // get the # of phased variants from the output val numPhasedFromOut = getNumPhasedFromVcf(out, gatkPhasingFormat) // check that the # of variants phased in the output agrees with the # of phased calls produced by HapCut val hapCutReader = HapCutReader(hapCut2Out) val numPhasedFromHapCut = hapCutReader.flatMap(_.call).length numPhasedFromOut shouldBe numPhasedFromHapCut hapCutReader.close() // check that the # of variants phased in the output agrees with the # of phased calls in the expected output numPhasedFromOut shouldBe getNumPhasedFromVcf(expectedOutput, gatkPhasingFormat) // check that if a variant is not phased it does not have a PS tag hasPhasingSetFormatTagButUnphased(out, gatkPhasingFormat) shouldBe false } } it should "convert an HAPCUT2 file to VCF when there are overlapping phase blocks" in { val out = makeTempFile("hap_cut_to_vcf.hapcut2", ".vcf") new HapCutToVcf( vcf = outOfOrderIn, input = outOfOrderOut, output = out, gatkPhasingFormat = false ).execute() // check that we have the same # of records in the output as the input countVcfRecords(out) shouldBe countVcfRecords(outOfOrderIn) // check that all records in the output are found in the input compareVcfs(out, outOfOrderIn) // get the # of phased variants from the output val numPhasedFromOut = getNumPhasedFromVcf(out, false) // check that the # of variants phased in the output agrees with the # of phased calls produced by HapCut val hapCutReader = HapCutReader(outOfOrderOut) val numPhasedFromHapCut = hapCutReader.flatMap(_.call).length numPhasedFromOut shouldBe numPhasedFromHapCut hapCutReader.close() // check that the # of variants phased in the output agrees with the # of phased calls in the expected output numPhasedFromOut shouldBe getNumPhasedFromVcf(outOfOrderOutVcf, false) // check that if a variant is not phased it does not have a PS tag hasPhasingSetFormatTagButUnphased(out, false) shouldBe false } it should "convert an empty HAPCUT1/HAPCUT2 file to VCF in both GATK and VCF-spec phasing format" in { Iterator(true, false).foreach { gatkPhasingFormat => val out = makeTempFile("hap_cut_to_vcf.hapcut", ".vcf") val hapCutOut = makeTempFile("hap_cut_to_vcf.hapcut", ".hapcut") Io.writeLines(hapCutOut, Seq.empty) new HapCutToVcf( vcf = originalVcf, input = hapCutOut, output = out, gatkPhasingFormat = gatkPhasingFormat ).execute() // check that we have the same # of records in the output as the input countVcfRecords(out) shouldBe countVcfRecords(originalVcf) // check that all records in the output are found in the input compareVcfs(out, originalVcf) // get the # of phased variants from the output getNumPhasedFromVcf(out, gatkPhasingFormat) shouldBe 0 // check that if a variant is not phased it does not have a PS tag hasPhasingSetFormatTagButUnphased(out, gatkPhasingFormat) shouldBe false } } it should "fail when there are missing variants in the input VCF" in { val out = makeTempFile("hap_cut_to_vcf.hapcut2", ".vcf") an[Exception] should be thrownBy new HapCutToVcf( vcf = missingVariantsIn, input = missingVariantsOut, output = out, gatkPhasingFormat = false ).execute() } it should "support missing genotype info in HapCut2" in { Seq(noSwitchErrorsIn, skipPruneIn, withSwitchErrors).foreach { hapCut2In => val expectedOutput = PathUtil.replaceExtension(hapCut2In, ".vcf") val out = makeTempFile("hap_cut_to_vcf.hapcut2", ".vcf") new HapCutToVcf( vcf = missingGenotyeInfoIn, input = hapCut2In, output = out, gatkPhasingFormat = false ).execute() // check that we have the same # of records in the output as the input countVcfRecords(out) shouldBe countVcfRecords(missingGenotyeInfoIn) // check that all records in the output are found in the input compareVcfs(out, missingGenotyeInfoIn) // get the # of phased variants from the output val numPhasedFromOut = getNumPhasedFromVcf(out, false) // check that the # of variants phased in the output agrees with the # of phased calls produced by HapCut val hapCutReader = HapCutReader(hapCut2In) val numPhasedFromHapCut = hapCutReader.flatMap(_.call).length numPhasedFromOut shouldBe numPhasedFromHapCut hapCutReader.close() // check that the # of variants phased in the output agrees with the # of phased calls in the expected output //Files.copy(out, expectedOutput, StandardCopyOption.REPLACE_EXISTING) numPhasedFromOut shouldBe getNumPhasedFromVcf(expectedOutput, false) // check that if a variant is not phased it does not have a PS tag hasPhasingSetFormatTagButUnphased(out, false) shouldBe false } } it should "fail when IUPAC codes are found in the VCF" in { val out = makeTempFile("hap_cut_to_vcf.hapcut", ".vcf") an[Exception] should be thrownBy new HapCutToVcf( vcf = withIupacIn, input = withIupacOut, output = out, gatkPhasingFormat = false, fixAmbiguousReferenceAlleles = false ).execute() } it should "succeed when IUPAC codes are found in the VCF and --fix-ambiguous-reference-alleles is specified" in { val out = makeTempFile("hap_cut_to_vcf.hapcut", ".vcf") new HapCutToVcf( vcf = withIupacIn, input = withIupacOut, output = out, gatkPhasingFormat = false, fixAmbiguousReferenceAlleles = true ).execute() // check the reference alleles are not IUPAC val referenceBases = Io.readLines(out).filterNot(_.startsWith("#")).flatMap(_.split('\\t')(3)).mkString("") referenceBases shouldBe "CCTGCCTG" // last G changed from K } "HapCutToVcf.HapCut2GenotypeInfo" should "ignore the values of pruned, SE, and NE when they are '.'" in { // HapCut2 run with default options: switch error scores are blank HapCut2GenotypeInfo(info="0\\t.\\t16.00", missingAlleles=false, thresholdPruning=false).asInstanceOf[HapCut2GenotypeInfo] shouldBe HapCut2GenotypeInfo(pruned=Some(false), None, Some(16.00)) // HapCut2 run with error analysis: switch error scores are present HapCut2GenotypeInfo(info="0\\t1.00\\t16.00", missingAlleles=false, thresholdPruning=false).asInstanceOf[HapCut2GenotypeInfo] shouldBe HapCut2GenotypeInfo(pruned=Some(false), Some(1.00), Some(16.00)) // HapCut2 with skip prune: all info are blank HapCut2GenotypeInfo(info=".\\t.\\t.", missingAlleles=false, thresholdPruning=false).asInstanceOf[HapCut2GenotypeInfo] shouldBe HapCut2GenotypeInfo(pruned=None, None, None) } }
fulcrumgenomics/fgbio
src/test/scala/com/fulcrumgenomics/vcf/HapCutToVcfTest.scala
Scala
mit
16,626
package com.adamsresearch.mbs.fanniemae.monthlyfiles import java.text.SimpleDateFormat import java.util.Date /** * Created by wma on 2/2/15. * * TODO: parse these COBOL PICTURE elements... */ class FixedRateQuartile case class FixedQuartilesHeader(quartileRecordType: String, poolNumber: String, prefix: String, reportingPeriod: Date, // yyyyMMdd cusipNumber: String, issueDate: Date) // yyyyMMdd extends FixedRateQuartile case class FixedQuartilesDetails(quartileRecordType: String) extends FixedRateQuartile object FixedRateQuartile { val recordLength = 199 // parses a record, returning either a Header or Detail-type record // depending on the quartileRecordType field, or None if we have supplied // something that doesn't work. def parseFixedRateQuartile(record: String): Option[FixedRateQuartile] = { try { record.length match { case `recordLength` => // look at first char, which tells us if this is a header or details record: record.substring(0, 1) match { case "1" => Some(new FixedQuartilesHeader(record.substring(0, 1), record.substring(1, 7).trim, record.substring(7, 10).trim, new SimpleDateFormat("yyyyMMdd").parse(record.substring(10, 16)), record.substring(16, 25).trim, new SimpleDateFormat("yyyyMMdd").parse(record.substring(25, 34)) )) case "2" => Some(new FixedQuartilesDetails(record.substring(0, 1))) case _ => None } case _ => None } } catch { case ex: Exception => None } } }
waynemadams/mbs-parser
src/main/scala/com/adamsresearch/mbs/fanniemae/monthlyfiles/FixedRateQuartile.scala
Scala
apache-2.0
1,973
package org.adridadou.ethereum.propeller.values import org.adridadou.ethereum.propeller.keystore.AccountProvider import org.ethereum.crypto.ECKey import org.scalacheck.Arbitrary._ import org.scalacheck.Prop._ import org.scalatest.check.Checkers import org.scalatest.{Matchers, _} import scala.util.{Failure, Success, Try} /** * Created by davidroon on 26.03.17. * This code is released under Apache 2 license */ class EthAccountTest extends FlatSpec with Matchers with Checkers { "An ethereum account" should "generate the same key than the one in ethereumJ if given a specific seed" in { check(forAll(arbitrary[BigInt])(checkSameAddressGenerated)) } it should "be able to generate and then recover from a random account" in { val account = AccountProvider.random() val data = account.getDataPrivateKey val account2 = AccountProvider.fromPrivateKey(data) account.getAddress shouldEqual account2.getAddress } private def checkSameAddressGenerated(seed: BigInt) = { if(seed === 0 ){ Try(ECKey.fromPrivate(seed.bigInteger)) match { case Success(_) => throw new RuntimeException("it should not be possible to create a private key from int 0") case Failure(ex) => ex.getMessage shouldEqual "Public key must not be a point at infinity, probably your private key is incorrect" true } } else { val ethjVersion = ECKey.fromPrivate(seed.bigInteger) val propellerVersion = new EthAccount(seed.bigInteger) val ethjAddress = EthAddress.of(ethjVersion.getAddress) ethjVersion.getPubKeyPoint shouldEqual propellerVersion.getPublicKey ethjAddress shouldEqual propellerVersion.getAddress true } } }
adridadou/eth-propeller-core
src/test/scala/org/adridadou/ethereum/propeller/values/EthAccountTest.scala
Scala
apache-2.0
1,735
package is.hail.methods import is.hail.HailContext import is.hail.annotations._ import is.hail.expr.ir._ import is.hail.expr.ir.functions.MatrixToTableFunction import is.hail.types.physical.{PCanonicalString, PCanonicalStruct, PFloat64, PInt64, PString, PStruct} import is.hail.types.virtual.{TFloat64, TStruct} import is.hail.types.{MatrixType, TableType} import is.hail.rvd.RVDContext import is.hail.sparkextras.ContextRDD import is.hail.utils._ import is.hail.variant.{Call, Genotype, HardCallView} import org.apache.spark.rdd.RDD import org.apache.spark.sql.Row import scala.language.higherKinds object IBDInfo { def apply(Z0: Double, Z1: Double, Z2: Double): IBDInfo = { IBDInfo(Z0, Z1, Z2, Z1 / 2 + Z2) } val pType = PCanonicalStruct(("Z0", PFloat64()), ("Z1", PFloat64()), ("Z2", PFloat64()), ("PI_HAT", PFloat64())) def fromRegionValue(offset: Long): IBDInfo = { val Z0 = Region.loadDouble(pType.loadField(offset, 0)) val Z1 = Region.loadDouble(pType.loadField(offset, 1)) val Z2 = Region.loadDouble(pType.loadField(offset, 2)) val PI_HAT = Region.loadDouble(pType.loadField(offset, 3)) IBDInfo(Z0, Z1, Z2, PI_HAT) } } case class IBDInfo(Z0: Double, Z1: Double, Z2: Double, PI_HAT: Double) { def pointwiseMinus(that: IBDInfo): IBDInfo = IBDInfo(Z0 - that.Z0, Z1 - that.Z1, Z2 - that.Z2, PI_HAT - that.PI_HAT) def hasNaNs: Boolean = Array(Z0, Z1, Z2, PI_HAT).exists(_.isNaN) def toAnnotation: Annotation = Annotation(Z0, Z1, Z2, PI_HAT) def toRegionValue(rvb: RegionValueBuilder) { rvb.addDouble(Z0) rvb.addDouble(Z1) rvb.addDouble(Z2) rvb.addDouble(PI_HAT) } } object ExtendedIBDInfo { val pType = PCanonicalStruct(("ibd", IBDInfo.pType), ("ibs0", PInt64()), ("ibs1", PInt64()), ("ibs2", PInt64())) def fromRegionValue(offset: Long): ExtendedIBDInfo = { val ibd = IBDInfo.fromRegionValue(pType.loadField(offset, 0)) val ibs0 = Region.loadLong(pType.loadField(offset, 1)) val ibs1 = Region.loadLong(pType.loadField(offset, 2)) val ibs2 = Region.loadLong(pType.loadField(offset, 3)) ExtendedIBDInfo(ibd, ibs0, ibs1, ibs2) } } case class ExtendedIBDInfo(ibd: IBDInfo, ibs0: Long, ibs1: Long, ibs2: Long) { def pointwiseMinus(that: ExtendedIBDInfo): ExtendedIBDInfo = ExtendedIBDInfo(ibd.pointwiseMinus(that.ibd), ibs0 - that.ibs0, ibs1 - that.ibs1, ibs2 - that.ibs2) def hasNaNs: Boolean = ibd.hasNaNs def makeRow(i: Any, j: Any): Row = Row(i, j, ibd.toAnnotation, ibs0, ibs1, ibs2) def toRegionValue(rvb: RegionValueBuilder) { rvb.startStruct() ibd.toRegionValue(rvb) rvb.endStruct() rvb.addLong(ibs0) rvb.addLong(ibs1) rvb.addLong(ibs2) } } case class IBSExpectations( E00: Double, E10: Double, E20: Double, E11: Double, E21: Double, E22: Double = 1, nonNaNCount: Int = 1) { def hasNaNs: Boolean = Array(E00, E10, E20, E11, E21).exists(_.isNaN) def normalized: IBSExpectations = IBSExpectations(E00 / nonNaNCount, E10 / nonNaNCount, E20 / nonNaNCount, E11 / nonNaNCount, E21 / nonNaNCount, E22, this.nonNaNCount) def scaled(N: Long): IBSExpectations = IBSExpectations(E00 * N, E10 * N, E20 * N, E11 * N, E21 * N, E22 * N, this.nonNaNCount) def join(that: IBSExpectations): IBSExpectations = if (this.hasNaNs) that else if (that.hasNaNs) this else IBSExpectations(E00 + that.E00, E10 + that.E10, E20 + that.E20, E11 + that.E11, E21 + that.E21, nonNaNCount = nonNaNCount + that.nonNaNCount) } object IBSExpectations { def empty: IBSExpectations = IBSExpectations(0, 0, 0, 0, 0, nonNaNCount = 0) } object IBD { def indicator(b: Boolean): Int = if (b) 1 else 0 def countRefs(gtIdx: Int): Int = { val gt = Genotype.allelePair(gtIdx) indicator(gt.j == 0) + indicator(gt.k == 0) } def ibsForGenotypes(gs: HardCallView, maybeMaf: Option[Double]): IBSExpectations = { def calculateCountsFromMAF(maf: Double) = { var count = 0 var i = 0 while (i < gs.getLength) { gs.setGenotype(i) if (gs.hasGT) count += 1 i += 1 } val Na = count * 2.0 val p = 1 - maf val q = maf val x = Na * p val y = Na * q (Na, x, y, p, q) } def estimateFrequenciesFromSample = { var na = 0 var x = 0.0 var i = 0 while (i < gs.getLength) { gs.setGenotype(i) if (gs.hasGT) { na += 2 x += countRefs(Call.unphasedDiploidGtIndex(gs.getGT)) } i += 1 } val Na = na.toDouble val y = Na - x val p = x / Na val q = y / Na (Na, x, y, p, q) } val (na, x, y, p, q) = maybeMaf.map(calculateCountsFromMAF).getOrElse(estimateFrequenciesFromSample) val Na = na val a00 = 2 * p * p * q * q * ((x - 1) / x * (y - 1) / y * (Na / (Na - 1)) * (Na / (Na - 2)) * (Na / (Na - 3))) val a10 = 4 * p * p * p * q * ((x - 1) / x * (x - 2) / x * (Na / (Na - 1)) * (Na / (Na - 2)) * (Na / (Na - 3))) + 4 * p * q * q * q * ((y - 1) / y * (y - 2) / y * (Na / (Na - 1)) * (Na / (Na - 2)) * (Na / (Na - 3))) val a20 = q * q * q * q * ((y - 1) / y * (y - 2) / y * (y - 3) / y * (Na / (Na - 1)) * (Na / (Na - 2)) * (Na / (Na - 3))) + p * p * p * p * ((x - 1) / x * (x - 2) / x * (x - 3) / x * (Na / (Na - 1)) * (Na / (Na - 2)) * (Na / (Na - 3))) + 4 * p * p * q * q * ((x - 1) / x * (y - 1) / y * (Na / (Na - 1)) * (Na / (Na - 2)) * (Na / (Na - 3))) val a11 = 2 * p * p * q * ((x - 1) / x * Na / (Na - 1) * Na / (Na - 2)) + 2 * p * q * q * ((y - 1) / y * Na / (Na - 1) * Na / (Na - 2)) val a21 = p * p * p * ((x - 1) / x * (x - 2) / x * Na / (Na - 1) * Na / (Na - 2)) + q * q * q * ((y - 1) / y * (y - 2) / y * Na / (Na - 1) * Na / (Na - 2)) + p * p * q * ((x - 1) / x * Na / (Na - 1) * Na / (Na - 2)) + p * q * q * ((y - 1) / y * Na / (Na - 1) * Na / (Na - 2)) IBSExpectations(a00, a10, a20, a11, a21) } def calculateIBDInfo(N0: Long, N1: Long, N2: Long, ibse: IBSExpectations, bounded: Boolean): ExtendedIBDInfo = { val ibseN = ibse.scaled(N0 + N1 + N2) val Z0 = N0 / ibseN.E00 val Z1 = (N1 - Z0 * ibseN.E10) / ibseN.E11 val Z2 = (N2 - Z0 * ibseN.E20 - Z1 * ibseN.E21) / ibseN.E22 val ibd = if (bounded) { if (Z0 > 1) { IBDInfo(1, 0, 0) } else if (Z1 > 1) { IBDInfo(0, 1, 0) } else if (Z2 > 1) { IBDInfo(0, 0, 1) } else if (Z0 < 0) { val S = Z1 + Z2 IBDInfo(0, Z1 / S, Z2 / S) } else if (Z1 < 0) { val S = Z0 + Z2 IBDInfo(Z0 / S, 0, Z2 / S) } else if (Z2 < 0) { val S = Z0 + Z1 IBDInfo(Z0 / S, Z1 / S, 0) } else { IBDInfo(Z0, Z1, Z2) } } else { IBDInfo(Z0, Z1, Z2) } ExtendedIBDInfo(ibd, N0, N1, N2) } final val chunkSize = 1024 def computeIBDMatrix(input: MatrixValue, computeMaf: Option[(RegionValue) => Double], min: Option[Double], max: Option[Double], sampleIds: IndexedSeq[String], bounded: Boolean): ContextRDD[Long] = { val nSamples = input.nCols val rowPType = input.rvRowPType val unnormalizedIbse = input.rvd.mapPartitions { (ctx, it) => val rv = RegionValue(ctx.r) val view = HardCallView(rowPType) it.map { ptr => rv.setOffset(ptr) view.set(ptr) ibsForGenotypes(view, computeMaf.map(f => f(rv))) } }.fold(IBSExpectations.empty)(_ join _) val ibse = unnormalizedIbse.normalized val chunkedGenotypeMatrix = input.rvd.mapPartitions { (_, it) => val view = HardCallView(rowPType) it.map { ptr => view.set(ptr) Array.tabulate[Byte](view.getLength) { i => view.setGenotype(i) if (view.hasGT) IBSFFI.gtToCRep(Call.unphasedDiploidGtIndex(view.getGT)) else IBSFFI.missingGTCRep } } } .zipWithIndex() .flatMap { case (gts, variantId) => val vid = (variantId % chunkSize).toInt gts.grouped(chunkSize) .zipWithIndex .map { case (gtGroup, i) => ((i, variantId / chunkSize), (vid, gtGroup)) } } .aggregateByKey(Array.fill(chunkSize * chunkSize)(IBSFFI.missingGTCRep))({ case (x, (vid, gs)) => for (i <- gs.indices) x(vid * chunkSize + i) = gs(i) x }, { case (x, y) => for (i <- y.indices) if (x(i) == IBSFFI.missingGTCRep) x(i) = y(i) x }) .map { case ((s, v), gs) => (v, (s, IBSFFI.pack(chunkSize, chunkSize, gs))) } val joined = ContextRDD.weaken(chunkedGenotypeMatrix.join(chunkedGenotypeMatrix) // optimization: Ignore chunks below the diagonal .filter { case (_, ((i, _), (j, _))) => j >= i } .map { case (_, ((s1, gs1), (s2, gs2))) => ((s1, s2), IBSFFI.ibs(chunkSize, chunkSize, gs1, gs2)) } .reduceByKey { (a, b) => var i = 0 while (i != a.length) { a(i) += b(i) i += 1 } a }) joined .cmapPartitions { (ctx, it) => val rvb = new RegionValueBuilder(ctx.region) for { ((iChunk, jChunk), ibses) <- it si <- (0 until chunkSize).iterator sj <- (0 until chunkSize).iterator i = iChunk * chunkSize + si j = jChunk * chunkSize + sj if j > i && j < nSamples && i < nSamples idx = si * chunkSize + sj eibd = calculateIBDInfo(ibses(idx * 3), ibses(idx * 3 + 1), ibses(idx * 3 + 2), ibse, bounded) if min.forall(eibd.ibd.PI_HAT >= _) && max.forall(eibd.ibd.PI_HAT <= _) } yield { rvb.start(ibdPType) rvb.startStruct() rvb.addString(sampleIds(i)) rvb.addString(sampleIds(j)) eibd.toRegionValue(rvb) rvb.endStruct() rvb.end() } } } private val ibdPType = PCanonicalStruct(required = true, Array(("i", PCanonicalString()), ("j", PCanonicalString())) ++ ExtendedIBDInfo.pType.fields.map(f => (f.name, f.typ)): _*) private val ibdKey = FastIndexedSeq("i", "j") private[methods] def generateComputeMaf(input: MatrixValue, fieldName: String): (RegionValue) => Double = { val rvRowType = input.rvRowType val rvRowPType = input.rvRowPType val field = rvRowType.field(fieldName) assert(field.typ == TFloat64) val rowKeysF = input.typ.extractRowKey val entriesIdx = input.entriesIdx val idx = rvRowType.fieldIdx(fieldName) (rv: RegionValue) => { val isDefined = rvRowPType.isFieldDefined(rv.offset, idx) val maf = Region.loadDouble(rvRowPType.loadField(rv.offset, idx)) if (!isDefined) { val row = new UnsafeRow(rvRowPType, rv).deleteField(entriesIdx) fatal(s"The minor allele frequency expression evaluated to NA at ${ rowKeysF(row) }.") } if (maf < 0.0 || maf > 1.0) { val row = new UnsafeRow(rvRowPType, rv).deleteField(entriesIdx) fatal(s"The minor allele frequency expression for ${ rowKeysF(row) } evaluated to $maf which is not in [0,1].") } maf } } } case class IBD( mafFieldName: Option[String] = None, bounded: Boolean = true, min: Option[Double] = None, max: Option[Double] = None) extends MatrixToTableFunction { min.foreach(min => optionCheckInRangeInclusive(0.0, 1.0)("minimum", min)) max.foreach(max => optionCheckInRangeInclusive(0.0, 1.0)("maximum", max)) min.liftedZip(max).foreach { case (min, max) => if (min > max) { fatal(s"minimum must be less than or equal to maximum: ${ min }, ${ max }") } } def preservesPartitionCounts: Boolean = false def typ(childType: MatrixType): TableType = TableType(IBD.ibdPType.virtualType, IBD.ibdKey, TStruct.empty) def execute(ctx: ExecuteContext, input: MatrixValue): TableValue = { input.requireUniqueSamples("ibd") val computeMaf = mafFieldName.map(IBD.generateComputeMaf(input, _)) val crdd = IBD.computeIBDMatrix(input, computeMaf, min, max, input.stringSampleIds, bounded) TableValue(ctx, IBD.ibdPType, IBD.ibdKey, crdd) } }
danking/hail
hail/src/main/scala/is/hail/methods/IBD.scala
Scala
mit
12,135
/* * Copyright 2016 The BigDL Authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.intel.analytics.bigdl.dllib.nn import com.intel.analytics.bigdl.dllib.nn.abstractnn.{AbstractModule, Activity} import com.intel.analytics.bigdl.dllib.tensor.Tensor import com.intel.analytics.bigdl.dllib.tensor.TensorNumericMath.{NumericWildcard, TensorNumeric} import com.intel.analytics.bigdl.dllib.utils.{T, Table} import scala.reflect.ClassTag /** * Stacks a list of n-dimensional tensors into one (n+1)-dimensional tensor. * @param dimension the dimension to stack along * @tparam T Numeric type. Only support float/double now */ @SerialVersionUID(3457313421501931556L) class Pack[T: ClassTag] (val dimension: Int)(implicit ev: TensorNumeric[T]) extends AbstractModule[Activity, Tensor[_], T] { private def getPositiveDimension(input: Table): Int = { var nDim = this.dimension val firstInput: Tensor[_] = input(1) if (nDim < 0) { nDim = firstInput.dim() + nDim + 1 } require(nDim <= firstInput.dim() + 1, "dimension exceeds input dimensions" + s"dimension $nDim, inputDimension ${firstInput.dim()}") nDim } override def updateOutput(input: Activity): Tensor[_] = { val tableInput = input match { case t: Tensor[_] => T(t) case t: Table => t } val dimension = getPositiveDimension(tableInput) val firstInput: Tensor[_] = tableInput(1) val nDim = firstInput.nDimension() val size: Array[Int] = new Array[Int](nDim + 1) var i = 1 while(i <= nDim + 1) { if (i < dimension) { size(i-1) = firstInput.size(i) } else if (i == dimension) { size(i-1) = tableInput.length() } else { size(i-1) = firstInput.size(i - 1) } i = i + 1 } if (output.getType() != firstInput.getType()) { output = firstInput.emptyInstance() } output.resize(size) i = 1 while (i <= tableInput.length()) { val currentOutput = tableInput[Tensor[NumericWildcard]](i) output.narrow(dimension, i, 1).asInstanceOf[Tensor[NumericWildcard]] .copy(currentOutput) i += 1 } output } override def updateGradInput(input: Activity, gradOutput: Tensor[_]): Activity = { val tableInput = input match { case t: Tensor[_] => T(t) case t: Table => t } val dimension = getPositiveDimension(tableInput) val firstInput = tableInput[Tensor[_]](1) if (input.isTensor) { if (gradInput == null || gradInput.asInstanceOf[Tensor[_]].getType() != firstInput.getType()) { gradInput = firstInput.emptyInstance() } val gradInputTensor = gradInput.asInstanceOf[Tensor[NumericWildcard]] gradInputTensor.resizeAs(firstInput) gradInputTensor.copy(firstInput.asInstanceOf[Tensor[NumericWildcard]]) } else { if (gradInput == null) gradInput = T() val gradInputTable = gradInput.toTable var i = 1 while (i <= tableInput.length()) { if (!gradInputTable.contains(i)) { gradInputTable(i) = gradOutput.emptyInstance() } gradInputTable[Tensor[_]](i).resizeAs(tableInput(i)) i += 1 } i = 1 while (i <= tableInput.length()) { val currentGradInput = gradOutput.select(dimension, i).asInstanceOf[Tensor[NumericWildcard]] gradInputTable[Tensor[NumericWildcard]](i).copy(currentGradInput) i += 1 } } gradInput } } object Pack { def apply[T: ClassTag]( dimension: Int)(implicit ev: TensorNumeric[T]): Pack[T] = { new Pack[T](dimension) } }
intel-analytics/BigDL
scala/dllib/src/main/scala/com/intel/analytics/bigdl/dllib/nn/Pack.scala
Scala
apache-2.0
4,126
package play.api.libs.ws import org.specs2.mutable._ import org.specs2.mock.Mockito import com.ning.http.client.{ Response => AHCResponse, Cookie => AHCCookie } import java.util object WSSpec extends Specification with Mockito { "WS" should { "support several query string values for a parameter" in { val req = WS.url("http://playframework.com/") .withQueryString("foo"->"foo1", "foo"->"foo2") .prepare("GET").build req.getQueryParams.get("foo").contains("foo1") must beTrue req.getQueryParams.get("foo").contains("foo2") must beTrue req.getQueryParams.get("foo").size must equalTo (2) } } "WS Response" should { "get cookies from an AHC response" in { val ahcResponse : AHCResponse = mock[AHCResponse] val (domain, name, value, path, maxAge, secure) = ("example.com", "someName", "someValue", "/", 1000, false) val ahcCookie : AHCCookie = new AHCCookie(domain, name, value, path, maxAge, secure) ahcResponse.getCookies returns util.Arrays.asList(ahcCookie) val response = Response(ahcResponse) val cookies : Seq[Cookie] = response.cookies val cookie = cookies(0) cookie.domain must ===("example.com") cookie.name must beSome("someName") cookie.value must beSome("someValue") cookie.path must ===("/") cookie.maxAge must ===(1000) cookie.secure must beFalse } "get a single cookie from an AHC response" in { val ahcResponse : AHCResponse = mock[AHCResponse] val (domain, name, value, path, maxAge, secure) = ("example.com", "someName", "someValue", "/", 1000, false) val ahcCookie : AHCCookie = new AHCCookie(domain, name, value, path, maxAge, secure) ahcResponse.getCookies returns util.Arrays.asList(ahcCookie) val response = Response(ahcResponse) val optionCookie = response.cookie("someName") optionCookie must beSome[Cookie].which { cookie => cookie.domain must ===("example.com") cookie.name must beSome("someName") cookie.value must beSome("someValue") cookie.path must ===("/") cookie.maxAge must ===(1000) cookie.secure must beFalse } } } }
michaelahlers/team-awesome-wedding
vendor/play-2.2.1/framework/src/play/src/test/scala/play/api/libs/ws/WSSpec.scala
Scala
mit
2,223
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.execution.streaming import org.apache.spark.rdd.RDD import org.apache.spark.sql.catalyst.InternalRow import org.apache.spark.sql.catalyst.encoders.ExpressionEncoder import org.apache.spark.sql.catalyst.expressions.{Ascending, Attribute, AttributeReference, Expression, Literal, SortOrder, UnsafeRow} import org.apache.spark.sql.catalyst.plans.logical._ import org.apache.spark.sql.catalyst.plans.physical.{ClusteredDistribution, Distribution} import org.apache.spark.sql.execution._ import org.apache.spark.sql.execution.streaming.GroupStateImpl.NO_TIMESTAMP import org.apache.spark.sql.execution.streaming.state._ import org.apache.spark.sql.streaming.{GroupStateTimeout, OutputMode} import org.apache.spark.sql.types.IntegerType import org.apache.spark.util.CompletionIterator /** * Physical operator for executing `FlatMapGroupsWithState.` * * @param func function called on each group * @param keyDeserializer used to extract the key object for each group. * @param valueDeserializer used to extract the items in the iterator from an input row. * @param groupingAttributes used to group the data * @param dataAttributes used to read the data * @param outputObjAttr used to define the output object * @param stateEncoder used to serialize/deserialize state before calling `func` * @param outputMode the output mode of `func` * @param timeoutConf used to timeout groups that have not received data in a while * @param batchTimestampMs processing timestamp of the current batch. */ case class FlatMapGroupsWithStateExec( func: (Any, Iterator[Any], LogicalGroupState[Any]) => Iterator[Any], keyDeserializer: Expression, valueDeserializer: Expression, groupingAttributes: Seq[Attribute], dataAttributes: Seq[Attribute], outputObjAttr: Attribute, stateInfo: Option[StatefulOperatorStateInfo], stateEncoder: ExpressionEncoder[Any], outputMode: OutputMode, timeoutConf: GroupStateTimeout, batchTimestampMs: Option[Long], override val eventTimeWatermark: Option[Long], child: SparkPlan ) extends UnaryExecNode with ObjectProducerExec with StateStoreWriter with WatermarkSupport { import GroupStateImpl._ private val isTimeoutEnabled = timeoutConf != NoTimeout private val timestampTimeoutAttribute = AttributeReference("timeoutTimestamp", dataType = IntegerType, nullable = false)() private val stateAttributes: Seq[Attribute] = { val encSchemaAttribs = stateEncoder.schema.toAttributes if (isTimeoutEnabled) encSchemaAttribs :+ timestampTimeoutAttribute else encSchemaAttribs } // Get the serializer for the state, taking into account whether we need to save timestamps private val stateSerializer = { val encoderSerializer = stateEncoder.namedExpressions if (isTimeoutEnabled) { encoderSerializer :+ Literal(GroupStateImpl.NO_TIMESTAMP) } else { encoderSerializer } } // Get the deserializer for the state. Note that this must be done in the driver, as // resolving and binding of deserializer expressions to the encoded type can be safely done // only in the driver. private val stateDeserializer = stateEncoder.resolveAndBind().deserializer private val watermarkPresent = child.output.exists { case a: Attribute if a.metadata.contains(EventTimeWatermark.delayKey) => true case _ => false } /** Distribute by grouping attributes */ override def requiredChildDistribution: Seq[Distribution] = ClusteredDistribution(groupingAttributes, stateInfo.map(_.numPartitions)) :: Nil /** Ordering needed for using GroupingIterator */ override def requiredChildOrdering: Seq[Seq[SortOrder]] = Seq(groupingAttributes.map(SortOrder(_, Ascending))) override def keyExpressions: Seq[Attribute] = groupingAttributes override protected def doExecute(): RDD[InternalRow] = { metrics // force lazy init at driver // Throw errors early if parameters are not as expected timeoutConf match { case ProcessingTimeTimeout => require(batchTimestampMs.nonEmpty) case EventTimeTimeout => require(eventTimeWatermark.nonEmpty) // watermark value has been populated require(watermarkExpression.nonEmpty) // input schema has watermark attribute case _ => } child.execute().mapPartitionsWithStateStore[InternalRow]( getStateInfo, groupingAttributes.toStructType, stateAttributes.toStructType, indexOrdinal = None, sqlContext.sessionState, Some(sqlContext.streams.stateStoreCoordinator)) { case (store, iter) => val updater = new StateStoreUpdater(store) // If timeout is based on event time, then filter late data based on watermark val filteredIter = watermarkPredicateForData match { case Some(predicate) if timeoutConf == EventTimeTimeout => iter.filter(row => !predicate.eval(row)) case _ => iter } // Generate a iterator that returns the rows grouped by the grouping function // Note that this code ensures that the filtering for timeout occurs only after // all the data has been processed. This is to ensure that the timeout information of all // the keys with data is updated before they are processed for timeouts. val outputIterator = updater.updateStateForKeysWithData(filteredIter) ++ updater.updateStateForTimedOutKeys() // Return an iterator of all the rows generated by all the keys, such that when fully // consumed, all the state updates will be committed by the state store CompletionIterator[InternalRow, Iterator[InternalRow]]( outputIterator, { store.commit() setStoreMetrics(store) } ) } } /** Helper class to update the state store */ class StateStoreUpdater(store: StateStore) { // Converters for translating input keys, values, output data between rows and Java objects private val getKeyObj = ObjectOperator.deserializeRowToObject(keyDeserializer, groupingAttributes) private val getValueObj = ObjectOperator.deserializeRowToObject(valueDeserializer, dataAttributes) private val getOutputRow = ObjectOperator.wrapObjectToRow(outputObjAttr.dataType) // Converters for translating state between rows and Java objects private val getStateObjFromRow = ObjectOperator.deserializeRowToObject( stateDeserializer, stateAttributes) private val getStateRowFromObj = ObjectOperator.serializeObjectToRow(stateSerializer) // Index of the additional metadata fields in the state row private val timeoutTimestampIndex = stateAttributes.indexOf(timestampTimeoutAttribute) // Metrics private val numUpdatedStateRows = longMetric("numUpdatedStateRows") private val numOutputRows = longMetric("numOutputRows") /** * For every group, get the key, values and corresponding state and call the function, * and return an iterator of rows */ def updateStateForKeysWithData(dataIter: Iterator[InternalRow]): Iterator[InternalRow] = { val groupedIter = GroupedIterator(dataIter, groupingAttributes, child.output) groupedIter.flatMap { case (keyRow, valueRowIter) => val keyUnsafeRow = keyRow.asInstanceOf[UnsafeRow] callFunctionAndUpdateState( keyUnsafeRow, valueRowIter, store.get(keyUnsafeRow), hasTimedOut = false) } } /** Find the groups that have timeout set and are timing out right now, and call the function */ def updateStateForTimedOutKeys(): Iterator[InternalRow] = { if (isTimeoutEnabled) { val timeoutThreshold = timeoutConf match { case ProcessingTimeTimeout => batchTimestampMs.get case EventTimeTimeout => eventTimeWatermark.get case _ => throw new IllegalStateException( s"Cannot filter timed out keys for $timeoutConf") } val timingOutKeys = store.getRange(None, None).filter { rowPair => val timeoutTimestamp = getTimeoutTimestamp(rowPair.value) timeoutTimestamp != NO_TIMESTAMP && timeoutTimestamp < timeoutThreshold } timingOutKeys.flatMap { rowPair => callFunctionAndUpdateState(rowPair.key, Iterator.empty, rowPair.value, hasTimedOut = true) } } else Iterator.empty } /** * Call the user function on a key's data, update the state store, and return the return data * iterator. Note that the store updating is lazy, that is, the store will be updated only * after the returned iterator is fully consumed. * * @param keyRow Row representing the key, cannot be null * @param valueRowIter Iterator of values as rows, cannot be null, but can be empty * @param prevStateRow Row representing the previous state, can be null * @param hasTimedOut Whether this function is being called for a key timeout */ private def callFunctionAndUpdateState( keyRow: UnsafeRow, valueRowIter: Iterator[InternalRow], prevStateRow: UnsafeRow, hasTimedOut: Boolean): Iterator[InternalRow] = { val keyObj = getKeyObj(keyRow) // convert key to objects val valueObjIter = valueRowIter.map(getValueObj.apply) // convert value rows to objects val stateObj = getStateObj(prevStateRow) val keyedState = GroupStateImpl.createForStreaming( Option(stateObj), batchTimestampMs.getOrElse(NO_TIMESTAMP), eventTimeWatermark.getOrElse(NO_TIMESTAMP), timeoutConf, hasTimedOut, watermarkPresent) // Call function, get the returned objects and convert them to rows val mappedIterator = func(keyObj, valueObjIter, keyedState).map { obj => numOutputRows += 1 getOutputRow(obj) } // When the iterator is consumed, then write changes to state def onIteratorCompletion: Unit = { val currentTimeoutTimestamp = keyedState.getTimeoutTimestamp // If the state has not yet been set but timeout has been set, then // we have to generate a row to save the timeout. However, attempting serialize // null using case class encoder throws - // java.lang.NullPointerException: Null value appeared in non-nullable field: // If the schema is inferred from a Scala tuple / case class, or a Java bean, please // try to use scala.Option[_] or other nullable types. if (!keyedState.exists && currentTimeoutTimestamp != NO_TIMESTAMP) { throw new IllegalStateException( "Cannot set timeout when state is not defined, that is, state has not been" + "initialized or has been removed") } if (keyedState.hasRemoved) { store.remove(keyRow) numUpdatedStateRows += 1 } else { val previousTimeoutTimestamp = getTimeoutTimestamp(prevStateRow) val stateRowToWrite = if (keyedState.hasUpdated) { getStateRow(keyedState.get) } else { prevStateRow } val hasTimeoutChanged = currentTimeoutTimestamp != previousTimeoutTimestamp val shouldWriteState = keyedState.hasUpdated || hasTimeoutChanged if (shouldWriteState) { if (stateRowToWrite == null) { // This should never happen because checks in GroupStateImpl should avoid cases // where empty state would need to be written throw new IllegalStateException("Attempting to write empty state") } setTimeoutTimestamp(stateRowToWrite, currentTimeoutTimestamp) store.put(keyRow, stateRowToWrite) numUpdatedStateRows += 1 } } } // Return an iterator of rows such that fully consumed, the updated state value will be saved CompletionIterator[InternalRow, Iterator[InternalRow]](mappedIterator, onIteratorCompletion) } /** Returns the state as Java object if defined */ def getStateObj(stateRow: UnsafeRow): Any = { if (stateRow != null) getStateObjFromRow(stateRow) else null } /** Returns the row for an updated state */ def getStateRow(obj: Any): UnsafeRow = { assert(obj != null) getStateRowFromObj(obj) } /** Returns the timeout timestamp of a state row is set */ def getTimeoutTimestamp(stateRow: UnsafeRow): Long = { if (isTimeoutEnabled && stateRow != null) { stateRow.getLong(timeoutTimestampIndex) } else NO_TIMESTAMP } /** Set the timestamp in a state row */ def setTimeoutTimestamp(stateRow: UnsafeRow, timeoutTimestamps: Long): Unit = { if (isTimeoutEnabled) stateRow.setLong(timeoutTimestampIndex, timeoutTimestamps) } } }
brad-kaiser/spark
sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/FlatMapGroupsWithStateExec.scala
Scala
apache-2.0
13,642
package philosophy.finallytagless import language.higherKinds import scala.scalajs.js.JSApp import org.scalajs.dom._ import cats.{~>, Id, Monad} import cats.syntax.flatMap._ import cats.syntax.functor._ import philosophy.{Graph, wikiapi, RFuture} import philosophy.RFuture._ import philosophy.IO._ import philosophy.crawlStates._ import philosophy.finallytagless.Interpreter.~ trait Wiki[F[_]] { def randomPage : F[String] def nextLinks(current: String ) : F[List[String]] } object Wiki { def randomPage : Term[Wiki,String] = Term[Wiki] { _.randomPage } def nextLinks(current: String ) : Term[Wiki,List[String]] = Term[Wiki] { _.nextLinks(current) } } object JsonpWiki extends Wiki[RFuture] { def randomPage: RFuture[String] = (ec) => wikiapi.randomPage(ec) def nextLinks(current: String): RFuture[List[String]] = (ec) => wikiapi.nextLinks(current)(ec) } object TestWiki extends Wiki[IO] { def randomPage: IO[String] = Io{ wikiapi.testrandom } def nextLinks(current: String): IO[List[String]] = Io{ wikiapi.testnextlink(current) } } trait Output[F[_]] { def firstPage( name: String ) : F[Unit] def pageStep( from:String, to:String, idx : Int ) : F[Unit] def statusMsg( msg:String ) : F[Unit] } object ConsoleOutput extends Output[IO] { def firstPage(name: String): IO[Unit] = Io{ println( s"1: $name") } def pageStep(from: String, to: String, idx: Int): IO[Unit] = Io{ println(s"$idx: $to") } def statusMsg(msg: String): IO[Unit] = Io{ println(msg) } } class HTMLOutput( outputElement: Element ) extends Output[IO] { def firstPage( name:String ) : IO[Unit] = Io { outputElement.innerHTML = s"<p>1: <a href='http://en.wikipedia.org/wiki/$name'>$name</a></p>\\n" } def pageStep( from:String, to:String, idx: Int ) : IO[Unit] = Io { outputElement.innerHTML += s"<p>$idx: <a href='http://en.wikipedia.org/wiki/$to'>$to</a></p>\\n" } def statusMsg(msg:String ): IO[Unit] = Io { outputElement.innerHTML += s"<p>$msg</p>\\n" } } object GraphOutput extends Output[IO] { def firstPage(name: String): IO[Unit] = Io { Graph.addNode( name ) } def pageStep(from: String, to: String, idx: Int): IO[Unit] = Io { Graph.addNode( to ) Graph.addLink( from, to ) } def statusMsg(msg: String): IO[Unit] = Io { println( msg ) } } trait Input[F[_]] { def getPage : F[String] } object RandomPageInput extends ( Input ~~> Wiki ) { def embed[M[_] : Monad]( wiki: Interpreter[Wiki, M]): Input[M] = new Input[M] { def getPage: M[String] = wiki( _.randomPage ) } } class ConstInput( input:String ) extends Input[Id] { def getPage : String = input } trait UI[F[_]] { def getStartPage : F[Continue] def stepPage( state: Continue ) : F[Unit] def finished( state: CrawlStateFinished ) : F[Unit] } object UI { def getStartPage : Term[UI,Continue] = Term[UI]{ _.getStartPage } def showStep(state: Continue ) : Term[UI,Unit] = Term[UI]{ _.stepPage(state) } def finished(state: CrawlStateFinished ) : Term[UI,Unit] = Term[UI]{ _.finished(state) } } object UIToInputOutput extends ( UI ~~> (Input~Output)#Pair ) { def embed[M[_] : Monad]( inputOutput: Interpreter[(Input~Output)#Pair, M]): UI[M] = new UI[M] { val (input,output) = Interpreter.pairOf( inputOutput ) def getStartPage: M[Continue] = for { page <- input {_.getPage} _ <- output {_.firstPage(page)} } yield Continue(page, Nil) def stepPage(state: Continue): M[Unit] = output {_.pageStep(state.visitedPages.head, state.currentPage, state.visitedPages.length + 1)} def finished(state: CrawlStateFinished): M[Unit] = output{_.statusMsg(state match { case Loop(currentPage, to) => s"Page '$currentPage' loops back to $to" case Error(currentPage, e) => s"Error '$e' at Page '$currentPage'" case NoLinks(currentPage) => s"No suitable links from Page '$currentPage'" case AtPhilosophy(steps) => s"Got to Philosophy in $steps steps!" })} } } object program { type WikiAndUI[M[_]] = (Wiki~UI)#Pair[M] type PRG[X] = Term[WikiAndUI,X] def pure[X]( x:X ) : PRG[X] = Term.pure[WikiAndUI,X]( x ) def stepNext( state : Continue ) : PRG[CrawlState] = for { links <- Wiki.nextLinks( state.currentPage ).embed[WikiAndUI] nextState <- links.headOption.fold( pure( NoLinks(state.currentPage) : CrawlState ) ) { nextPage => val cont = Continue(nextPage, state.currentPage :: state.visitedPages) UI.showStep( cont ).embed[WikiAndUI].map{ ignore => if (state.visitedPages.contains(nextPage)) Loop(state.currentPage, nextPage) else if (nextPage.toLowerCase == "philosophy") AtPhilosophy(state.visitedPages.length + 2) else cont } } } yield nextState def recurseStep( state: Continue ) : PRG[CrawlStateFinished] = stepNext( state ) .flatMap { case c : Continue => recurseStep( c ) case f : CrawlStateFinished => pure( f ) } def run : PRG[CrawlStateFinished] = UI.getStartPage.embed[WikiAndUI] .flatMap( recurseStep ) .flatMap{ state => UI.finished(state).map( x => state ).embed[WikiAndUI] } }
vtoro/getting-to-philosophy
src/main/scala/philosophy/finallytagless/finallytagless.scala
Scala
mit
5,194
package com.avsystem.scex package compiler import com.avsystem.scex.parsing.PositionMapping /** * Created: 14-11-2013 * Author: ghik */ case class ExpressionDef( profile: ExpressionProfile, template: Boolean, setter: Boolean, expression: String, header: String, contextType: String, resultType: String, variableTypes: Map[String, String])( val originalExpression: String, val positionMapping: PositionMapping, val rootObjectClass: Class[_]) { }
AVSystem/scex
scex-core/src/main/scala/com/avsystem/scex/compiler/ExpressionDef.scala
Scala
mit
476
package justin.db.replica.read import java.util.UUID import justin.db.Data import justin.db.actors.protocol.{StorageNodeFailedRead, StorageNodeFoundRead, StorageNodeNotFoundRead} import justin.db.consistenthashing.NodeId import justin.db.storage.GetStorageProtocol import justin.db.storage.PluggableStorageProtocol.{DataOriginality, StorageGetData} import justin.db.vectorclocks.VectorClock import org.scalatest.concurrent.ScalaFutures import org.scalatest.{FlatSpec, Matchers} import scala.concurrent.ExecutionContext.Implicits.global import scala.concurrent.duration._ import scala.concurrent.Future class ReplicaLocalReaderTest extends FlatSpec with Matchers with ScalaFutures { behavior of "Replica Local Reader" override implicit def patienceConfig: PatienceConfig = PatienceConfig(10.seconds, 50.millis) it should "found data for existing key" in { // given val id = UUID.randomUUID() val data = Data(id, "value", VectorClock[NodeId]().increase(NodeId(1))) val service = new ReplicaLocalReader(new GetStorageProtocol { override def get(id: UUID)(resolveOriginality: (UUID) => DataOriginality): Future[StorageGetData] = { Future.successful(StorageGetData.Single(data)) } }) // when val result = service.apply(id, null) // then whenReady(result) { _ shouldBe StorageNodeFoundRead(data) } } it should "not found data for non-existing key" in { // given val id = UUID.randomUUID() val service = new ReplicaLocalReader(new GetStorageProtocol { override def get(id: UUID)(resolveOriginality: (UUID) => DataOriginality): Future[StorageGetData] = { Future.successful(StorageGetData.None) } }) // when val result = service.apply(id, null) // then whenReady(result) { _ shouldBe StorageNodeNotFoundRead(id) } } it should "recover failure reading" in { // given val id = UUID.randomUUID() val service = new ReplicaLocalReader(new GetStorageProtocol { override def get(id: UUID)(resolveOriginality: (UUID) => DataOriginality): Future[StorageGetData] = Future.failed(new Exception) }) // when val result = service.apply(id, null) // then whenReady(result) { _ shouldBe StorageNodeFailedRead(id) } } }
speedcom/JustinDB
justin-core/src/test/scala/justin/db/replica/read/ReplicaLocalReaderTest.scala
Scala
apache-2.0
2,273
/* * Copyright 2014-2020 Rik van der Kleij * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package intellij.haskell.psi.stubs.index import com.intellij.psi.stubs.{StringStubIndexExtension, StubIndexKey} import intellij.haskell.psi.HaskellNamedElement object HaskellAllNameIndex { val Key: StubIndexKey[String, HaskellNamedElement] = StubIndexKey.createIndexKey("haskell.all.name") val Version = 1 } class HaskellAllNameIndex extends StringStubIndexExtension[HaskellNamedElement] { override def getVersion: Int = { super.getVersion + HaskellAllNameIndex.Version } def getKey: StubIndexKey[String, HaskellNamedElement] = { HaskellAllNameIndex.Key } }
rikvdkleij/intellij-haskell
src/main/scala/intellij/haskell/psi/stubs/index/HaskellAllNameIndex.scala
Scala
apache-2.0
1,186
package com.xah.chat.ui.activities import android.app.Activity import android.os.Bundle import android.support.v7.app.ActionBarActivity import android.view.{View, Window} import com.xah.chat.comms.{XService, XServiceConnection} import android.content.{Context, Intent} import com.xah.chat.framework.TraitActivityContext import scala.language.implicitConversions import com.xah.chat.utils.DeviceUtils import com.xah.chat.datamodel.xah class BaseActivity extends Activity with TraitActivityContext[Activity] { val mConnection = new XServiceConnection val mDeviceId = DeviceUtils.getDeviceId(this) override def onCreate(savedInstanceState: Bundle): Unit = { super.onCreate(savedInstanceState) } protected def runOnUi(f: () => Unit) = this.runOnUiThread(new Runnable { override def run(): Unit = f() }) override def onStart() = { super.onStart() if (xah.Handle(this) != "") { bindService(new Intent(this, classOf[XService]), mConnection, Context.BIND_AUTO_CREATE) } } override def onDestroy() = { super.onDestroy() mConnection.mBound match { case true => unbindService(mConnection) case _ => () } } }
lemonxah/xaHChat
src/main/scala/com/xah/chat/ui/activities/BaseActivity.scala
Scala
mit
1,172
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.catalyst.csv import org.apache.spark.SparkFunSuite import org.apache.spark.sql.types._ class CSVInferSchemaSuite extends SparkFunSuite { test("String fields types are inferred correctly from null types") { val options = new CSVOptions(Map.empty[String, String], false, "GMT") assert(CSVInferSchema.inferField(NullType, "", options) == NullType) assert(CSVInferSchema.inferField(NullType, null, options) == NullType) assert(CSVInferSchema.inferField(NullType, "100000000000", options) == LongType) assert(CSVInferSchema.inferField(NullType, "60", options) == IntegerType) assert(CSVInferSchema.inferField(NullType, "3.5", options) == DoubleType) assert(CSVInferSchema.inferField(NullType, "test", options) == StringType) assert(CSVInferSchema.inferField(NullType, "2015-08-20 15:57:00", options) == TimestampType) assert(CSVInferSchema.inferField(NullType, "True", options) == BooleanType) assert(CSVInferSchema.inferField(NullType, "FAlSE", options) == BooleanType) val textValueOne = Long.MaxValue.toString + "0" val decimalValueOne = new java.math.BigDecimal(textValueOne) val expectedTypeOne = DecimalType(decimalValueOne.precision, decimalValueOne.scale) assert(CSVInferSchema.inferField(NullType, textValueOne, options) == expectedTypeOne) } test("String fields types are inferred correctly from other types") { val options = new CSVOptions(Map.empty[String, String], false, "GMT") assert(CSVInferSchema.inferField(LongType, "1.0", options) == DoubleType) assert(CSVInferSchema.inferField(LongType, "test", options) == StringType) assert(CSVInferSchema.inferField(IntegerType, "1.0", options) == DoubleType) assert(CSVInferSchema.inferField(DoubleType, null, options) == DoubleType) assert(CSVInferSchema.inferField(DoubleType, "test", options) == StringType) assert(CSVInferSchema.inferField(LongType, "2015-08-20 14:57:00", options) == TimestampType) assert(CSVInferSchema.inferField(DoubleType, "2015-08-20 15:57:00", options) == TimestampType) assert(CSVInferSchema.inferField(LongType, "True", options) == BooleanType) assert(CSVInferSchema.inferField(IntegerType, "FALSE", options) == BooleanType) assert(CSVInferSchema.inferField(TimestampType, "FALSE", options) == BooleanType) val textValueOne = Long.MaxValue.toString + "0" val decimalValueOne = new java.math.BigDecimal(textValueOne) val expectedTypeOne = DecimalType(decimalValueOne.precision, decimalValueOne.scale) assert(CSVInferSchema.inferField(IntegerType, textValueOne, options) == expectedTypeOne) } test("Timestamp field types are inferred correctly via custom data format") { var options = new CSVOptions(Map("timestampFormat" -> "yyyy-mm"), false, "GMT") assert(CSVInferSchema.inferField(TimestampType, "2015-08", options) == TimestampType) options = new CSVOptions(Map("timestampFormat" -> "yyyy"), false, "GMT") assert(CSVInferSchema.inferField(TimestampType, "2015", options) == TimestampType) } test("Timestamp field types are inferred correctly from other types") { val options = new CSVOptions(Map.empty[String, String], false, "GMT") assert(CSVInferSchema.inferField(IntegerType, "2015-08-20 14", options) == StringType) assert(CSVInferSchema.inferField(DoubleType, "2015-08-20 14:10", options) == StringType) assert(CSVInferSchema.inferField(LongType, "2015-08 14:49:00", options) == StringType) } test("Boolean fields types are inferred correctly from other types") { val options = new CSVOptions(Map.empty[String, String], false, "GMT") assert(CSVInferSchema.inferField(LongType, "Fale", options) == StringType) assert(CSVInferSchema.inferField(DoubleType, "TRUEe", options) == StringType) } test("Type arrays are merged to highest common type") { assert( CSVInferSchema.mergeRowTypes(Array(StringType), Array(DoubleType)).deep == Array(StringType).deep) assert( CSVInferSchema.mergeRowTypes(Array(IntegerType), Array(LongType)).deep == Array(LongType).deep) assert( CSVInferSchema.mergeRowTypes(Array(DoubleType), Array(LongType)).deep == Array(DoubleType).deep) } test("Null fields are handled properly when a nullValue is specified") { var options = new CSVOptions(Map("nullValue" -> "null"), false, "GMT") assert(CSVInferSchema.inferField(NullType, "null", options) == NullType) assert(CSVInferSchema.inferField(StringType, "null", options) == StringType) assert(CSVInferSchema.inferField(LongType, "null", options) == LongType) options = new CSVOptions(Map("nullValue" -> "\\\\N"), false, "GMT") assert(CSVInferSchema.inferField(IntegerType, "\\\\N", options) == IntegerType) assert(CSVInferSchema.inferField(DoubleType, "\\\\N", options) == DoubleType) assert(CSVInferSchema.inferField(TimestampType, "\\\\N", options) == TimestampType) assert(CSVInferSchema.inferField(BooleanType, "\\\\N", options) == BooleanType) assert(CSVInferSchema.inferField(DecimalType(1, 1), "\\\\N", options) == DecimalType(1, 1)) } test("Merging Nulltypes should yield Nulltype.") { val mergedNullTypes = CSVInferSchema.mergeRowTypes(Array(NullType), Array(NullType)) assert(mergedNullTypes.deep == Array(NullType).deep) } test("SPARK-18433: Improve DataSource option keys to be more case-insensitive") { val options = new CSVOptions(Map("TiMeStampFormat" -> "yyyy-mm"), false, "GMT") assert(CSVInferSchema.inferField(TimestampType, "2015-08", options) == TimestampType) } test("SPARK-18877: `inferField` on DecimalType should find a common type with `typeSoFar`") { val options = new CSVOptions(Map.empty[String, String], false, "GMT") // 9.03E+12 is Decimal(3, -10) and 1.19E+11 is Decimal(3, -9). assert(CSVInferSchema.inferField(DecimalType(3, -10), "1.19E+11", options) == DecimalType(4, -9)) // BigDecimal("12345678901234567890.01234567890123456789") is precision 40 and scale 20. val value = "12345678901234567890.01234567890123456789" assert(CSVInferSchema.inferField(DecimalType(3, -10), value, options) == DoubleType) // Seq(s"${Long.MaxValue}1", "2015-12-01 00:00:00") should be StringType assert(CSVInferSchema.inferField(NullType, s"${Long.MaxValue}1", options) == DecimalType(20, 0)) assert(CSVInferSchema.inferField(DecimalType(20, 0), "2015-12-01 00:00:00", options) == StringType) } test("DoubleType should be inferred when user defined nan/inf are provided") { val options = new CSVOptions(Map("nanValue" -> "nan", "negativeInf" -> "-inf", "positiveInf" -> "inf"), false, "GMT") assert(CSVInferSchema.inferField(NullType, "nan", options) == DoubleType) assert(CSVInferSchema.inferField(NullType, "inf", options) == DoubleType) assert(CSVInferSchema.inferField(NullType, "-inf", options) == DoubleType) } }
ahnqirage/spark
sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/csv/CSVInferSchemaSuite.scala
Scala
apache-2.0
7,719
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.ignite.visor.commands.cache import org.apache.ignite.cluster.{ClusterGroupEmptyException, ClusterNode} import org.apache.ignite.visor.commands.common.VisorTextTable import org.apache.ignite.visor.visor._ import org.apache.ignite.internal.visor.query._ import org.apache.ignite.internal.visor.util.VisorTaskUtils._ import scala.collection.JavaConversions._ /** * ==Overview== * Visor 'scan' command implementation. * * ====Specification==== * {{{ * cache {-id=<node-id>|-id8=<node-id8>} {-p=<page size>} -c=<cache name> -scan * }}} * * ====Arguments==== * {{{ * <node-id> * Full node ID. * <node-id8> * Node ID8. * <page size> * Number of object to fetch from cache at once. * <cache-name> * Name of the cache. * }}} * * ====Examples==== * {{{ * cache -c=cache * List entries from cache with name 'cache' from all nodes with this cache. * cache -c=@c0 -scan -p=50 * List entries from cache with name taken from 'c0' memory variable with page of 50 items * from all nodes with this cache. * cache -c=cache -scan -id8=12345678 * List entries from cache with name 'cache' and node '12345678' ID8. * }}} */ class VisorCacheScanCommand { /** * Prints error message and advise. * * @param errMsgs Error messages. */ private def scold(errMsgs: Any*) { assert(errMsgs != null) warn(errMsgs: _*) warn("Type 'help cache' to see how to use this command.") } private def error(e: Throwable) { var cause: Throwable = e while (cause.getCause != null) cause = cause.getCause scold(cause.getMessage) } /** * ===Command=== * List all entries in cache with specified name. * * ===Examples=== * <ex>cache -c=cache -scan</ex> * List entries from cache with name 'cache' from all nodes with this cache. * <br> * <ex>cache -c=@c0 -scan -p=50</ex> * List entries from cache with name taken from 'c0' memory variable with page of 50 items * from all nodes with this cache. * <br> * <ex>cache -c=cache -scan -id8=12345678</ex> * List entries from cache with name 'cache' and node '12345678' ID8. * * @param argLst Command arguments. */ def scan(argLst: ArgList, node: Option[ClusterNode]) { val pageArg = argValue("p", argLst) val cacheArg = argValue("c", argLst) var pageSize = 25 if (pageArg.isDefined) { val page = pageArg.get try pageSize = page.toInt catch { case nfe: NumberFormatException => scold("Invalid value for 'page size': " + page) return } if (pageSize < 1 || pageSize > 100) { scold("'Page size' should be in range [1..100] but found: " + page) return } } val cacheName = cacheArg match { case None => null // default cache. case Some(s) if s.startsWith("@") => warn("Can't find cache variable with specified name: " + s, "Type 'cache' to see available cache variables." ) return case Some(name) => name } val firstPage = try executeRandom(groupForDataNode(node, cacheName), classOf[VisorScanQueryTask], new VisorScanQueryTaskArg(cacheName, null, false, false, false, false, pageSize)) match { case x if x.getError != null => error(x.getError) return case x => x.getResult } catch { case e: ClusterGroupEmptyException => scold(messageNodeNotFound(node, cacheName)) return case e: Throwable => error(e) return } if (firstPage.getRows.isEmpty) { println(s"Cache: ${escapeName(cacheName)} is empty") return } var nextPage: VisorQueryResult = firstPage def render() { println("Entries in cache: " + escapeName(cacheName)) val t = VisorTextTable() t #= ("Key Class", "Key", "Value Class", "Value") nextPage.getRows.foreach(r => t += (r(0), r(1), r(2), r(3))) t.render() } render() while (nextPage.isHasMore) { ask("\\nFetch more objects (y/n) [y]:", "y") match { case "y" | "Y" => try { nextPage = executeOne(firstPage.getResponseNodeId, classOf[VisorQueryNextPageTask], new VisorQueryNextPageTaskArg(firstPage.getQueryId, pageSize)) render() } catch { case e: Exception => error(e) } case _ => return } } } } /** * Companion object that does initialization of the command. */ object VisorCacheScanCommand { /** Singleton command. */ private val cmd = new VisorCacheScanCommand /** * Singleton. */ def apply() = cmd }
vadopolski/ignite
modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/cache/VisorCacheScanCommand.scala
Scala
apache-2.0
6,231
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package kafka.log import java.io.File import java.util.concurrent.TimeUnit import java.util.concurrent.locks.ReentrantLock import kafka.common.{KafkaException, LogCleaningAbortedException} import kafka.metrics.KafkaMetricsGroup import kafka.server.LogDirFailureChannel import kafka.server.checkpoints.OffsetCheckpointFile import kafka.utils.CoreUtils._ import kafka.utils.{Logging, Pool} import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.utils.Time import org.apache.kafka.common.errors.KafkaStorageException import scala.collection.{Iterable, Seq, mutable} private[log] sealed trait LogCleaningState private[log] case object LogCleaningInProgress extends LogCleaningState private[log] case object LogCleaningAborted extends LogCleaningState private[log] case class LogCleaningPaused(pausedCount: Int) extends LogCleaningState private[log] class LogCleaningException(val log: UnifiedLog, private val message: String, private val cause: Throwable) extends KafkaException(message, cause) /** * This class manages the state of each partition being cleaned. * LogCleaningState defines the cleaning states that a TopicPartition can be in. * 1. None : No cleaning state in a TopicPartition. In this state, it can become LogCleaningInProgress * or LogCleaningPaused(1). Valid previous state are LogCleaningInProgress and LogCleaningPaused(1) * 2. LogCleaningInProgress : The cleaning is currently in progress. In this state, it can become None when log cleaning is finished * or become LogCleaningAborted. Valid previous state is None. * 3. LogCleaningAborted : The cleaning abort is requested. In this state, it can become LogCleaningPaused(1). * Valid previous state is LogCleaningInProgress. * 4-a. LogCleaningPaused(1) : The cleaning is paused once. No log cleaning can be done in this state. * In this state, it can become None or LogCleaningPaused(2). * Valid previous state is None, LogCleaningAborted or LogCleaningPaused(2). * 4-b. LogCleaningPaused(i) : The cleaning is paused i times where i>= 2. No log cleaning can be done in this state. * In this state, it can become LogCleaningPaused(i-1) or LogCleaningPaused(i+1). * Valid previous state is LogCleaningPaused(i-1) or LogCleaningPaused(i+1). */ private[log] class LogCleanerManager(val logDirs: Seq[File], val logs: Pool[TopicPartition, UnifiedLog], val logDirFailureChannel: LogDirFailureChannel) extends Logging with KafkaMetricsGroup { import LogCleanerManager._ protected override def loggerName = classOf[LogCleaner].getName // package-private for testing private[log] val offsetCheckpointFile = "cleaner-offset-checkpoint" /* the offset checkpoints holding the last cleaned point for each log */ @volatile private var checkpoints = logDirs.map(dir => (dir, new OffsetCheckpointFile(new File(dir, offsetCheckpointFile), logDirFailureChannel))).toMap /* the set of logs currently being cleaned */ private val inProgress = mutable.HashMap[TopicPartition, LogCleaningState]() /* the set of uncleanable partitions (partitions that have raised an unexpected error during cleaning) * for each log directory */ private val uncleanablePartitions = mutable.HashMap[String, mutable.Set[TopicPartition]]() /* a global lock used to control all access to the in-progress set and the offset checkpoints */ private val lock = new ReentrantLock /* for coordinating the pausing and the cleaning of a partition */ private val pausedCleaningCond = lock.newCondition() /* gauges for tracking the number of partitions marked as uncleanable for each log directory */ for (dir <- logDirs) { newGauge("uncleanable-partitions-count", () => inLock(lock) { uncleanablePartitions.get(dir.getAbsolutePath).map(_.size).getOrElse(0) }, Map("logDirectory" -> dir.getAbsolutePath) ) } /* gauges for tracking the number of uncleanable bytes from uncleanable partitions for each log directory */ for (dir <- logDirs) { newGauge("uncleanable-bytes", () => inLock(lock) { uncleanablePartitions.get(dir.getAbsolutePath) match { case Some(partitions) => val lastClean = allCleanerCheckpoints val now = Time.SYSTEM.milliseconds partitions.iterator.map { tp => val log = logs.get(tp) val lastCleanOffset = lastClean.get(tp) val offsetsToClean = cleanableOffsets(log, lastCleanOffset, now) val (_, uncleanableBytes) = calculateCleanableBytes(log, offsetsToClean.firstDirtyOffset, offsetsToClean.firstUncleanableDirtyOffset) uncleanableBytes }.sum case None => 0 } }, Map("logDirectory" -> dir.getAbsolutePath) ) } /* a gauge for tracking the cleanable ratio of the dirtiest log */ @volatile private var dirtiestLogCleanableRatio = 0.0 newGauge("max-dirty-percent", () => (100 * dirtiestLogCleanableRatio).toInt) /* a gauge for tracking the time since the last log cleaner run, in milli seconds */ @volatile private var timeOfLastRun: Long = Time.SYSTEM.milliseconds newGauge("time-since-last-run-ms", () => Time.SYSTEM.milliseconds - timeOfLastRun) /** * @return the position processed for all logs. */ def allCleanerCheckpoints: Map[TopicPartition, Long] = { inLock(lock) { checkpoints.values.flatMap(checkpoint => { try { checkpoint.read() } catch { case e: KafkaStorageException => error(s"Failed to access checkpoint file ${checkpoint.file.getName} in dir ${checkpoint.file.getParentFile.getAbsolutePath}", e) Map.empty[TopicPartition, Long] } }).toMap } } /** * Package private for unit test. Get the cleaning state of the partition. */ private[log] def cleaningState(tp: TopicPartition): Option[LogCleaningState] = { inLock(lock) { inProgress.get(tp) } } /** * Package private for unit test. Set the cleaning state of the partition. */ private[log] def setCleaningState(tp: TopicPartition, state: LogCleaningState): Unit = { inLock(lock) { inProgress.put(tp, state) } } /** * Choose the log to clean next and add it to the in-progress set. We recompute this * each time from the full set of logs to allow logs to be dynamically added to the pool of logs * the log manager maintains. */ def grabFilthiestCompactedLog(time: Time, preCleanStats: PreCleanStats = new PreCleanStats()): Option[LogToClean] = { inLock(lock) { val now = time.milliseconds this.timeOfLastRun = now val lastClean = allCleanerCheckpoints val dirtyLogs = logs.filter { case (_, log) => log.config.compact // match logs that are marked as compacted }.filterNot { case (topicPartition, log) => // skip any logs already in-progress and uncleanable partitions inProgress.contains(topicPartition) || isUncleanablePartition(log, topicPartition) }.map { case (topicPartition, log) => // create a LogToClean instance for each try { val lastCleanOffset = lastClean.get(topicPartition) val offsetsToClean = cleanableOffsets(log, lastCleanOffset, now) // update checkpoint for logs with invalid checkpointed offsets if (offsetsToClean.forceUpdateCheckpoint) updateCheckpoints(log.parentDirFile, partitionToUpdateOrAdd = Option(topicPartition, offsetsToClean.firstDirtyOffset)) val compactionDelayMs = maxCompactionDelay(log, offsetsToClean.firstDirtyOffset, now) preCleanStats.updateMaxCompactionDelay(compactionDelayMs) LogToClean(topicPartition, log, offsetsToClean.firstDirtyOffset, offsetsToClean.firstUncleanableDirtyOffset, compactionDelayMs > 0) } catch { case e: Throwable => throw new LogCleaningException(log, s"Failed to calculate log cleaning stats for partition $topicPartition", e) } }.filter(ltc => ltc.totalBytes > 0) // skip any empty logs this.dirtiestLogCleanableRatio = if (dirtyLogs.nonEmpty) dirtyLogs.max.cleanableRatio else 0 // and must meet the minimum threshold for dirty byte ratio or have some bytes required to be compacted val cleanableLogs = dirtyLogs.filter { ltc => (ltc.needCompactionNow && ltc.cleanableBytes > 0) || ltc.cleanableRatio > ltc.log.config.minCleanableRatio } if(cleanableLogs.isEmpty) { None } else { preCleanStats.recordCleanablePartitions(cleanableLogs.size) val filthiest = cleanableLogs.max inProgress.put(filthiest.topicPartition, LogCleaningInProgress) Some(filthiest) } } } /** * Pause logs cleaning for logs that do not have compaction enabled * and do not have other deletion or compaction in progress. * This is to handle potential race between retention and cleaner threads when users * switch topic configuration between compacted and non-compacted topic. * @return retention logs that have log cleaning successfully paused */ def pauseCleaningForNonCompactedPartitions(): Iterable[(TopicPartition, UnifiedLog)] = { inLock(lock) { val deletableLogs = logs.filter { case (_, log) => !log.config.compact // pick non-compacted logs }.filterNot { case (topicPartition, _) => inProgress.contains(topicPartition) // skip any logs already in-progress } deletableLogs.foreach { case (topicPartition, _) => inProgress.put(topicPartition, LogCleaningPaused(1)) } deletableLogs } } /** * Find any logs that have compaction enabled. Mark them as being cleaned * Include logs without delete enabled, as they may have segments * that precede the start offset. */ def deletableLogs(): Iterable[(TopicPartition, UnifiedLog)] = { inLock(lock) { val toClean = logs.filter { case (topicPartition, log) => !inProgress.contains(topicPartition) && log.config.compact && !isUncleanablePartition(log, topicPartition) } toClean.foreach { case (tp, _) => inProgress.put(tp, LogCleaningInProgress) } toClean } } /** * Abort the cleaning of a particular partition, if it's in progress. This call blocks until the cleaning of * the partition is aborted. * This is implemented by first abortAndPausing and then resuming the cleaning of the partition. */ def abortCleaning(topicPartition: TopicPartition): Unit = { inLock(lock) { abortAndPauseCleaning(topicPartition) resumeCleaning(Seq(topicPartition)) } } /** * Abort the cleaning of a particular partition if it's in progress, and pause any future cleaning of this partition. * This call blocks until the cleaning of the partition is aborted and paused. * 1. If the partition is not in progress, mark it as paused. * 2. Otherwise, first mark the state of the partition as aborted. * 3. The cleaner thread checks the state periodically and if it sees the state of the partition is aborted, it * throws a LogCleaningAbortedException to stop the cleaning task. * 4. When the cleaning task is stopped, doneCleaning() is called, which sets the state of the partition as paused. * 5. abortAndPauseCleaning() waits until the state of the partition is changed to paused. * 6. If the partition is already paused, a new call to this function * will increase the paused count by one. */ def abortAndPauseCleaning(topicPartition: TopicPartition): Unit = { inLock(lock) { inProgress.get(topicPartition) match { case None => inProgress.put(topicPartition, LogCleaningPaused(1)) case Some(LogCleaningInProgress) => inProgress.put(topicPartition, LogCleaningAborted) case Some(LogCleaningPaused(count)) => inProgress.put(topicPartition, LogCleaningPaused(count + 1)) case Some(s) => throw new IllegalStateException(s"Compaction for partition $topicPartition cannot be aborted and paused since it is in $s state.") } while(!isCleaningInStatePaused(topicPartition)) pausedCleaningCond.await(100, TimeUnit.MILLISECONDS) } } /** * Resume the cleaning of paused partitions. * Each call of this function will undo one pause. */ def resumeCleaning(topicPartitions: Iterable[TopicPartition]): Unit = { inLock(lock) { topicPartitions.foreach { topicPartition => inProgress.get(topicPartition) match { case None => throw new IllegalStateException(s"Compaction for partition $topicPartition cannot be resumed since it is not paused.") case Some(state) => state match { case LogCleaningPaused(count) if count == 1 => inProgress.remove(topicPartition) case LogCleaningPaused(count) if count > 1 => inProgress.put(topicPartition, LogCleaningPaused(count - 1)) case s => throw new IllegalStateException(s"Compaction for partition $topicPartition cannot be resumed since it is in $s state.") } } } } } /** * Check if the cleaning for a partition is in a particular state. The caller is expected to hold lock while making the call. */ private def isCleaningInState(topicPartition: TopicPartition, expectedState: LogCleaningState): Boolean = { inProgress.get(topicPartition) match { case None => false case Some(state) => if (state == expectedState) true else false } } /** * Check if the cleaning for a partition is paused. The caller is expected to hold lock while making the call. */ private def isCleaningInStatePaused(topicPartition: TopicPartition): Boolean = { inProgress.get(topicPartition) match { case None => false case Some(state) => state match { case _: LogCleaningPaused => true case _ => false } } } /** * Check if the cleaning for a partition is aborted. If so, throw an exception. */ def checkCleaningAborted(topicPartition: TopicPartition): Unit = { inLock(lock) { if (isCleaningInState(topicPartition, LogCleaningAborted)) throw new LogCleaningAbortedException() } } /** * Update checkpoint file, adding or removing partitions if necessary. * * @param dataDir The File object to be updated * @param partitionToUpdateOrAdd The [TopicPartition, Long] map data to be updated. pass "none" if doing remove, not add * @param topicPartitionToBeRemoved The TopicPartition to be removed */ def updateCheckpoints(dataDir: File, partitionToUpdateOrAdd: Option[(TopicPartition, Long)] = None, partitionToRemove: Option[TopicPartition] = None): Unit = { inLock(lock) { val checkpoint = checkpoints(dataDir) if (checkpoint != null) { try { val currentCheckpoint = checkpoint.read().filter { case (tp, _) => logs.keys.contains(tp) }.toMap // remove the partition offset if any var updatedCheckpoint = partitionToRemove match { case Some(topicPartion) => currentCheckpoint - topicPartion case None => currentCheckpoint } // update or add the partition offset if any updatedCheckpoint = partitionToUpdateOrAdd match { case Some(updatedOffset) => updatedCheckpoint + updatedOffset case None => updatedCheckpoint } checkpoint.write(updatedCheckpoint) } catch { case e: KafkaStorageException => error(s"Failed to access checkpoint file ${checkpoint.file.getName} in dir ${checkpoint.file.getParentFile.getAbsolutePath}", e) } } } } /** * alter the checkpoint directory for the topicPartition, to remove the data in sourceLogDir, and add the data in destLogDir */ def alterCheckpointDir(topicPartition: TopicPartition, sourceLogDir: File, destLogDir: File): Unit = { inLock(lock) { try { checkpoints.get(sourceLogDir).flatMap(_.read().get(topicPartition)) match { case Some(offset) => debug(s"Removing the partition offset data in checkpoint file for '${topicPartition}' " + s"from ${sourceLogDir.getAbsoluteFile} directory.") updateCheckpoints(sourceLogDir, partitionToRemove = Option(topicPartition)) debug(s"Adding the partition offset data in checkpoint file for '${topicPartition}' " + s"to ${destLogDir.getAbsoluteFile} directory.") updateCheckpoints(destLogDir, partitionToUpdateOrAdd = Option(topicPartition, offset)) case None => } } catch { case e: KafkaStorageException => error(s"Failed to access checkpoint file in dir ${sourceLogDir.getAbsolutePath}", e) } val logUncleanablePartitions = uncleanablePartitions.getOrElse(sourceLogDir.toString, mutable.Set[TopicPartition]()) if (logUncleanablePartitions.contains(topicPartition)) { logUncleanablePartitions.remove(topicPartition) markPartitionUncleanable(destLogDir.toString, topicPartition) } } } /** * Stop cleaning logs in the provided directory * * @param dir the absolute path of the log dir */ def handleLogDirFailure(dir: String): Unit = { warn(s"Stopping cleaning logs in dir $dir") inLock(lock) { checkpoints = checkpoints.filter { case (k, _) => k.getAbsolutePath != dir } } } /** * Truncate the checkpointed offset for the given partition if its checkpointed offset is larger than the given offset */ def maybeTruncateCheckpoint(dataDir: File, topicPartition: TopicPartition, offset: Long): Unit = { inLock(lock) { if (logs.get(topicPartition).config.compact) { val checkpoint = checkpoints(dataDir) if (checkpoint != null) { val existing = checkpoint.read() if (existing.getOrElse(topicPartition, 0L) > offset) checkpoint.write(mutable.Map() ++= existing += topicPartition -> offset) } } } } /** * Save out the endOffset and remove the given log from the in-progress set, if not aborted. */ def doneCleaning(topicPartition: TopicPartition, dataDir: File, endOffset: Long): Unit = { inLock(lock) { inProgress.get(topicPartition) match { case Some(LogCleaningInProgress) => updateCheckpoints(dataDir, partitionToUpdateOrAdd = Option(topicPartition, endOffset)) inProgress.remove(topicPartition) case Some(LogCleaningAborted) => inProgress.put(topicPartition, LogCleaningPaused(1)) pausedCleaningCond.signalAll() case None => throw new IllegalStateException(s"State for partition $topicPartition should exist.") case s => throw new IllegalStateException(s"In-progress partition $topicPartition cannot be in $s state.") } } } def doneDeleting(topicPartitions: Iterable[TopicPartition]): Unit = { inLock(lock) { topicPartitions.foreach { topicPartition => inProgress.get(topicPartition) match { case Some(LogCleaningInProgress) => inProgress.remove(topicPartition) case Some(LogCleaningAborted) => inProgress.put(topicPartition, LogCleaningPaused(1)) pausedCleaningCond.signalAll() case None => throw new IllegalStateException(s"State for partition $topicPartition should exist.") case s => throw new IllegalStateException(s"In-progress partition $topicPartition cannot be in $s state.") } } } } /** * Returns an immutable set of the uncleanable partitions for a given log directory * Only used for testing */ private[log] def uncleanablePartitions(logDir: String): Set[TopicPartition] = { var partitions: Set[TopicPartition] = Set() inLock(lock) { partitions ++= uncleanablePartitions.getOrElse(logDir, partitions) } partitions } def markPartitionUncleanable(logDir: String, partition: TopicPartition): Unit = { inLock(lock) { uncleanablePartitions.get(logDir) match { case Some(partitions) => partitions.add(partition) case None => uncleanablePartitions.put(logDir, mutable.Set(partition)) } } } private def isUncleanablePartition(log: UnifiedLog, topicPartition: TopicPartition): Boolean = { inLock(lock) { uncleanablePartitions.get(log.parentDir).exists(partitions => partitions.contains(topicPartition)) } } } /** * Helper class for the range of cleanable dirty offsets of a log and whether to update the checkpoint associated with * the log * * @param firstDirtyOffset the lower (inclusive) offset to begin cleaning from * @param firstUncleanableDirtyOffset the upper(exclusive) offset to clean to * @param forceUpdateCheckpoint whether to update the checkpoint associated with this log. if true, checkpoint should be * reset to firstDirtyOffset */ private case class OffsetsToClean(firstDirtyOffset: Long, firstUncleanableDirtyOffset: Long, forceUpdateCheckpoint: Boolean = false) { } private[log] object LogCleanerManager extends Logging { def isCompactAndDelete(log: UnifiedLog): Boolean = { log.config.compact && log.config.delete } /** * get max delay between the time when log is required to be compacted as determined * by maxCompactionLagMs and the current time. */ def maxCompactionDelay(log: UnifiedLog, firstDirtyOffset: Long, now: Long) : Long = { val dirtyNonActiveSegments = log.nonActiveLogSegmentsFrom(firstDirtyOffset) val firstBatchTimestamps = log.getFirstBatchTimestampForSegments(dirtyNonActiveSegments).filter(_ > 0) val earliestDirtySegmentTimestamp = { if (firstBatchTimestamps.nonEmpty) firstBatchTimestamps.min else Long.MaxValue } val maxCompactionLagMs = math.max(log.config.maxCompactionLagMs, 0L) val cleanUntilTime = now - maxCompactionLagMs if (earliestDirtySegmentTimestamp < cleanUntilTime) cleanUntilTime - earliestDirtySegmentTimestamp else 0L } /** * Returns the range of dirty offsets that can be cleaned. * * @param log the log * @param lastCleanOffset the last checkpointed offset * @param now the current time in milliseconds of the cleaning operation * @return OffsetsToClean containing offsets for cleanable portion of log and whether the log checkpoint needs updating */ def cleanableOffsets(log: UnifiedLog, lastCleanOffset: Option[Long], now: Long): OffsetsToClean = { // If the log segments are abnormally truncated and hence the checkpointed offset is no longer valid; // reset to the log starting offset and log the error val (firstDirtyOffset, forceUpdateCheckpoint) = { val logStartOffset = log.logStartOffset val checkpointDirtyOffset = lastCleanOffset.getOrElse(logStartOffset) if (checkpointDirtyOffset < logStartOffset) { // Don't bother with the warning if compact and delete are enabled. if (!isCompactAndDelete(log)) warn(s"Resetting first dirty offset of ${log.name} to log start offset $logStartOffset " + s"since the checkpointed offset $checkpointDirtyOffset is invalid.") (logStartOffset, true) } else if (checkpointDirtyOffset > log.logEndOffset) { // The dirty offset has gotten ahead of the log end offset. This could happen if there was data // corruption at the end of the log. We conservatively assume that the full log needs cleaning. warn(s"The last checkpoint dirty offset for partition ${log.name} is $checkpointDirtyOffset, " + s"which is larger than the log end offset ${log.logEndOffset}. Resetting to the log start offset $logStartOffset.") (logStartOffset, true) } else { (checkpointDirtyOffset, false) } } val minCompactionLagMs = math.max(log.config.compactionLagMs, 0L) // Find the first segment that cannot be cleaned. We cannot clean past: // 1. The active segment // 2. The last stable offset (including the high watermark) // 3. Any segments closer to the head of the log than the minimum compaction lag time val firstUncleanableDirtyOffset: Long = Seq( // we do not clean beyond the last stable offset Some(log.lastStableOffset), // the active segment is always uncleanable Option(log.activeSegment.baseOffset), // the first segment whose largest message timestamp is within a minimum time lag from now if (minCompactionLagMs > 0) { // dirty log segments val dirtyNonActiveSegments = log.nonActiveLogSegmentsFrom(firstDirtyOffset) dirtyNonActiveSegments.find { s => val isUncleanable = s.largestTimestamp > now - minCompactionLagMs debug(s"Checking if log segment may be cleaned: log='${log.name}' segment.baseOffset=${s.baseOffset} " + s"segment.largestTimestamp=${s.largestTimestamp}; now - compactionLag=${now - minCompactionLagMs}; " + s"is uncleanable=$isUncleanable") isUncleanable }.map(_.baseOffset) } else None ).flatten.min debug(s"Finding range of cleanable offsets for log=${log.name}. Last clean offset=$lastCleanOffset " + s"now=$now => firstDirtyOffset=$firstDirtyOffset firstUncleanableOffset=$firstUncleanableDirtyOffset " + s"activeSegment.baseOffset=${log.activeSegment.baseOffset}") OffsetsToClean(firstDirtyOffset, math.max(firstDirtyOffset, firstUncleanableDirtyOffset), forceUpdateCheckpoint) } /** * Given the first dirty offset and an uncleanable offset, calculates the total cleanable bytes for this log * @return the biggest uncleanable offset and the total amount of cleanable bytes */ def calculateCleanableBytes(log: UnifiedLog, firstDirtyOffset: Long, uncleanableOffset: Long): (Long, Long) = { val firstUncleanableSegment = log.nonActiveLogSegmentsFrom(uncleanableOffset).headOption.getOrElse(log.activeSegment) val firstUncleanableOffset = firstUncleanableSegment.baseOffset val cleanableBytes = log.logSegments(math.min(firstDirtyOffset, firstUncleanableOffset), firstUncleanableOffset).map(_.size.toLong).sum (firstUncleanableOffset, cleanableBytes) } }
lindong28/kafka
core/src/main/scala/kafka/log/LogCleanerManager.scala
Scala
apache-2.0
27,975
/* * Copyright (c) 2014-2020 by The Monix Project Developers. * See the project homepage at: https://monix.io * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package monix.reactive.internal.operators import monix.reactive.Observable import scala.concurrent.duration._ import scala.util.Success object DelayBySelectorSuite extends BaseOperatorSuite { def createObservable(sourceCount: Int) = Some { val source = Observable.range(0L, sourceCount.toLong) val o = source.delayOnNextBySelector(x => Observable.now(x).delayExecution(1.second)) val c = sourceCount Sample(o, c, (c * (c - 1) / 2).toLong, 1.second, 1.second) } def observableInError(sourceCount: Int, ex: Throwable) = Some { val source = createObservableEndingInError(Observable.range(0L, sourceCount.toLong), ex) val o = source.delayOnNextBySelector(x => Observable.now(x).delayExecution(1.second)) val c = sourceCount Sample(o, c - 1, (c - 1) * (c - 2) / 2, 1.second, 1.second) } def brokenUserCodeObservable(sourceCount: Int, ex: Throwable) = Some { val source = Observable.range(0, sourceCount.toLong + 1) val o = source.delayOnNextBySelector { x => if (x < sourceCount) Observable.now(x).delayExecution(1.second) else throw ex } val c = sourceCount Sample(o, c, (c * (c - 1) / 2).toLong, 1.second, 1.second) } override def cancelableObservables() = { val o = Observable .now(1L) .delayOnNextBySelector(x => Observable.now(x).delayExecution(1.second)) Seq(Sample(o, 0, 0, 0.seconds, 0.seconds)) } test("should terminate immediately on empty observable") { implicit s => val f = Observable .empty[Int] .delayOnNextBySelector(n => Observable.empty) .completedL .runToFuture s.tick(1.day) assertEquals(f.value, Some(Success(()))) } }
alexandru/monifu
monix-reactive/shared/src/test/scala/monix/reactive/internal/operators/DelayBySelectorSuite.scala
Scala
apache-2.0
2,375
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.activemq.apollo.stomp.test import java.io.File import org.openqa.selenium.WebDriver /** * <p> * </p> * * @author <a href="http://hiramchirino.com">Hiram Chirino</a> */ trait WebDriverTrait { def create_web_driver(profileDir: File): WebDriver }
chirino/activemq-apollo
apollo-stomp/src/test/scala/org/apache/activemq/apollo/stomp/test/WebDriverTrait.scala
Scala
apache-2.0
1,070
package marge.map /** * User: mikio * Date: 4/7/11 * Time: 3:26 PM */ class OneAgainstRest[A](positive: A) extends OutputMap[A, Int] { def apply(a: A): Int = if (a == positive) 1 else -1 def unapply(b: Int): Option[A] = if (b == 1) Some(positive) else None }
mikiobraun/marge
src/main/scala/marge/map/OneAgainstRest.scala
Scala
mit
269
/* * Copyright (C) Lightbend Inc. <https://www.lightbend.com> */ package com.lightbend.lagom.scaladsl.api.deser import akka.Done import akka.NotUsed import akka.stream.scaladsl.Source import akka.util.ByteString import com.lightbend.lagom.scaladsl.api.transport._ import play.api.libs.json._ import scala.collection.immutable import scala.util.control.NonFatal trait MessageSerializer[Message, WireFormat] { /** * The message headers that will be accepted for response serialization. */ def acceptResponseProtocols: immutable.Seq[MessageProtocol] = Nil /** * Whether this serializer serializes values that are used or not. * * If false, it means this serializer is for an empty request/response, eg, they use the * [[akka.NotUsed]] type. * * @return Whether the values this serializer serializes are used. */ def isUsed: Boolean = true /** * Whether this serializer is a streamed serializer or not. * * @return Whether this is a streamed serializer. */ def isStreamed: Boolean = false /** * Get a serializer for a client request. * * Since a client is the initiator of the request, it simply returns the default serializer for the entity. * * @return A serializer for request messages. */ def serializerForRequest: MessageSerializer.NegotiatedSerializer[Message, WireFormat] /** * Get a deserializer for an entity described by the given request or response protocol. * * @param protocol The protocol of the message request or response associated with the entity. * @return A deserializer for request/response messages. * @throws UnsupportedMediaType If the deserializer can't deserialize that protocol. */ @throws[UnsupportedMediaType] def deserializer(protocol: MessageProtocol): MessageSerializer.NegotiatedDeserializer[Message, WireFormat] /** * Negotiate a serializer for the response, given the accepted message headers. * * @param acceptedMessageProtocols The accepted message headers is a list of message headers that will be accepted by * the client. Any empty values in a message protocol, including the list itself, * indicate that any format is acceptable. * @throws NotAcceptable If the serializer can't meet the requirements of any of the accept headers. */ @throws[NotAcceptable] def serializerForResponse( acceptedMessageProtocols: immutable.Seq[MessageProtocol] ): MessageSerializer.NegotiatedSerializer[Message, WireFormat] } /** * A strict message serializer, for messages that fit and are worked with strictly in memory. * * Strict message serializers differ from streamed serializers, in that they work directly with `ByteString`, rather * than an Akka streams `Source`. */ trait StrictMessageSerializer[Message] extends MessageSerializer[Message, ByteString] /** * A streamed message serializer, for streams of messages. */ trait StreamedMessageSerializer[Message] extends MessageSerializer[Source[Message, NotUsed], Source[ByteString, NotUsed]] { override def isStreamed: Boolean = true } object MessageSerializer extends LowPriorityMessageSerializerImplicits { /** * A negotiated serializer. * * @tparam Message The type of message that this serializer serializes. * @tparam WireFormat The wire format that this serializer serializes to. */ trait NegotiatedSerializer[Message, WireFormat] { /** * Get the protocol associated with this message. */ def protocol: MessageProtocol = MessageProtocol(None, None, None) /** * Serialize the given message. * * @param message The message to serialize. * @return The serialized message. */ @throws[SerializationException] def serialize(message: Message): WireFormat } /** * A negotiated deserializer. * * @tparam Message The type of message that this serializer serializes. * @tparam WireFormat The wire format that this serializer serializes to. */ trait NegotiatedDeserializer[Message, WireFormat] { /** * Deserialize the given wire format. * * @param wire The raw wire data. * @return The deserialized message. */ @throws[DeserializationException] def deserialize(wire: WireFormat): Message } implicit val JsValueMessageSerializer: StrictMessageSerializer[JsValue] = new StrictMessageSerializer[JsValue] { private val defaultProtocol = MessageProtocol(Some("application/json"), None, None) override val acceptResponseProtocols: immutable.Seq[MessageProtocol] = immutable.Seq(defaultProtocol) private class JsValueSerializer(override val protocol: MessageProtocol) extends NegotiatedSerializer[JsValue, ByteString] { override def serialize(message: JsValue): ByteString = try { ByteString.fromString(Json.stringify(message), protocol.charset.getOrElse("utf-8")) } catch { case NonFatal(e) => throw SerializationException(e) } } private object JsValueDeserializer extends NegotiatedDeserializer[JsValue, ByteString] { override def deserialize(wire: ByteString): JsValue = try { if (wire.isEmpty) { JsNull } else { Json.parse(wire.iterator.asInputStream) } } catch { case NonFatal(e) => throw DeserializationException(e) } } override def deserializer(protocol: MessageProtocol): NegotiatedDeserializer[JsValue, ByteString] = JsValueDeserializer override def serializerForResponse( acceptedMessageProtocols: immutable.Seq[MessageProtocol] ): NegotiatedSerializer[JsValue, ByteString] = { new JsValueSerializer( acceptedMessageProtocols.find(_.contentType.contains("application/json")).getOrElse(defaultProtocol) ) } override def serializerForRequest: NegotiatedSerializer[JsValue, ByteString] = new JsValueSerializer(defaultProtocol) } implicit val StringMessageSerializer: StrictMessageSerializer[String] = new StrictMessageSerializer[String] { private val defaultProtocol = MessageProtocol(Some("text/plain"), Some("utf-8"), None) override val acceptResponseProtocols: immutable.Seq[MessageProtocol] = immutable.Seq(defaultProtocol) private class StringSerializer(override val protocol: MessageProtocol) extends NegotiatedSerializer[String, ByteString] { override def serialize(s: String) = ByteString.fromString(s, protocol.charset.getOrElse("utf-8")) } private class StringDeserializer(charset: String) extends NegotiatedDeserializer[String, ByteString] { override def deserialize(wire: ByteString) = wire.decodeString(charset) } override val serializerForRequest: NegotiatedSerializer[String, ByteString] = new StringSerializer(defaultProtocol) override def deserializer(protocol: MessageProtocol): NegotiatedDeserializer[String, ByteString] = { if (protocol.contentType.forall(_ == "text/plain")) { new StringDeserializer(protocol.charset.getOrElse("utf-8")) } else { throw UnsupportedMediaType(protocol, defaultProtocol) } } override def serializerForResponse( acceptedMessageProtocols: immutable.Seq[MessageProtocol] ): NegotiatedSerializer[String, ByteString] = { if (acceptedMessageProtocols.isEmpty) { serializerForRequest } else { acceptedMessageProtocols.collectFirst { case wildcardOrNone if wildcardOrNone.contentType.forall(ct => ct == "*" || ct == "*/*") => new StringSerializer(wildcardOrNone.withContentType("text/plain")) case textPlain if textPlain.contentType.contains("text/plain") => new StringSerializer(textPlain) } match { case Some(serializer) => serializer case None => throw NotAcceptable(acceptedMessageProtocols, defaultProtocol) } } } } implicit val NotUsedMessageSerializer: StrictMessageSerializer[NotUsed] = new StrictMessageSerializer[NotUsed] { override def serializerForRequest = new NegotiatedSerializer[NotUsed, ByteString] { override def serialize(message: NotUsed): ByteString = ByteString.empty } override def deserializer(messageProtocol: MessageProtocol) = new NegotiatedDeserializer[NotUsed, ByteString] { override def deserialize(wire: ByteString) = NotUsed } override def serializerForResponse(acceptedMessageProtocols: immutable.Seq[MessageProtocol]) = new NegotiatedSerializer[NotUsed, ByteString] { override def serialize(message: NotUsed): ByteString = ByteString.empty } override def isUsed: Boolean = false } implicit val DoneMessageSerializer: StrictMessageSerializer[Done] = new StrictMessageSerializer[Done] { override def serializerForRequest = new NegotiatedSerializer[Done, ByteString] { override def serialize(message: Done): ByteString = ByteString.empty } override def deserializer(messageProtocol: MessageProtocol) = new NegotiatedDeserializer[Done, ByteString] { override def deserialize(wire: ByteString) = Done } override def serializerForResponse(acceptedMessageProtocols: immutable.Seq[MessageProtocol]) = new NegotiatedSerializer[Done, ByteString] { override def serialize(message: Done): ByteString = ByteString.empty } } implicit val NoopMessageSerializer: StrictMessageSerializer[ByteString] = new StrictMessageSerializer[ByteString] { override def serializerForRequest = new NegotiatedSerializer[ByteString, ByteString] { override def serialize(message: ByteString): ByteString = message } override def deserializer(protocol: MessageProtocol) = new NegotiatedDeserializer[ByteString, ByteString] { override def deserialize(wire: ByteString): ByteString = wire } override def serializerForResponse(acceptedMessageProtocols: immutable.Seq[MessageProtocol]) = new NegotiatedSerializer[ByteString, ByteString] { override def serialize(message: ByteString): ByteString = message } } } trait LowPriorityMessageSerializerImplicits { import MessageSerializer._ implicit def jsValueFormatMessageSerializer[Message]( implicit jsValueMessageSerializer: MessageSerializer[JsValue, ByteString], format: Format[Message] ): StrictMessageSerializer[Message] = new StrictMessageSerializer[Message] { private class JsValueFormatSerializer(jsValueSerializer: NegotiatedSerializer[JsValue, ByteString]) extends NegotiatedSerializer[Message, ByteString] { override def protocol: MessageProtocol = jsValueSerializer.protocol override def serialize(message: Message): ByteString = { val jsValue = try { Json.toJson(message) } catch { case NonFatal(e) => throw SerializationException(e) } jsValueSerializer.serialize(jsValue) } } private class JsValueFormatDeserializer(jsValueDeserializer: NegotiatedDeserializer[JsValue, ByteString]) extends NegotiatedDeserializer[Message, ByteString] { override def deserialize(wire: ByteString): Message = { val jsValue = jsValueDeserializer.deserialize(wire) jsValue.validate[Message] match { case JsSuccess(message, _) => message case JsError(errors) => throw DeserializationException(JsResultException(errors)) } } } override def acceptResponseProtocols: immutable.Seq[MessageProtocol] = jsValueMessageSerializer.acceptResponseProtocols override def deserializer(protocol: MessageProtocol): NegotiatedDeserializer[Message, ByteString] = new JsValueFormatDeserializer(jsValueMessageSerializer.deserializer(protocol)) override def serializerForResponse( acceptedMessageProtocols: immutable.Seq[MessageProtocol] ): NegotiatedSerializer[Message, ByteString] = new JsValueFormatSerializer(jsValueMessageSerializer.serializerForResponse(acceptedMessageProtocols)) override def serializerForRequest: NegotiatedSerializer[Message, ByteString] = new JsValueFormatSerializer(jsValueMessageSerializer.serializerForRequest) } implicit def sourceMessageSerializer[Message]( implicit delegate: MessageSerializer[Message, ByteString] ): StreamedMessageSerializer[Message] = new StreamedMessageSerializer[Message] { private class SourceSerializer(delegate: NegotiatedSerializer[Message, ByteString]) extends NegotiatedSerializer[Source[Message, NotUsed], Source[ByteString, NotUsed]] { override def protocol: MessageProtocol = delegate.protocol override def serialize(messages: Source[Message, NotUsed]) = messages.map(delegate.serialize) } private class SourceDeserializer(delegate: NegotiatedDeserializer[Message, ByteString]) extends NegotiatedDeserializer[Source[Message, NotUsed], Source[ByteString, NotUsed]] { override def deserialize(wire: Source[ByteString, NotUsed]) = wire.map(delegate.deserialize) } override def acceptResponseProtocols: immutable.Seq[MessageProtocol] = delegate.acceptResponseProtocols override def deserializer( protocol: MessageProtocol ): NegotiatedDeserializer[Source[Message, NotUsed], Source[ByteString, NotUsed]] = new SourceDeserializer(delegate.deserializer(protocol)) override def serializerForResponse( acceptedMessageProtocols: immutable.Seq[MessageProtocol] ): NegotiatedSerializer[Source[Message, NotUsed], Source[ByteString, NotUsed]] = new SourceSerializer(delegate.serializerForResponse(acceptedMessageProtocols)) override def serializerForRequest: NegotiatedSerializer[Source[Message, NotUsed], Source[ByteString, NotUsed]] = new SourceSerializer(delegate.serializerForRequest) } }
lagom/lagom
service/scaladsl/api/src/main/scala/com/lightbend/lagom/scaladsl/api/deser/MessageSerializer.scala
Scala
apache-2.0
13,993
/* * Copyright (c) 2014-2021 by The Monix Project Developers. * See the project homepage at: https://monix.io * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package monix.execution.schedulers import minitest.SimpleTestSuite import monix.execution.ExecutionModel.{AlwaysAsyncExecution, BatchedExecution, SynchronousExecution} object ExecutionModelSuite extends SimpleTestSuite { test("SynchronousExecution") { val em = SynchronousExecution assert(em.isSynchronous) assert(!em.isAlwaysAsync) assert(!em.isBatched) assertEquals(em.recommendedBatchSize, 1073741824) assertEquals(em.batchedExecutionModulus, 1073741823) for (i <- 0 until 100) assertEquals(em.nextFrameIndex(i), 1) } test("AlwaysAsyncExecution") { val em = AlwaysAsyncExecution assert(em.isAlwaysAsync) assert(!em.isSynchronous) assert(!em.isBatched) assertEquals(em.recommendedBatchSize, 1) assertEquals(em.batchedExecutionModulus, 0) for (i <- 0 until 100) assertEquals(em.nextFrameIndex(i), 0) } test("BatchedExecution") { for (i <- 2 to 512) { val em = BatchedExecution(i) assert(em.isBatched) assert(!em.isAlwaysAsync) assert(!em.isSynchronous) assert(em.recommendedBatchSize % 2 == 0) assertEquals(em.batchedExecutionModulus, em.recommendedBatchSize - 1) assert(em.recommendedBatchSize >= i) var index = 1 for (j <- 1 until em.recommendedBatchSize * 3) { index = em.nextFrameIndex(index) assert(index >= 0 && index < em.recommendedBatchSize) } } } }
monifu/monifu
monix-execution/shared/src/test/scala/monix/execution/schedulers/ExecutionModelSuite.scala
Scala
apache-2.0
2,109
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.execution.streaming.sources import java.net.{InetSocketAddress, SocketException} import java.nio.ByteBuffer import java.nio.channels.ServerSocketChannel import java.nio.charset.StandardCharsets import java.sql.Timestamp import java.util.concurrent.LinkedBlockingQueue import java.util.concurrent.TimeUnit._ import scala.collection.JavaConverters._ import org.apache.spark.internal.Logging import org.apache.spark.sql.AnalysisException import org.apache.spark.sql.connector.read.streaming.{Offset, SparkDataStream} import org.apache.spark.sql.execution.datasources.DataSource import org.apache.spark.sql.execution.datasources.v2.StreamingDataSourceV2Relation import org.apache.spark.sql.execution.streaming._ import org.apache.spark.sql.execution.streaming.continuous._ import org.apache.spark.sql.streaming.{StreamingQueryException, StreamTest} import org.apache.spark.sql.test.SharedSparkSession import org.apache.spark.sql.types._ import org.apache.spark.sql.util.CaseInsensitiveStringMap class TextSocketStreamSuite extends StreamTest with SharedSparkSession { override def afterEach(): Unit = { sqlContext.streams.active.foreach(_.stop()) if (serverThread != null) { serverThread.interrupt() serverThread.join() serverThread = null } } private var serverThread: ServerThread = null case class AddSocketData(data: String*) extends AddData { override def addData(query: Option[StreamExecution]): (SparkDataStream, Offset) = { require( query.nonEmpty, "Cannot add data when there is no query for finding the active socket source") val sources = query.get.logicalPlan.collect { case r: StreamingDataSourceV2Relation if r.stream.isInstanceOf[TextSocketMicroBatchStream] => r.stream.asInstanceOf[TextSocketMicroBatchStream] } if (sources.isEmpty) { throw new Exception( "Could not find socket source in the StreamExecution logical plan to add data to") } else if (sources.size > 1) { throw new Exception( "Could not select the socket source in the StreamExecution logical plan as there" + "are multiple socket sources:\\n\\t" + sources.mkString("\\n\\t")) } val socketSource = sources.head assert(serverThread != null && serverThread.port != 0) val currOffset = socketSource.getCurrentOffset() data.foreach(serverThread.enqueue) val newOffset = LongOffset(currOffset.offset + data.size) (socketSource, newOffset) } override def toString: String = s"AddSocketData(data = $data)" } test("backward compatibility with old path") { val ds = DataSource.lookupDataSource( "org.apache.spark.sql.execution.streaming.TextSocketSourceProvider", spark.sqlContext.conf).newInstance() assert(ds.isInstanceOf[TextSocketSourceProvider], "Could not find socket source") } test("basic usage") { serverThread = new ServerThread() serverThread.start() val ref = spark import ref.implicits._ val socket = spark .readStream .format("socket") .options(Map("host" -> "localhost", "port" -> serverThread.port.toString)) .load() .as[String] assert(socket.schema === StructType(StructField("value", StringType) :: Nil)) testStream(socket)( StartStream(), AddSocketData("hello"), CheckAnswer("hello"), AddSocketData("world"), CheckLastBatch("world"), CheckAnswer("hello", "world"), StopStream ) } test("timestamped usage") { serverThread = new ServerThread() serverThread.start() val socket = spark .readStream .format("socket") .options(Map( "host" -> "localhost", "port" -> serverThread.port.toString, "includeTimestamp" -> "true")) .load() assert(socket.schema === StructType(StructField("value", StringType) :: StructField("timestamp", TimestampType) :: Nil)) var batch1Stamp: Timestamp = null var batch2Stamp: Timestamp = null val curr = System.currentTimeMillis() testStream(socket)( StartStream(), AddSocketData("hello"), CheckAnswerRowsByFunc( rows => { assert(rows.size === 1) assert(rows.head.getAs[String](0) === "hello") batch1Stamp = rows.head.getAs[Timestamp](1) Thread.sleep(10) }, true), AddSocketData("world"), CheckAnswerRowsByFunc( rows => { assert(rows.size === 1) assert(rows.head.getAs[String](0) === "world") batch2Stamp = rows.head.getAs[Timestamp](1) }, true), StopStream ) // Timestamp for rate stream is round to second which leads to milliseconds lost, that will // make batch1stamp smaller than current timestamp if both of them are in the same second. // Comparing by second to make sure the correct behavior. assert(batch1Stamp.getTime >= SECONDS.toMillis(MILLISECONDS.toSeconds(curr))) assert(!batch2Stamp.before(batch1Stamp)) } test("params not given") { val provider = new TextSocketSourceProvider intercept[AnalysisException] { provider.getTable(CaseInsensitiveStringMap.empty()) } intercept[AnalysisException] { provider.getTable(new CaseInsensitiveStringMap(Map("host" -> "localhost").asJava)) } intercept[AnalysisException] { provider.getTable(new CaseInsensitiveStringMap(Map("port" -> "1234").asJava)) } } test("non-boolean includeTimestamp") { val provider = new TextSocketSourceProvider val params = Map("host" -> "localhost", "port" -> "1234", "includeTimestamp" -> "fasle") intercept[AnalysisException] { provider.getTable(new CaseInsensitiveStringMap(params.asJava)) } } test("user-specified schema given") { val userSpecifiedSchema = StructType( StructField("name", StringType) :: StructField("area", StringType) :: Nil) val params = Map("host" -> "localhost", "port" -> "1234") val exception = intercept[UnsupportedOperationException] { spark.readStream.schema(userSpecifiedSchema).format("socket").options(params).load() } assert(exception.getMessage.contains( "TextSocketSourceProvider source does not support user-specified schema")) } test("input row metrics") { serverThread = new ServerThread() serverThread.start() val ref = spark import ref.implicits._ val socket = spark .readStream .format("socket") .options(Map("host" -> "localhost", "port" -> serverThread.port.toString)) .load() .as[String] assert(socket.schema === StructType(StructField("value", StringType) :: Nil)) testStream(socket)( StartStream(), AddSocketData("hello"), CheckAnswer("hello"), AssertOnQuery { q => val numRowMetric = q.lastExecution.executedPlan.collectLeaves().head.metrics.get("numOutputRows") numRowMetric.nonEmpty && numRowMetric.get.value == 1 }, StopStream ) } test("verify ServerThread only accepts the first connection") { serverThread = new ServerThread() serverThread.start() val ref = spark import ref.implicits._ val socket = spark .readStream .format("socket") .options(Map("host" -> "localhost", "port" -> serverThread.port.toString)) .load() .as[String] assert(socket.schema === StructType(StructField("value", StringType) :: Nil)) testStream(socket)( StartStream(), AddSocketData("hello"), CheckAnswer("hello"), AddSocketData("world"), CheckLastBatch("world"), CheckAnswer("hello", "world"), StopStream ) // we are trying to connect to the server once again which should fail try { val socket2 = spark .readStream .format("socket") .options(Map("host" -> "localhost", "port" -> serverThread.port.toString)) .load() .as[String] testStream(socket2)( StartStream(), AddSocketData("hello"), CheckAnswer("hello"), AddSocketData("world"), CheckLastBatch("world"), CheckAnswer("hello", "world"), StopStream ) fail("StreamingQueryException is expected!") } catch { case e: StreamingQueryException if e.cause.isInstanceOf[SocketException] => // pass } } test("continuous data") { serverThread = new ServerThread() serverThread.start() val stream = new TextSocketContinuousStream( host = "localhost", port = serverThread.port, numPartitions = 2, options = CaseInsensitiveStringMap.empty()) val partitions = stream.planInputPartitions(stream.initialOffset()) assert(partitions.length == 2) val numRecords = 10 val data = scala.collection.mutable.ListBuffer[Int]() val offsets = scala.collection.mutable.ListBuffer[Int]() val readerFactory = stream.createContinuousReaderFactory() import org.scalatest.time.SpanSugar._ failAfter(5.seconds) { // inject rows, read and check the data and offsets for (i <- 0 until numRecords) { serverThread.enqueue(i.toString) } partitions.foreach { case t: TextSocketContinuousInputPartition => val r = readerFactory.createReader(t).asInstanceOf[TextSocketContinuousPartitionReader] for (i <- 0 until numRecords / 2) { r.next() offsets.append(r.getOffset().asInstanceOf[ContinuousRecordPartitionOffset].offset) data.append(r.get().getString(0).toInt) // commit the offsets in the middle and validate if processing continues if (i == 2) { commitOffset(t.partitionId, i + 1) } } assert(offsets.toSeq == Range.inclusive(1, 5)) assert(data.toSeq == Range(t.partitionId, 10, 2)) offsets.clear() data.clear() case _ => throw new IllegalStateException("Unexpected task type") } assert(stream.startOffset.offsets == List(3, 3)) stream.commit(TextSocketOffset(List(5, 5))) assert(stream.startOffset.offsets == List(5, 5)) } def commitOffset(partition: Int, offset: Int): Unit = { val offsetsToCommit = stream.startOffset.offsets.updated(partition, offset) stream.commit(TextSocketOffset(offsetsToCommit)) assert(stream.startOffset.offsets == offsetsToCommit) } } test("continuous data - invalid commit") { serverThread = new ServerThread() serverThread.start() val stream = new TextSocketContinuousStream( host = "localhost", port = serverThread.port, numPartitions = 2, options = CaseInsensitiveStringMap.empty()) stream.startOffset = TextSocketOffset(List(5, 5)) assertThrows[IllegalStateException] { stream.commit(TextSocketOffset(List(6, 6))) } } test("continuous data with timestamp") { serverThread = new ServerThread() serverThread.start() val stream = new TextSocketContinuousStream( host = "localhost", port = serverThread.port, numPartitions = 2, options = new CaseInsensitiveStringMap(Map("includeTimestamp" -> "true").asJava)) val partitions = stream.planInputPartitions(stream.initialOffset()) assert(partitions.size == 2) val numRecords = 4 // inject rows, read and check the data and offsets for (i <- 0 until numRecords) { serverThread.enqueue(i.toString) } val readerFactory = stream.createContinuousReaderFactory() partitions.foreach { case t: TextSocketContinuousInputPartition => val r = readerFactory.createReader(t).asInstanceOf[TextSocketContinuousPartitionReader] for (_ <- 0 until numRecords / 2) { r.next() assert(r.get().numFields === 2) // just try to read columns one by one - it would throw error if the row is corrupted r.get().getString(0) r.get().getLong(1) } case _ => throw new IllegalStateException("Unexpected task type") } } /** * This class tries to mimic the behavior of netcat, so that we can ensure * TextSocketStream supports netcat, which only accepts the first connection * and exits the process when the first connection is closed. * * Please refer SPARK-24466 for more details. */ private class ServerThread extends Thread with Logging { private val serverSocketChannel = ServerSocketChannel.open() serverSocketChannel.bind(new InetSocketAddress(0)) private val messageQueue = new LinkedBlockingQueue[String]() val port = serverSocketChannel.socket().getLocalPort override def run(): Unit = { try { val clientSocketChannel = serverSocketChannel.accept() // Close server socket channel immediately to mimic the behavior that // only first connection will be made and deny any further connections // Note that the first client socket channel will be available serverSocketChannel.close() clientSocketChannel.configureBlocking(false) clientSocketChannel.socket().setTcpNoDelay(true) while (true) { val line = messageQueue.take() + "\\n" clientSocketChannel.write(ByteBuffer.wrap(line.getBytes(StandardCharsets.UTF_8))) } } catch { case e: InterruptedException => } finally { // no harm to call close() again... serverSocketChannel.close() } } def enqueue(line: String): Unit = { messageQueue.put(line) } } }
ueshin/apache-spark
sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/sources/TextSocketStreamSuite.scala
Scala
apache-2.0
14,444
object Test extends App { Macros.foo1 Macros.foo2 }
som-snytt/dotty
tests/disabled/macro/run/macroPlugins-typedMacroBody/Test_3.scala
Scala
apache-2.0
55
/* * Copyright (C) 2015 Stratio (http://stratio.com) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.stratio.sparta.serving.api.service.http import javax.ws.rs.Path import akka.pattern.ask import com.stratio.sparta.serving.api.actor.DriverActor._ import com.stratio.sparta.serving.api.constants.HttpConstant import com.stratio.sparta.serving.core.config.SpartaConfig import com.stratio.sparta.serving.core.constants.AppConstant import com.stratio.sparta.serving.core.models.dto.LoggedUser import com.stratio.sparta.serving.core.models.files.SpartaFilesResponse import com.stratio.spray.oauth2.client.OauthClient import com.wordnik.swagger.annotations._ import spray.http._ import spray.httpx.unmarshalling.{FormDataUnmarshallers, Unmarshaller} import spray.routing.Route import scala.util.{Failure, Success, Try} @Api(value = HttpConstant.DriverPath, description = "Operations over plugins: now only to upload/download jars.") trait DriverHttpService extends BaseHttpService with OauthClient { implicit def unmarshaller[T: Manifest]: Unmarshaller[MultipartFormData] = FormDataUnmarshallers.MultipartFormDataUnmarshaller override def routes(user: Option[LoggedUser] = None): Route = upload(user) ~ download(user) ~ getAll(user) ~ deleteAllFiles(user) ~ deleteFile(user) @Path("") @ApiOperation(value = "Upload a file to driver directory.", notes = "Creates a file in the server filesystem with the uploaded jar.", httpMethod = "PUT") @ApiImplicitParams(Array( new ApiImplicitParam(name = "file", value = "The jar", dataType = "file", required = true, paramType = "formData") )) def upload(user: Option[LoggedUser]): Route = { path(HttpConstant.DriverPath) { put { entity(as[MultipartFormData]) { form => complete { for { response <- (supervisor ? UploadDrivers(form.fields)).mapTo[SpartaFilesResponse] } yield response match { case SpartaFilesResponse(Success(newFilesUris)) => newFilesUris case SpartaFilesResponse(Failure(exception)) => throw exception } } } } } } @Path("/{fileName}") @ApiOperation(value = "Download a file from the driver directory.", httpMethod = "GET") @ApiImplicitParams(Array( new ApiImplicitParam(name = "fileName", value = "Name of the jar", dataType = "String", required = true, paramType = "path") )) def download(user: Option[LoggedUser]): Route = get { pathPrefix(HttpConstant.DriverPath) { getFromDirectory( Try(SpartaConfig.getDetailConfig.get.getString(AppConstant.DriverPackageLocation)) .getOrElse(AppConstant.DefaultDriverPackageLocation)) } } @Path("") @ApiOperation(value = "Browse all drivers uploaded", notes = "Finds all drivers.", httpMethod = "GET") @ApiResponses(Array( new ApiResponse(code = HttpConstant.NotFound, message = HttpConstant.NotFoundMessage) )) def getAll(user: Option[LoggedUser]): Route = path(HttpConstant.DriverPath) { get { complete { for { response <- (supervisor ? ListDrivers).mapTo[SpartaFilesResponse] } yield response match { case SpartaFilesResponse(Success(filesUris)) => filesUris case SpartaFilesResponse(Failure(exception)) => throw exception } } } } @Path("") @ApiOperation(value = "Delete all drivers uploaded", notes = "Delete all drivers.", httpMethod = "DELETE") @ApiResponses(Array( new ApiResponse(code = HttpConstant.NotFound, message = HttpConstant.NotFoundMessage) )) def deleteAllFiles(user: Option[LoggedUser]): Route = path(HttpConstant.DriverPath) { delete { complete { for { response <- (supervisor ? DeleteDrivers).mapTo[DriverResponse] } yield response match { case DriverResponse(Success(_)) => StatusCodes.OK case DriverResponse(Failure(exception)) => throw exception } } } } @Path("/{fileName}") @ApiOperation(value = "Delete one driver uploaded", notes = "Delete one driver.", httpMethod = "DELETE") @ApiImplicitParams(Array( new ApiImplicitParam(name = "fileName", value = "Name of the jar", dataType = "String", required = true, paramType = "path") )) @ApiResponses(Array( new ApiResponse(code = HttpConstant.NotFound, message = HttpConstant.NotFoundMessage) )) def deleteFile(user: Option[LoggedUser]): Route = { path(HttpConstant.DriverPath / Segment) { file => delete { complete { for { response <- (supervisor ? DeleteDriver(file)).mapTo[DriverResponse] } yield response match { case DriverResponse(Success(_)) => StatusCodes.OK case DriverResponse(Failure(exception)) => throw exception } } } } } }
Stratio/Sparta
serving-api/src/main/scala/com/stratio/sparta/serving/api/service/http/DriverHttpService.scala
Scala
apache-2.0
5,554
/* * Copyright (C) 2014 - 2017 Contributors as noted in the AUTHORS.md file * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as * published by the Free Software Foundation, either version 3 of the * License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package com.wegtam.tensei.agent import java.io.File import akka.testkit.{ TestActorRef, TestFSMRef } import com.wegtam.tensei.adt._ import com.wegtam.tensei.agent.Processor.ProcessorMessages import com.wegtam.tensei.agent.Processor.ProcessorMessages.StartProcessingMessage import com.wegtam.tensei.agent.adt._ import com.wegtam.tensei.agent.parsers.{ BaseParserMessages, FileParser } class ProcessorTest extends ActorSpec with XmlTestHelpers { describe("Processor") { describe("when given a simple data tree and a simple source and target dfasdl") { val sourceData = getClass.getResource("/com/wegtam/tensei/agent/processors/simple-dfasdl-data.csv").toURI val dfasdl = DFASDL( "SIMPLE-DFASDL", scala.io.Source .fromInputStream( getClass.getResourceAsStream("/com/wegtam/tensei/agent/processors/simple-dfasdl.xml") ) .mkString ) val sourceElements = List( ElementReference(dfasdl.id, "firstname"), ElementReference(dfasdl.id, "lastname"), ElementReference(dfasdl.id, "email"), ElementReference(dfasdl.id, "birthday") ) val targetElements = List( ElementReference(dfasdl.id, "firstname"), ElementReference(dfasdl.id, "lastname"), ElementReference(dfasdl.id, "email"), ElementReference(dfasdl.id, "birthday") ) val mapping = MappingTransformation(sourceElements, targetElements) val recipe = new Recipe("COPY-COLUMNS", Recipe.MapOneToOne, List(mapping)) val cookbook = Cookbook("COOKBOOK", List(dfasdl), Option(dfasdl), List(recipe)) val source = ConnectionInformation(sourceData, Option(DFASDLReference(cookbook.id, dfasdl.id))) val targetData = File.createTempFile("ProcessorTest", "tmpData").toURI val target = ConnectionInformation(targetData, Option(DFASDLReference(cookbook.id, dfasdl.id))) it("should process the data correctly") { val dataTree = TestActorRef(DataTreeDocument.props(dfasdl, Option("ProcessorTest"), Set.empty[String])) val fileParser = TestActorRef(FileParser.props(source, cookbook, dataTree, Option("ProcessorTest"))) fileParser ! BaseParserMessages.SubParserInitialize expectMsg(BaseParserMessages.SubParserInitialized) fileParser ! BaseParserMessages.Start val response = expectMsgType[ParserStatusMessage] response.status should be(ParserStatus.COMPLETED) fileParser ! BaseParserMessages.Stop val processor = TestFSMRef(new Processor(Option("ProcessorTest"))) val msg = new AgentStartTransformationMessage(List(source), target, cookbook, Option("1-2-3-4")) val pmsg = StartProcessingMessage(msg, List(dataTree)) processor ! pmsg expectMsg(ProcessorMessages.Completed) val actualData = scala.io.Source.fromURI(targetData).mkString val expectedData = scala.io.Source .fromInputStream( getClass.getResourceAsStream( "/com/wegtam/tensei/agent/processors/simple-dfasdl-data-expected-target.csv" ) ) .mkString actualData shouldEqual expectedData } } describe("when processing a simple data source into a simple target") { describe("having source fields that are too long for the target") { it("should truncate the appropriate target fields") { val sourceData = getClass.getResource("/com/wegtam/tensei/agent/processors/long-column.csv").toURI val sourceDfasdl = DFASDL("SOURCE-01", scala.io.Source .fromInputStream( getClass.getResourceAsStream( "/com/wegtam/tensei/agent/processors/long-column-source.xml" ) ) .mkString) val targetDfasdl = DFASDL("TARGET", scala.io.Source .fromInputStream( getClass.getResourceAsStream( "/com/wegtam/tensei/agent/processors/long-column-target.xml" ) ) .mkString) val sourceElements = List( ElementReference(sourceDfasdl.id, "birthday"), ElementReference(sourceDfasdl.id, "notes") ) val targetElements = List( ElementReference(targetDfasdl.id, "birthday"), ElementReference(targetDfasdl.id, "notes") ) val mapping = MappingTransformation(sourceElements, targetElements) val recipe = new Recipe("COPY-COLUMNS", Recipe.MapOneToOne, List(mapping)) val cookbook = Cookbook("COOKBOOK", List(sourceDfasdl), Option(targetDfasdl), List(recipe)) val source = ConnectionInformation(sourceData, Option(DFASDLReference(cookbook.id, sourceDfasdl.id))) val targetData = File.createTempFile("ProcessorTest", "tmpData").toURI val target = ConnectionInformation(targetData, Option(DFASDLReference(cookbook.id, targetDfasdl.id))) val dataTree = TestActorRef( DataTreeDocument.props(sourceDfasdl, Option("ProcessorTest"), Set.empty[String]) ) val fileParser = TestActorRef(FileParser.props(source, cookbook, dataTree, Option("ProcessorTest"))) fileParser ! BaseParserMessages.SubParserInitialize expectMsg(BaseParserMessages.SubParserInitialized) fileParser ! BaseParserMessages.Start val response = expectMsgType[ParserStatusMessage] response.status should be(ParserStatus.COMPLETED) fileParser ! BaseParserMessages.Stop val processor = TestFSMRef(new Processor(Option("ProcessorTest"))) val msg = new AgentStartTransformationMessage(List(source), target, cookbook, Option("1-2-3-4")) val pmsg = StartProcessingMessage(msg, List(dataTree)) processor ! pmsg expectMsg(ProcessorMessages.Completed) val actualData = scala.io.Source.fromURI(targetData).mkString val expectedData = scala.io.Source .fromInputStream( getClass.getResourceAsStream( "/com/wegtam/tensei/agent/processors/long-column-expected-target.csv" ) ) .mkString actualData shouldEqual expectedData } } } describe("when given a file that contains no data") { it("should work") { val sourceData = getClass.getResource("/com/wegtam/tensei/agent/processors/empty.csv").toURI val sourceDfasdl = DFASDL( "SOURCE-01", scala.io.Source .fromInputStream( getClass .getResourceAsStream("/com/wegtam/tensei/agent/processors/long-column-source.xml") ) .mkString ) val targetDfasdl = DFASDL( "TARGET", scala.io.Source .fromInputStream( getClass .getResourceAsStream("/com/wegtam/tensei/agent/processors/long-column-target.xml") ) .mkString ) val sourceElements = List( ElementReference(sourceDfasdl.id, "birthday"), ElementReference(sourceDfasdl.id, "notes") ) val targetElements = List( ElementReference(targetDfasdl.id, "birthday"), ElementReference(targetDfasdl.id, "notes") ) val mapping = MappingTransformation(sourceElements, targetElements) val recipe = new Recipe("COPY-COLUMNS", Recipe.MapOneToOne, List(mapping)) val cookbook = Cookbook("COOKBOOK", List(sourceDfasdl), Option(targetDfasdl), List(recipe)) val source = ConnectionInformation(sourceData, Option(DFASDLReference(cookbook.id, sourceDfasdl.id))) val targetData = File.createTempFile("ProcessorTest", "tmpData").toURI val target = ConnectionInformation(targetData, Option(DFASDLReference(cookbook.id, targetDfasdl.id))) val dataTree = TestActorRef( DataTreeDocument.props(sourceDfasdl, Option("ProcessorTest"), Set.empty[String]) ) val fileParser = TestActorRef(FileParser.props(source, cookbook, dataTree, Option("ProcessorTest"))) fileParser ! BaseParserMessages.SubParserInitialize expectMsg(BaseParserMessages.SubParserInitialized) fileParser ! BaseParserMessages.Start val response = expectMsgType[ParserStatusMessage] response.status should be(ParserStatus.COMPLETED) fileParser ! BaseParserMessages.Stop val processor = TestFSMRef(new Processor(Option("ProcessorTest"))) val msg = new AgentStartTransformationMessage(List(source), target, cookbook, Option("1-2-3-4")) val pmsg = StartProcessingMessage(msg, List(dataTree)) processor ! pmsg expectMsg(ProcessorMessages.Completed) val actualData = scala.io.Source.fromURI(targetData).mkString val expectedData = scala.io.Source .fromInputStream( getClass .getResourceAsStream("/com/wegtam/tensei/agent/processors/empty-expected-target.csv") ) .mkString actualData shouldEqual expectedData } } } }
Tensei-Data/tensei-agent
src/test/scala/com/wegtam/tensei/agent/ProcessorTest.scala
Scala
agpl-3.0
10,252
package org.dama.datasynth.common.generators.property.dummy import org.dama.datasynth.common.generators.property.PropertyGenerator /** * Created by aprat on 11/04/17. * * Dummy property generator that produces a Long */ class DummyLongPropertyGenerator( num : Long ) extends PropertyGenerator[Long] { override def run(id: Long, random: Long, dependencies: Any*) : Long = num }
joangui/DataSynth
src/main/scala/org/dama/datasynth/common/generators/property/dummy/DummyLongPropertyGenerator.scala
Scala
gpl-3.0
392
package fpinscala.laziness import Stream._ trait Stream[+A] { def foldRight[B](z: => B)(f: (A, => B) => B): B = // The arrow `=>` in front of the argument type `B` means that the function `f` takes its second argument by name and may choose not to evaluate it. this match { case Cons(h,t) => f(h(), t().foldRight(z)(f)) // If `f` doesn't evaluate its second argument, the recursion never occurs. case _ => z } def exists(p: A => Boolean): Boolean = foldRight(false)((a, b) => p(a) || b) // Here `b` is the unevaluated recursive step that folds the tail of the stream. If `p(a)` returns `true`, `b` will never be evaluated and the computation terminates early. @annotation.tailrec final def find(f: A => Boolean): Option[A] = this match { case Empty => None case Cons(h, t) => if (f(h())) Some(h()) else t().find(f) } def toList: List[A] = { @annotation.tailrec def rec(xs: List[A], st: Stream[A]) : List[A] = st match { case Cons(h, t) => rec(h() :: xs, t()) case _ => xs } rec(List(), this).reverse } def take(n: Int): Stream[A] = sys.error("todo") def takeViaUnfold(n: Int): Stream[A] = sys.error("todo") def drop(n: Int): Stream[A] = sys.error("todo") def takeWhile(p: A => Boolean): Stream[A] = sys.error("todo") def takeWhileViaUnfold(p: A => Boolean): Stream[A] = sys.error("todo") def forAll(p: A => Boolean): Boolean = sys.error("todo") def takeWhileViaFoldRight(p: A => Boolean): Stream[A] = sys.error("todo") def headOption: Option[A] = sys.error("todo") def map[B](f: A => B): Stream[B] = sys.error("todo") def mapViaUnfold[B](f: A => B): Stream[B] = sys.error("todo") def filter(p: A => Boolean): Stream[A] = sys.error("todo") def append[B>:A](other: Stream[B]): Stream[B] = sys.error("todo") def flatMap[B](f: A => Stream[B]): Stream[B] = sys.error("todo") def zipWith[B,C](s2: Stream[B])(f: (A,B) => C): Stream[C] = sys.error("todo") def zipAll[B](s2: Stream[B]): Stream[(Option[A], Option[B])] = sys.error("todo") def startsWith[B](s: Stream[B]): Boolean = sys.error("todo") def tails: Stream[Stream[A]] = sys.error("todo using unfold") def scanRight[B](s: B)(f: (A, B) => B): Stream[B] = sys.error("todo") } case object Empty extends Stream[Nothing] case class Cons[+A](h: () => A, t: () => Stream[A]) extends Stream[A] object Stream { def cons[A](hd: => A, tl: => Stream[A]): Stream[A] = { lazy val head = hd lazy val tail = tl Cons(() => head, () => tail) } def empty[A]: Stream[A] = Empty def apply[A](as: A*): Stream[A] = if (as.isEmpty) empty else cons(as.head, apply(as.tail: _*)) val ones: Stream[Int] = Stream.cons(1, ones) def constant[A](a: A): Stream[A] = sys.error("todo") def from(n: Int): Stream[Int] = sys.error("todo") lazy val fibs: Stream[Int] = sys.error("todo") def unfold[A, S](z: S)(f: S => Option[(A, S)]): Stream[A] = sys.error("todo") lazy val fibsViaUnfold: Stream[Int] = sys.error("todo") def fromViaUnfold(n: Int): Stream[Int] = sys.error("todo") def constantViaUnfold[A](a: A): Stream[A] = sys.error("todo") lazy val onesViaUnfold: Stream[Int] = sys.error("todo") }
fpinscala-muc/fpinscala-mhofsche
exercises/src/main/scala/fpinscala/laziness/Stream.scala
Scala
mit
3,204
import java.io._ import java.net._ import java.security._ import java.util._ /** * Scala client for BaseX. * Works with BaseX 7.x (but not with BaseX 8.0 and later) * Does not support all bindings yet; your extensions are welcome. * * Documentation: http://docs.basex.org/wiki/Clients * * (C) BaseX Team 2005-12, BSD License */ /** * Session constructor. * @param host server name * @param port server port * @param usern user name * @param pw password * @throws IOException I/O exception */ class BaseXClient(host: String, port: Int, usern: String, pw: String) { var inf = "" val socket = new Socket socket.connect(new InetSocketAddress(host, port), 5000) val in = new BufferedInputStream(socket.getInputStream) val out = socket.getOutputStream val ts = receive send(usern) send(md5(md5(pw) + ts)) if(!ok) throw new IOException("Access denied.") /** * Executes a command and serializes the result to an output stream. * @param cmd command * @param os output stream * @throws IOException I/O Exception */ def execute(cmd: String, os: OutputStream) { send(cmd) receive(in, os) inf = receive if(!ok) throw new IOException(inf) } /** * Executes a command and returns the result. * @param cmd command * @return result * @throws IOException I/O Exception */ def execute(cmd: String) : String = { val os = new ByteArrayOutputStream execute(cmd, os) os.toString("UTF-8") } /** * Creates a query object. * @param query query string * @return query * @throws IOException I/O Exception */ def query(query: String) : Query = { new Query(query) } /** * Creates a database. * @param name name of database * @param input xml input * @throws IOException I/O exception */ def create(name: String, input: InputStream) { out.write(8) send(name) send(input) } /** * Adds a database. * @param path source path * @param input xml input * @throws IOException I/O exception */ def add(path: String, input: InputStream) { out.write(9) send(path) send(input) } /** * Replaces a resource. * @param path source path * @param input xml input * @throws IOException I/O exception */ def replace(path: String, input: InputStream) { out.write(12) send(path) send(input) } /** * Stores a binary resource. * @param path source path * @param input binary input * @throws IOException I/O exception */ def store(path: String, input: InputStream) { out.write(13) send(path) send(input) } /** * Returns command information. * @return string info */ def info() : String = { inf } /** * Closes the session. * @throws IOException I/O Exception */ def close() { send("exit") out.flush socket.close } /** * Sends an input stream to the server. * @param input xml input * @throws IOException I/O exception */ private def send(input: InputStream) { val is = new BufferedInputStream(input) val os = new BufferedOutputStream(out) var b = 0 while({ b = is.read; b != -1 }) os.write(b) os.write(0) os.flush inf = receive if(!ok) throw new IOException(inf) } /** * Checks the next success flag. * @return value of check * @throws IOException I/O Exception */ private def ok() : Boolean = { out.flush in.read == 0 } /** * Returns the next received string. * @return String result or info * @throws IOException I/O exception */ private def receive() : String = { val os = new ByteArrayOutputStream receive(in, os) os.toString("UTF-8") } /** * Sends a string to the server. * @param s string to be sent * @throws IOException I/O exception */ private def send(s: String) { out.write((s + '\\0').getBytes("UTF8")) } /** * Receives a string and writes it to the specified output stream. * @param bis input stream * @param o output stream * @throws IOException I/O exception */ private def receive(is: InputStream, os: OutputStream) { var b = 0 while({ b = is.read; b != 0 && b != -1 }) os.write(b) } /** * Returns an MD5 hash. * @param pw String * @return String */ private def md5(pw: String) : String = { val sb = new StringBuilder try { val md = MessageDigest.getInstance("MD5") md.update(pw.getBytes) for(b <- md.digest) { val s = Integer.toHexString(b & 0xFF) if(s.length == 1) sb.append('0') sb.append(s) } } catch { case ex: NoSuchAlgorithmException => ex.printStackTrace case ex : Exception => throw ex } sb.toString } /** * Query constructor. * @param query query string * @throws IOException I/O exception */ class Query(query: String) { val id = exec(0, query) /** * Binds a variable. * @param name name of variable * @param value value * @throws IOException I/O exception */ def bind(name: String, value: String) { bind(name, value, "") } /** * Binds a variable with a specific data type. * @param name name of variable * @param value value * @param type data type * @throws IOException I/O exception */ def bind(name: String, value: String, type: String) { exec(3, id + '\\0' + name + '\\0' + value + '\\0') } /** * Binds the context item. * @param value value * @throws IOException I/O exception */ def context(value: String) { context(value, "") } /** * Binds the context item with a specific data type. * @param value value * @param type data type * @throws IOException I/O exception */ def context(value: String, type: String) { exec(14, id + '\\0' + value + '\\0') } /** * Returns the whole result of the query. * @return query result * @throws IOException I/O Exception */ def execute() : String = { exec(5, id) } /** * Returns query info as a string, regardless of whether an output stream * was specified. * @return query info * @throws IOException I/O exception */ def info() : String = { exec(6, id) } /** * Closes the query. * @return result footer * @throws IOException I/O exception */ def close() { exec(2, id) } /** * Executes the specified command. * @param cmd command * @param arg argument * @return resulting string * @throws IOException I/O exception */ private def exec(cmd: Int, arg: String) : String = { out.write(cmd) send(arg) val s = receive if(!ok) throw new IOException(receive) s } } }
drmacro/basex
basex-api/src/main/scala/basexclient.scala
Scala
bsd-3-clause
7,149
package com.twitter.finagle.util import java.net.InetSocketAddress object InetSocketAddressUtil { /** * Parses a comma or space-delimited string of hostname and port pairs. For example, * * InetSocketAddressUtil.parseHosts("127.0.0.1:11211") => Seq(new InetSocketAddress("127.0.0.1", 11211)) * * @param hosts a comma or space-delimited string of hostname and port pairs. * @throws IllegalArgumentException if host and port are not both present * */ def parseHosts(hosts: String): Seq[InetSocketAddress] = { val hostPorts = hosts split Array(' ', ',') filter (!_.isEmpty) map (_.split(":")) hostPorts map { hp => require(hp.size == 2, "You must specify host and port") new InetSocketAddress(hp(0), hp(1).toInt) } toList } }
enachb/finagle_2.9_durgh
finagle-core/src/main/scala/com/twitter/finagle/util/InetSocketAddressUtil.scala
Scala
apache-2.0
785
/* * Copyright (c) 2013-2014 Snowplow Analytics Ltd. All rights reserved. * * This program is licensed to you under the Apache License Version 2.0, and * you may not use this file except in compliance with the Apache License * Version 2.0. You may obtain a copy of the Apache License Version 2.0 at * http://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the Apache License Version 2.0 is distributed on an "AS * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the Apache License Version 2.0 for the specific language * governing permissions and limitations there under. */ package com.snowplowanalytics.snowplow package collectors package scalastream // Akka and Spray import akka.actor.{ActorSystem, Props} import akka.io.IO import spray.can.Http // Java import java.io.File // Argot import org.clapper.argot._ // Config import com.typesafe.config.{Config, ConfigException, ConfigFactory} // Logging import org.slf4j.LoggerFactory // Snowplow import com.snowplowanalytics.snowplow.collectors.scalastream.sinks._ // Main entry point of the Scala collector. object ScalaCollector extends App with CamelSslConfiguration{ lazy val log = LoggerFactory.getLogger(getClass()) // Argument specifications val parser = new ArgotParser( programName = generated.Settings.name, compactUsage = true, preUsage = Some("%s: Version %s. Copyright (c) 2015, %s.".format( generated.Settings.name, generated.Settings.version, generated.Settings.organization) ) ) // Mandatory config argument val config = parser.option[Config](List("config"), "filename", "Configuration file.") { (c, opt) => val file = new File(c) if (file.exists) { ConfigFactory.parseFile(file) } else { parser.usage("Configuration file \\"%s\\" does not exist".format(c)) ConfigFactory.empty() } } parser.parse(args) /** Load configuration file*/ val rawConf = config.value.getOrElse(throw new RuntimeException("--config option must be provided")) val collectorConfig = new CollectorConfig(rawConf) implicit val system = ActorSystem.create("scala-stream-collector", rawConf) /** If the Sink is set to Kinesis, we MUST provide 3 kinesis streams * for goodRecords,badRecords and mapingID*/ val sinks = collectorConfig.sinkEnabled match { case Sink.Kinesis => { val good = KinesisSink.createAndInitialize(collectorConfig, InputType.Good) val bad = KinesisSink.createAndInitialize(collectorConfig, InputType.Bad) val map = KinesisSink.createAndInitialize(collectorConfig, InputType.Map) CollectorSinks(good, bad,map) } case Sink.Stdout => { val good = new StdoutSink(InputType.Good) val bad = new StdoutSink(InputType.Bad) val map = new StdoutSink(InputType.Map) CollectorSinks(good, bad,map) } } // The handler actor replies to incoming HttpRequests. val handler = system.actorOf( Props(classOf[CollectorServiceActor], collectorConfig, sinks), name = "handler" ) IO(Http) ! Http.Bind(handler, interface=collectorConfig.interface, port=collectorConfig.port) } // Return Options from the configuration. object Helper { implicit class RichConfig(val underlying: Config) extends AnyVal { def getOptionalString(path: String): Option[String] = try { Some(underlying.getString(path)) } catch { case e: ConfigException.Missing => None } } } // Instead of comparing strings and validating every time // the sink is accessed, validate the string here and // store this enumeration. object Sink extends Enumeration { type Sink = Value val Kinesis, Stdout, Test = Value } // Rigidly load the configuration file here to error when // the collector process starts rather than later. class CollectorConfig(config: Config) { import Helper.RichConfig private val collector = config.getConfig("collector") val interface = collector.getString("interface") val port = collector.getInt("port") val production = collector.getBoolean("production") private val p3p = collector.getConfig("p3p") val p3pPolicyRef = p3p.getString("policyref") val p3pCP = p3p.getString("CP") private val cookie = collector.getConfig("cookie") val cookieExpiration = cookie.getMilliseconds("expiration") val cookieEnabled = cookieExpiration != 0 val cookieDomain = cookie.getOptionalString("domain") private val sink = collector.getConfig("sink") // TODO: either change this to ADTs or switch to withName generation val sinkEnabled = sink.getString("enabled") match { case "kinesis" => Sink.Kinesis case "stdout" => Sink.Stdout case "test" => Sink.Test case _ => throw new RuntimeException("collector.sink.enabled unknown.") } private val kinesis = sink.getConfig("kinesis") private val aws = kinesis.getConfig("aws") val awsAccessKey = aws.getString("access-key") val awsSecretKey = aws.getString("secret-key") private val stream = kinesis.getConfig("stream") val streamGoodName = stream.getString("good") val streamBadName = stream.getString("bad") val streamMapName = stream.getString("map") private val streamRegion = stream.getString("region") val streamEndpoint = s"https://kinesis.${streamRegion}.amazonaws.com" val threadpoolSize = kinesis.hasPath("thread-pool-size") match { case true => kinesis.getInt("thread-pool-size") case _ => 10 } val buffer = kinesis.getConfig("buffer") val byteLimit = buffer.getInt("byte-limit") val recordLimit = buffer.getInt("record-limit") val timeLimit = buffer.getInt("time-limit") val backoffPolicy = kinesis.getConfig("backoffPolicy") val minBackoff = backoffPolicy.getLong("minBackoff") val maxBackoff = backoffPolicy.getLong("maxBackoff") //parametrage supplementaire pour activer/désactiver le redirect /** ################################# * REDIRECT CONF *#################################*/ val redirect = collector.getConfig("redirect") val allowRedirect = redirect.getBoolean("allow-redirect") val pathToRedirect= allowRedirect match{ case (true) => redirect.getString("path-to-redirect") case (false) => "" } val redirectId= allowRedirect match{ case true => redirect.getString("redirect-id") case false => None } /** ################################# * SSL + CERTIFICATE CONF *#################################*/ val sprayCanConf = config.getConfig("spray.can.server") val sslEncryption = sprayCanConf.getString("ssl-encryption") val certificate= collector.getConfig("certificate") val pathToCertificate= sslEncryption match{ case "on" => certificate.getString("path-to-certificate") case "off" => "" } val keystorePassword = sslEncryption match{ case "on" => certificate.getString("keystore-password") case "off" => ""} val filePassword = sslEncryption match{ case "on" => certificate.getString("file-password") case "off" => ""} }
ClaraVista-IT/snowplow
2-collectors/scala-stream-collector/src/main/scala/com.snowplowanalytics.snowplow.collectors/scalastream/ScalaCollectorApp.scala
Scala
apache-2.0
7,091
/* * Copyright 2009-2010 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package ccf.tree.indexing case class TreeIndex(val indexPath: Int*) extends Indexable { def shift(count: Int, other: Indexable) = other match { case other: TreeIndex if (other.level > level) => TreeIndex(indexPath: _*) case other: TreeIndex if (other.level == 0) => TreeIndex(indexPath: _*) case other: TreeIndex => { val shiftLevel = other.indexPath.size val shiftedPath = indexPath.toList.take(shiftLevel - 1) ++ List(indexPath(shiftLevel-1) + count) ++ indexPath.toList.drop(shiftLevel) TreeIndex(shiftedPath: _*) } case _ => TreeIndex(indexPath: _*) } def <(other: Indexable) = matchSupported(other) { case UndefinedIndex() => false case other: TreeIndex if (level < other.level) => this < other.parent.shift(1) case other: TreeIndex if (level == other.level) => indexInLevel < other.indexInLevel case other: TreeIndex if (level > other.level) => parent.shift(1) < other } def ==(other: Indexable) = matchSupported(other) { case UndefinedIndex() => false case other: TreeIndex => indexPath.toList == other.indexPath.toList // comparison needs to be done as lists } def isAffectedBy(other: Indexable) = matchSupported(other) { case other: TreeIndex if (other.level < level) => true case other: TreeIndex if (other.level == level) => parent == other.parent // if parents match case other: TreeIndex if (other.level > level) => false case UndefinedIndex() => false } def isDescendantOf(other: Indexable) = matchSupported(other) { case UndefinedIndex() => false case other: TreeIndex if (other.level >= level) =>false case other: TreeIndex if (other.level == level-1) => other == parent case other: TreeIndex if (other.level < level) => parent.isDescendantOf(other) } protected def matchSupported(other: Indexable)(pf: PartialFunction[Indexable, Boolean]) = { if (pf isDefinedAt other) pf(other) else throw new UnsupportedOperationException("Unsupported index combination with "+other) } def parent = if (level <= 1) TreeIndex() else TreeIndex(indexPath.toList.dropRight(1) : _*) def append(index: Int) = TreeIndex((indexPath.toList ::: List(index)): _*) def append(backlogTreeIndex: TreeIndex) = TreeIndex((indexPath.toList ::: backlogTreeIndex.indexPath.toList): _*) def level = indexPath.size def indexInLevel = if (level == 0) 0 else indexPath.last def increment(count: Int) = parent.append(indexInLevel + count) def decrement(count: Int) = parent.append(indexInLevel - count) def encode: List[Int] = indexPath.toList }
akisaarinen/ccf
ccf/src/main/scala/ccf/tree/indexing/TreeIndex.scala
Scala
apache-2.0
3,215
/* * Accio is a platform to launch computer science experiments. * Copyright (C) 2016-2018 Vincent Primault <[email protected]> * * Accio is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * Accio is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with Accio. If not, see <http://www.gnu.org/licenses/>. */ package fr.cnrs.liris.infra.thriftserver import java.util.UUID import com.google.inject.{Inject, Singleton} import com.twitter.finagle.Service import com.twitter.finagle.thrift.ClientId import com.twitter.finatra.thrift.{ThriftFilter, ThriftRequest} import com.twitter.util.Future @Singleton final class AuthFilter @Inject()(chain: AuthChain) extends ThriftFilter { override def apply[T, Rep](request: ThriftRequest[T], service: Service[ThriftRequest[T], Rep]): Future[Rep] = { if (request.clientId.contains(AuthFilter.MasterClientId)) { UserInfo.let(AuthFilter.MasterUserInfo)(service(request)) } else { val credentials = request.clientId.map(_.name) chain.authenticate(credentials).flatMap { case None => Future.exception(ServerException.Unauthenticated) case Some(userInfo) => UserInfo.let(userInfo)(service(request)) } } } } object AuthFilter { val MasterClientId = ClientId("master:" + UUID.randomUUID().toString.replace("-", "")) val MasterUserInfo = UserInfo("system:master", None, Set("system:master", "system:authenticated")) }
privamov/accio
accio/java/fr/cnrs/liris/infra/thriftserver/AuthFilter.scala
Scala
gpl-3.0
1,866
/* * (c) Copyright 2016 Hewlett Packard Enterprise Development LP * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package toolkit.neuralnetwork.function import libcog._ import toolkit.neuralnetwork.{ComputeTests, DifferentiableField, UnitSpec} /** Tests the self-consistency of the forward/jacobian/jacobianAdjoint functions of the add-constant operator. * * @author Dick Carter */ class AddConstantSpec extends UnitSpec with ComputeTests { val inputLens = Seq(31) val batchSizes = Seq(7) def closure(c: Float) = { (s: Seq[DifferentiableField]) => s.head + c } "The add-by-constant operator" should "support positive constants" in { val inputShapes = Seq(Shape(24, 22)) def fn = closure(22f/7) jacobian(fn, inputShapes, inputLens, batchSizes) jacobianAdjoint(fn, inputShapes, inputLens, batchSizes) } it should "support negative constants" in { val inputShapes = Seq(Shape(24, 22)) def fn = closure(-12.345f) jacobian(fn, inputShapes, inputLens, batchSizes) jacobianAdjoint(fn, inputShapes, inputLens, batchSizes) } it should "support zero constants" in { val inputShapes = Seq(Shape(24, 22)) def fn = closure(0f) jacobian(fn, inputShapes, inputLens, batchSizes) jacobianAdjoint(fn, inputShapes, inputLens, batchSizes) } it should "support infix notation" in { val inputShapes = Seq(Shape(24, 22)) def fn(s: Seq[DifferentiableField]) = s.head + 2.345f jacobian(fn, inputShapes, inputLens, batchSizes) jacobianAdjoint(fn, inputShapes, inputLens, batchSizes) } }
hpe-cct/cct-nn
src/test/scala/toolkit/neuralnetwork/function/AddConstantSpec.scala
Scala
apache-2.0
2,082
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.ml.classification import org.apache.hadoop.fs.Path import org.json4s.{DefaultFormats, JObject} import org.json4s.JsonDSL._ import org.apache.spark.annotation.{Experimental, Since} import org.apache.spark.ml.param.ParamMap import org.apache.spark.ml.tree._ import org.apache.spark.ml.tree.DecisionTreeModelReadWrite._ import org.apache.spark.ml.tree.impl.RandomForest import org.apache.spark.ml.util._ import org.apache.spark.mllib.linalg.{DenseVector, SparseVector, Vector, Vectors} import org.apache.spark.mllib.regression.LabeledPoint import org.apache.spark.mllib.tree.configuration.{Algo => OldAlgo, Strategy => OldStrategy} import org.apache.spark.mllib.tree.model.{DecisionTreeModel => OldDecisionTreeModel} import org.apache.spark.rdd.RDD import org.apache.spark.sql.Dataset /** * :: Experimental :: * [[http://en.wikipedia.org/wiki/Decision_tree_learning Decision tree]] learning algorithm * for classification. * It supports both binary and multiclass labels, as well as both continuous and categorical * features. */ @Since("1.4.0") @Experimental class DecisionTreeClassifier @Since("1.4.0") ( @Since("1.4.0") override val uid: String) extends ProbabilisticClassifier[Vector, DecisionTreeClassifier, DecisionTreeClassificationModel] with DecisionTreeClassifierParams with DefaultParamsWritable { @Since("1.4.0") def this() = this(Identifiable.randomUID("dtc")) // Override parameter setters from parent trait for Java API compatibility. @Since("1.4.0") override def setMaxDepth(value: Int): this.type = super.setMaxDepth(value) @Since("1.4.0") override def setMaxBins(value: Int): this.type = super.setMaxBins(value) @Since("1.4.0") override def setMinInstancesPerNode(value: Int): this.type = super.setMinInstancesPerNode(value) @Since("1.4.0") override def setMinInfoGain(value: Double): this.type = super.setMinInfoGain(value) @Since("1.4.0") override def setMaxMemoryInMB(value: Int): this.type = super.setMaxMemoryInMB(value) @Since("1.4.0") override def setCacheNodeIds(value: Boolean): this.type = super.setCacheNodeIds(value) @Since("1.4.0") override def setCheckpointInterval(value: Int): this.type = super.setCheckpointInterval(value) @Since("1.4.0") override def setImpurity(value: String): this.type = super.setImpurity(value) @Since("1.6.0") override def setSeed(value: Long): this.type = super.setSeed(value) override protected def train(dataset: Dataset[_]): DecisionTreeClassificationModel = { val categoricalFeatures: Map[Int, Int] = MetadataUtils.getCategoricalFeatures(dataset.schema($(featuresCol))) val numClasses: Int = getNumClasses(dataset) val oldDataset: RDD[LabeledPoint] = extractLabeledPoints(dataset, numClasses) val strategy = getOldStrategy(categoricalFeatures, numClasses) val instr = Instrumentation.create(this, oldDataset) instr.logParams(params: _*) val trees = RandomForest.run(oldDataset, strategy, numTrees = 1, featureSubsetStrategy = "all", seed = $(seed), instr = Some(instr), parentUID = Some(uid)) val m = trees.head.asInstanceOf[DecisionTreeClassificationModel] instr.logSuccess(m) m } /** (private[ml]) Train a decision tree on an RDD */ private[ml] def train(data: RDD[LabeledPoint], oldStrategy: OldStrategy): DecisionTreeClassificationModel = { val instr = Instrumentation.create(this, data) instr.logParams(params: _*) val trees = RandomForest.run(data, oldStrategy, numTrees = 1, featureSubsetStrategy = "all", seed = 0L, instr = Some(instr), parentUID = Some(uid)) val m = trees.head.asInstanceOf[DecisionTreeClassificationModel] instr.logSuccess(m) m } /** (private[ml]) Create a Strategy instance to use with the old API. */ private[ml] def getOldStrategy( categoricalFeatures: Map[Int, Int], numClasses: Int): OldStrategy = { super.getOldStrategy(categoricalFeatures, numClasses, OldAlgo.Classification, getOldImpurity, subsamplingRate = 1.0) } @Since("1.4.1") override def copy(extra: ParamMap): DecisionTreeClassifier = defaultCopy(extra) } @Since("1.4.0") @Experimental object DecisionTreeClassifier extends DefaultParamsReadable[DecisionTreeClassifier] { /** Accessor for supported impurities: entropy, gini */ @Since("1.4.0") final val supportedImpurities: Array[String] = TreeClassifierParams.supportedImpurities @Since("2.0.0") override def load(path: String): DecisionTreeClassifier = super.load(path) } /** * :: Experimental :: * [[http://en.wikipedia.org/wiki/Decision_tree_learning Decision tree]] model for classification. * It supports both binary and multiclass labels, as well as both continuous and categorical * features. */ @Since("1.4.0") @Experimental class DecisionTreeClassificationModel private[ml] ( @Since("1.4.0")override val uid: String, @Since("1.4.0")override val rootNode: Node, @Since("1.6.0")override val numFeatures: Int, @Since("1.5.0")override val numClasses: Int) extends ProbabilisticClassificationModel[Vector, DecisionTreeClassificationModel] with DecisionTreeModel with DecisionTreeClassifierParams with MLWritable with Serializable { require(rootNode != null, "DecisionTreeClassificationModel given null rootNode, but it requires a non-null rootNode.") /** * Construct a decision tree classification model. * @param rootNode Root node of tree, with other nodes attached. */ private[ml] def this(rootNode: Node, numFeatures: Int, numClasses: Int) = this(Identifiable.randomUID("dtc"), rootNode, numFeatures, numClasses) override protected def predict(features: Vector): Double = { rootNode.predictImpl(features).prediction } override protected def predictRaw(features: Vector): Vector = { Vectors.dense(rootNode.predictImpl(features).impurityStats.stats.clone()) } override protected def raw2probabilityInPlace(rawPrediction: Vector): Vector = { rawPrediction match { case dv: DenseVector => ProbabilisticClassificationModel.normalizeToProbabilitiesInPlace(dv) dv case sv: SparseVector => throw new RuntimeException("Unexpected error in DecisionTreeClassificationModel:" + " raw2probabilityInPlace encountered SparseVector") } } @Since("1.4.0") override def copy(extra: ParamMap): DecisionTreeClassificationModel = { copyValues(new DecisionTreeClassificationModel(uid, rootNode, numFeatures, numClasses), extra) .setParent(parent) } @Since("1.4.0") override def toString: String = { s"DecisionTreeClassificationModel (uid=$uid) of depth $depth with $numNodes nodes" } /** * Estimate of the importance of each feature. * * This generalizes the idea of "Gini" importance to other losses, * following the explanation of Gini importance from "Random Forests" documentation * by Leo Breiman and Adele Cutler, and following the implementation from scikit-learn. * * This feature importance is calculated as follows: * - importance(feature j) = sum (over nodes which split on feature j) of the gain, * where gain is scaled by the number of instances passing through node * - Normalize importances for tree to sum to 1. * * Note: Feature importance for single decision trees can have high variance due to * correlated predictor variables. Consider using a [[RandomForestClassifier]] * to determine feature importance instead. */ @Since("2.0.0") lazy val featureImportances: Vector = TreeEnsembleModel.featureImportances(this, numFeatures) /** Convert to spark.mllib DecisionTreeModel (losing some information) */ override private[spark] def toOld: OldDecisionTreeModel = { new OldDecisionTreeModel(rootNode.toOld(1), OldAlgo.Classification) } @Since("2.0.0") override def write: MLWriter = new DecisionTreeClassificationModel.DecisionTreeClassificationModelWriter(this) } @Since("2.0.0") object DecisionTreeClassificationModel extends MLReadable[DecisionTreeClassificationModel] { @Since("2.0.0") override def read: MLReader[DecisionTreeClassificationModel] = new DecisionTreeClassificationModelReader @Since("2.0.0") override def load(path: String): DecisionTreeClassificationModel = super.load(path) private[DecisionTreeClassificationModel] class DecisionTreeClassificationModelWriter(instance: DecisionTreeClassificationModel) extends MLWriter { override protected def saveImpl(path: String): Unit = { val extraMetadata: JObject = Map( "numFeatures" -> instance.numFeatures, "numClasses" -> instance.numClasses) DefaultParamsWriter.saveMetadata(instance, path, sc, Some(extraMetadata)) val (nodeData, _) = NodeData.build(instance.rootNode, 0) val dataPath = new Path(path, "data").toString sqlContext.createDataFrame(nodeData).write.parquet(dataPath) } } private class DecisionTreeClassificationModelReader extends MLReader[DecisionTreeClassificationModel] { /** Checked against metadata when loading model */ private val className = classOf[DecisionTreeClassificationModel].getName override def load(path: String): DecisionTreeClassificationModel = { implicit val format = DefaultFormats val metadata = DefaultParamsReader.loadMetadata(path, sc, className) val numFeatures = (metadata.metadata \\ "numFeatures").extract[Int] val numClasses = (metadata.metadata \\ "numClasses").extract[Int] val root = loadTreeNodes(path, metadata, sqlContext) val model = new DecisionTreeClassificationModel(metadata.uid, root, numFeatures, numClasses) DefaultParamsReader.getAndSetParams(model, metadata) model } } /** Convert a model from the old API */ private[ml] def fromOld( oldModel: OldDecisionTreeModel, parent: DecisionTreeClassifier, categoricalFeatures: Map[Int, Int], numFeatures: Int = -1): DecisionTreeClassificationModel = { require(oldModel.algo == OldAlgo.Classification, s"Cannot convert non-classification DecisionTreeModel (old API) to" + s" DecisionTreeClassificationModel (new API). Algo is: ${oldModel.algo}") val rootNode = Node.fromOld(oldModel.topNode, categoricalFeatures) val uid = if (parent != null) parent.uid else Identifiable.randomUID("dtc") // Can't infer number of features from old model, so default to -1 new DecisionTreeClassificationModel(uid, rootNode, numFeatures, -1) } }
xieguobin/Spark_2.0.0_cn1
ml/classification/DecisionTreeClassifier.scala
Scala
apache-2.0
11,324
/* * This file is part of the "silex" library of helpers for Apache Spark. * * Copyright (c) 2016 Red Hat, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License */ package com.redhat.et.silex.rdd.lineage import org.scalatest._ import com.redhat.et.silex.testing.PerTestSparkContext class LineageRDDFunctionsSpec extends FlatSpec with Matchers with PerTestSparkContext { import com.redhat.et.silex.testing.matchers._ import com.redhat.et.silex.rdd.lineage.implicits._ it should "iterate correctly over lineage" in { val rdd1 = context.parallelize(1 to 10) val rdd2 = context.parallelize(1 to 10) val rdd3 = rdd1 ++ rdd2 val rdd4 = rdd1 ++ rdd3 rdd1.name = "rdd1" rdd2.name = "rdd2" rdd3.name = "rdd3" rdd4.name = "rdd4" val lineage = rdd4.lineage.toVector lineage.map(_.rdd.name) should beEqSeq(Seq("rdd1", "rdd3", "rdd1", "rdd2")) lineage.map(_.depth) should beEqSeq(Seq(1, 1, 2, 2)) lineage.map(_.successor.name) should beEqSeq(Seq("rdd4", "rdd4", "rdd3", "rdd3")) } }
willb/silex
src/test/scala/com/redhat/et/silex/rdd/lineage.scala
Scala
apache-2.0
1,545
package models.plan import scalaz._ import Scalaz._ import models._ case class KnittingState( bedNeedles: Map[Bed, Map[Needle, NeedleState]], carriageState: CarriageStates, output: Knitted, output3D: Knitted3D, yarnAttachments: Map[YarnPiece, YarnAttachment]) { def nextDirection(carriage: Carriage) = carriageState(carriage).position match { case CarriageRemoved => "Cannot find direction for removed carriage".fail case pos => val working = this.workingNeedles.toSet val overlappedWorking = carriage.over(pos).filter(working.contains) if (overlappedWorking.nonEmpty) s"carriage still over working needeles ${overlappedWorking.mkString(",")}".fail else pos.directionTo(workingNeedles.headOption.getOrElse(Needle.middle)).success } def modifyCarriage(state: CarriageState) = copy(carriageState = carriageState + state) def moveCarriage(carriage: Carriage, in: Direction): KnittingState = moveCarriage(carriage, if (in == ToLeft) CarriageLeft(0) else CarriageRight(0)) def moveCarriage(carriage: Carriage, to: LeftRight): KnittingState = moveCarriage(carriage, if (to == Left) CarriageLeft(0) else CarriageRight(0)) def moveCarriage(carriage: Carriage, to: CarriagePosition) = modifyCarriage(carriage match { case KCarriage => carriageState(KCarriage).copy(position = to) case LCarriage => carriageState(LCarriage).copy(position = to) case GCarriage => carriageState(GCarriage).copy(position = to) }) def needles(bed: Bed): NeedleStateRow = bedNeedles.getOrElse(bed, _ => NeedleState(NeedleA)) def workingNeedles: Seq[Needle] = { Beds.all.map(workingNeedles). foldLeft(Set.empty[Needle])(_ ++ _). toSeq.sorted } def workingNeedles(bed: Bed): Seq[Needle] = Needle.all.filter(n => needles(bed)(n).position.isWorking) def moveNeedles(bed: Bed, positions: Needle => NeedlePosition) = { def value(n: Needle) = n -> needles(bed)(n).copy(position = positions(n)) modifyNeedles(bed, Needle.all.map(value).toMap) } def modifyNeedles(bed: Bed, newNeedles: NeedleStateRow) = copy(bedNeedles = bedNeedles + (bed -> newNeedles.toMap)) def knit(main: Needle => Stitch, double: Needle => Stitch) = copy(output = output +(main, double)) def pushRow(forNeedles: Needle => Boolean) = { val changed = yarnAttachments.collect { case (yarn, ya) if forNeedles(ya.needle) => (yarn, ya.copy(rowDistance = ya.rowDistance + 1)) } copy(yarnAttachments = yarnAttachments ++ changed, output3D = output3D.pushDown) } def attachYarn(ya: YarnAttachment) = copy(yarnAttachments = yarnAttachments + (ya.yarn.start -> ya)) def detachYarn(yarn: YarnPiece) = copy(yarnAttachments = yarnAttachments - yarn) def knit2(f: Knitted3D => Knitted3D) = copy(output3D = f(output3D)) def sameOutput(other: KnittingState) = output == other.output def sameState(other: KnittingState) = bedNeedles == other.bedNeedles && carriageState == other.carriageState } object KnittingState { val initial = KnittingState(Map.empty, CarriageStates.empty, Knitted.empty, Knitted3D.empty, Map.empty) private def allNeedlesA(n: Needle) = NeedleState(NeedleA) } sealed trait CarriageStates { protected val data: Map[Carriage, CarriageState] def +(state: CarriageState) = { val data2 = data + (carriageFor(state) -> state) new CarriageStates { override val data = data2 } } def apply(carriage: Carriage) = { data.get(carriage).getOrElse(carriage.initialState). asInstanceOf[carriage.State] } private def carriageFor(state: CarriageState) = state match { case _: KCarriage.State => KCarriage case _: LCarriage.State => LCarriage case _: GCarriage.State => GCarriage } override def hashCode = data.hashCode override def equals(o: Any) = o match { case o: CarriageStates => data == o.data case _ => false } override def toString = data.toString } object CarriageStates { def empty = new CarriageStates { override val data = Map.empty[Carriage, CarriageState] } } case class YarnAttachment(yarn: YarnFlow, needle: Needle, bed: Bed, rowDistance: Int = 0)
knittery/knittery-ui
app/models/plan/KnittingState.scala
Scala
gpl-2.0
4,146
package pl.gosub.akka.online import akka.NotUsed import akka.actor.{Actor, ActorRef, ActorSystem, PoisonPill, Props} import akka.event.Logging import akka.http.scaladsl.Http import akka.http.scaladsl.model.{HttpMethods, HttpRequest, HttpResponse, Uri} import akka.stream.QueueOfferResult.Enqueued import akka.stream.scaladsl.{Flow, GraphDSL, Keep, RunnableGraph, Sink, Source, SourceQueueWithComplete} import akka.stream._ import scala.concurrent.Future import akka.pattern.pipe import akka.stream.stage.{GraphStage, GraphStageLogic, InHandler, OutHandler} import com.abahgat.suffixtree.GeneralizedSuffixTree import com.google.common.hash.{BloomFilter, Funnels} import org.apache.spark.streamdm.classifiers.trees.HoeffdingTree import org.apache.spark.streamdm.core.specification._ import org.apache.spark.streamdm.core.{Example, ExampleParser, Instance, TextInstance} import scala.concurrent.Await import scala.concurrent.duration.Duration import scala.util.Random object Main { implicit val system = ActorSystem() implicit val mat = ActorMaterializer() import scala.concurrent.ExecutionContext.Implicits.global def main(args: Array[String]): Unit = { println("Hello from main") val reqResponseFlow = Flow[HttpRequest].map[HttpResponse] (_ match { case HttpRequest(HttpMethods.GET, Uri.Path("/"), _, _, _) => HttpResponse(200, entity = "Hello!") case _ => HttpResponse(200, entity = "Ooops, not found") }) Http().bindAndHandle(reqResponseFlow, "localhost", 8888) // system.scheduler.schedule(Duration(100, "millisecond"), Duration(50, "millisecond"), myActor, "KABOOM !!!") // // val stdoutSink = new StdoutSink // // val done = // Source // .repeat("Hello") // .zip(Source.fromIterator(() => Iterator.from(0))) // .take(7) // .mapConcat{ // case (s, n) => // val i = " " * n // f"$i$s%n" // } // .throttle(42, Duration(1500, "millisecond"), 1, ThrottleMode.Shaping) // .runWith(Sink.actorRefWithAck(myActor, "test", "ack", "end", _ => "fail")) //done.onComplete(_ => system.terminate()) val crossStage = new BloomFilterCrossStage // Source.repeat(1).take(100).map(_ => Random.nextInt(1100) - 1000).via(kadaneStage).runWith(Sink.foreach(println)) // val g = RunnableGraph.fromGraph(GraphDSL.create() { implicit builder: GraphDSL.Builder[NotUsed] => // import GraphDSL.Implicits._ // val inData = Source.repeat(1).take(1000).map(_ => Random.nextInt(1000)).throttle(1, Duration(100, "millisecond"), 1, ThrottleMode.shaping) // val outData = Sink.foreach(println) // val inControl = Source.repeat(1).take(100).map(_ => Random.nextInt(2000) - 1000).throttle(1, Duration(1500, "millisecond"), 1, ThrottleMode.shaping) // val outControl = Sink.foreach(println) // // val cross = builder.add(crossStage) // // inData ~> cross.in1; cross.out1 ~> outData // inControl ~> cross.in2; cross.out2 ~> outControl // ClosedShape // }).run() println("Now trying the Hoeffding Tree") println("arffStuff") val specParser = new SpecificationParser val exampleSpec = specParser.fromArff("/home/janek/Downloads/arff/elecNormNew.arff") val example1 = ExampleParser.fromArff("0,2,0.085106,0.042482,0.251116,0.003467,0.422915,0.414912,DOWN", exampleSpec) val example2 = ExampleParser.fromArff("0,2,0.255319,0.051489,0.298721,0.003467,0.422915,0.414912,UP", exampleSpec) println("example Arff " + example1.in.toString + " / " + example1.out.toString) println("example Arff2 " + example2.in.toString + " / " + example2.out.toString) println("Spec " + exampleSpec.in.size() + " " + exampleSpec.out.size() + " " + exampleSpec.out.isNominal(0)) println("after arff") val hTree = new HoeffdingTree hTree.init(exampleSpec) hTree.trainIncremental(example1) println(hTree.predictSingle(example1)._2) hTree.trainIncremental(example2) println(hTree.predictSingle(example2)._2) println("now suffix tree") val suffixTree = new GeneralizedSuffixTree() suffixTree.put("cacao", 0) println("Searching: " + suffixTree.search("cac")) println("Searching: " + suffixTree.search("caco")) Await.ready(system.whenTerminated, Duration.Inf) } }
gosubpl/akka-online
src/main/scala/pl/gosub/akka/online/Main.scala
Scala
apache-2.0
4,301
package org.example import scalax.chart.api._ import org.specs2._ class ToolTipGeneratorSpec extends Specification { def is = s2""" Tool Tip Generator Specification Category Tool Tip Generator Creation from ordinary function $cat1 using companion to avoid typing $cat2 using companion value to String function $cat3 using companion default $cat4 Tool Tip Generation $catg Pie Tool Tip Generator Creation from ordinary function $pie1 using companion to avoid typing $pie2 using companion value to String function $pie3 using companion default $pie4 Tool Tip Generation $pieg XY Tool Tip Generator Creation from ordinary function $xy1 using companion to avoid typing $xy2 using companion value to String function $xy3 using companion (x,y) to String function $xy4 using companion default $xy5 Tool Tip Generation $xyg """ // ----------------------------------------------------------------------------------------------- // tests // ----------------------------------------------------------------------------------------------- def cat1 = { val chart = catchart chart.tooltipGenerator = (dataset: CategoryDataset, series: Comparable[_], category: Comparable[_]) => { dataset.getValue(series,category).toString } chart.tooltipGenerator must beSome } def cat2 = { val chart = catchart chart.tooltipGenerator = CategoryToolTipGenerator(_.getValue(_,_).toString) chart.tooltipGenerator must beSome } def cat3 = { val chart = catchart chart.tooltipGenerator = CategoryToolTipGenerator(_.toString) chart.tooltipGenerator must beSome } def cat4 = { val chart = catchart chart.tooltipGenerator = CategoryToolTipGenerator.Default chart.tooltipGenerator must beSome } def catg = { val chart = catchart val dataset = chart.plot.getDataset chart.tooltipGenerator = CategoryToolTipGenerator.Default chart.tooltipGenerator.map(_.apply(dataset, "Series 1", "3")) must beSome("3.0") } def pie1 = { val chart = piechart chart.tooltipGenerator = (dataset: PieDataset, key: Comparable[_]) => { dataset.getValue(key).toString } chart.tooltipGenerator must beSome } def pie2 = { val chart = piechart chart.tooltipGenerator = PieToolTipGenerator(_.getValue(_).toString) chart.tooltipGenerator must beSome } def pie3 = { val chart = piechart chart.tooltipGenerator = PieToolTipGenerator(_.toString) chart.tooltipGenerator must beSome } def pie4 = { val chart = piechart chart.tooltipGenerator = PieToolTipGenerator.Default chart.tooltipGenerator must beSome } def pieg = { val chart = piechart val dataset = chart.plot.getDataset chart.tooltipGenerator = PieToolTipGenerator.Default chart.tooltipGenerator.map(_.apply(dataset, "4")) must beSome("4.0") } def xy1 = { val chart = xychart chart.tooltipGenerator = (dataset: XYDataset, series: Comparable[_], item: Int) => { val idx = dataset.indexOf(series) dataset.getY(idx, item).toString } chart.tooltipGenerator must beSome } def xy2 = { val chart = xychart chart.tooltipGenerator = XYToolTipGenerator { (dataset, series, item) => val idx = dataset.indexOf(series) dataset.getY(idx, item).toString } chart.tooltipGenerator must beSome } def xy3 = { val chart = xychart chart.tooltipGenerator = XYToolTipGenerator(_.toString) chart.tooltipGenerator must beSome } def xy4 = { val chart = xychart chart.tooltipGenerator = XYToolTipGenerator((x,y) => s"""($x,$y)""") chart.tooltipGenerator must beSome } def xy5 = { val chart = xychart chart.tooltipGenerator = XYToolTipGenerator.Default chart.tooltipGenerator must beSome } def xyg = { val chart = xychart val dataset = chart.plot.getDataset chart.tooltipGenerator = XYToolTipGenerator.Default chart.tooltipGenerator.map(_.apply(dataset, "Series 1", 3)) must beSome("(4.0,4.0)") } // ----------------------------------------------------------------------------------------------- // util // ----------------------------------------------------------------------------------------------- def catchart = { val data = for { series <- List("Series 1", "Series 2") } yield { val catvals = for (i <- 1 to 5) yield (i.toString,i) series -> catvals } BarChart(data) } def piechart = { val data = for (i <- 1 to 5) yield (i.toString,i) PieChart(data) } def xychart = { val data = for { series <- List("Series 1", "Series 2") } yield { val xys = for (i <- 1 to 5) yield (i,i) series -> xys } XYLineChart(data) } }
wookietreiber/scala-chart
src/test/scala/scalax/chart/ToolTipGeneratorSpec.scala
Scala
lgpl-3.0
5,826
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.ml import org.mockito.Matchers.{any, eq => meq} import org.mockito.Mockito.when import org.scalatest.FunSuite import org.scalatest.mock.MockitoSugar.mock import org.apache.spark.ml.param.ParamMap import org.apache.spark.sql.DataFrame class PipelineSuite extends FunSuite { abstract class MyModel extends Model[MyModel] test("pipeline") { val estimator0 = mock[Estimator[MyModel]] val model0 = mock[MyModel] val transformer1 = mock[Transformer] val estimator2 = mock[Estimator[MyModel]] val model2 = mock[MyModel] val transformer3 = mock[Transformer] val dataset0 = mock[DataFrame] val dataset1 = mock[DataFrame] val dataset2 = mock[DataFrame] val dataset3 = mock[DataFrame] val dataset4 = mock[DataFrame] when(estimator0.fit(meq(dataset0), any[ParamMap]())).thenReturn(model0) when(model0.transform(meq(dataset0), any[ParamMap]())).thenReturn(dataset1) when(model0.parent).thenReturn(estimator0) when(transformer1.transform(meq(dataset1), any[ParamMap])).thenReturn(dataset2) when(estimator2.fit(meq(dataset2), any[ParamMap]())).thenReturn(model2) when(model2.transform(meq(dataset2), any[ParamMap]())).thenReturn(dataset3) when(model2.parent).thenReturn(estimator2) when(transformer3.transform(meq(dataset3), any[ParamMap]())).thenReturn(dataset4) val pipeline = new Pipeline() .setStages(Array(estimator0, transformer1, estimator2, transformer3)) val pipelineModel = pipeline.fit(dataset0) assert(pipelineModel.stages.size === 4) assert(pipelineModel.stages(0).eq(model0)) assert(pipelineModel.stages(1).eq(transformer1)) assert(pipelineModel.stages(2).eq(model2)) assert(pipelineModel.stages(3).eq(transformer3)) assert(pipelineModel.getModel(estimator0).eq(model0)) assert(pipelineModel.getModel(estimator2).eq(model2)) intercept[NoSuchElementException] { pipelineModel.getModel(mock[Estimator[MyModel]]) } val output = pipelineModel.transform(dataset0) assert(output.eq(dataset4)) } test("pipeline with duplicate stages") { val estimator = mock[Estimator[MyModel]] val pipeline = new Pipeline() .setStages(Array(estimator, estimator)) val dataset = mock[DataFrame] intercept[IllegalArgumentException] { pipeline.fit(dataset) } } }
trueyao/spark-lever
mllib/src/test/scala/org/apache/spark/ml/PipelineSuite.scala
Scala
apache-2.0
3,155
package com.twitter.finagle.postgres.connection import com.twitter.finagle.postgres.messages.{PgResponse, DataRow, Field} import scala.collection.mutable.ListBuffer /* * Connection states. */ sealed trait State case object SimpleQuery extends State case object RequestingSsl extends State case object AwaitingSslResponse extends State case object AuthenticationRequired extends State case object AuthenticationInProgress extends State case object AwaitingPassword extends State case class AggregatingAuthData(statuses: Map[String, String], processId: Int, secretKey: Int) extends State case object Connected extends State case object Syncing extends State // All of the extended query states - Sync can be issued while in these states sealed trait ExtendedQueryState extends State case object Parsing extends ExtendedQueryState case object Binding extends ExtendedQueryState case object ExecutePreparedStatement extends ExtendedQueryState case object AwaitParamsDescription extends ExtendedQueryState case class AggregateRows(fields: IndexedSeq[Field], buff: ListBuffer[DataRow] = ListBuffer()) extends ExtendedQueryState case class AggregateRowsWithoutFields(buff: ListBuffer[DataRow] = ListBuffer()) extends ExtendedQueryState case class AwaitRowDescription(types: IndexedSeq[Int]) extends ExtendedQueryState case class EmitOnReadyForQuery[R <: PgResponse](emit: R) extends ExtendedQueryState
jeremyrsmith/finagle-postgres
src/main/scala/com/twitter/finagle/postgres/connection/States.scala
Scala
apache-2.0
1,420
/** * This file is part of the TA Buddy project. * Copyright (c) 2013-2014 Alexey Aksenov [email protected] * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU Affero General Global License version 3 * as published by the Free Software Foundation with the addition of the * following permission added to Section 15 as permitted in Section 7(a): * FOR ANY PART OF THE COVERED WORK IN WHICH THE COPYRIGHT IS OWNED * BY Limited Liability Company «MEZHGALAKTICHESKIJ TORGOVYJ ALIANS», * Limited Liability Company «MEZHGALAKTICHESKIJ TORGOVYJ ALIANS» DISCLAIMS * THE WARRANTY OF NON INFRINGEMENT OF THIRD PARTY RIGHTS. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. * See the GNU Affero General Global License for more details. * You should have received a copy of the GNU Affero General Global License * along with this program; if not, see http://www.gnu.org/licenses or write to * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, * Boston, MA, 02110-1301 USA, or download the license from the following URL: * http://www.gnu.org/licenses/agpl.html * * The interactive user interfaces in modified source and object code versions * of this program must display Appropriate Legal Notices, as required under * Section 5 of the GNU Affero General Global License. * * In accordance with Section 7(b) of the GNU Affero General Global License, * you must retain the producer line in every report, form or document * that is created or manipulated using TA Buddy. * * You can be released from the requirements of the license by purchasing * a commercial license. Buying such a license is mandatory as soon as you * develop commercial activities involving the TA Buddy software without * disclosing the source code of your own applications. * These activities include: offering paid services to customers, * serving files in a web or/and network application, * shipping TA Buddy with a closed source product. * * For more information, please contact Digimead Team at this * address: [email protected] */ package org.digimead.tabuddy.desktop.logic import akka.actor.{ Inbox, PoisonPill, Terminated } import java.io.File import org.digimead.digi.lib.{ DependencyInjection, Disposable } import org.digimead.digi.lib.api.XDependencyInjection import org.digimead.digi.lib.log.api.XLoggable import org.digimead.tabuddy.desktop.core import org.digimead.tabuddy.desktop.core.support.App import org.digimead.tabuddy.desktop.core.support.Timeout import org.digimead.tabuddy.desktop.logic.script.{ Cache, Loader } import org.digimead.tabuddy.desktop.logic.script.Script import org.osgi.framework.{ BundleActivator, BundleContext } import scala.concurrent.Future import scala.ref.WeakReference /** * OSGi entry point. */ class Activator extends BundleActivator with XLoggable { /** Akka execution context. */ implicit lazy val ec = App.system.dispatcher /** Start bundle. */ def start(context: BundleContext) = Activator.startStopLock.synchronized { if (Option(Activator.disposable).isEmpty) throw new IllegalStateException("Bundle is already disposed. Please reinstall it before activation.") log.debug("Start TABuddy Desktop logic.") // Setup DI for this bundle val diReady = Option(context.getServiceReference(classOf[org.digimead.digi.lib.api.XDependencyInjection])). map { currencyServiceRef ⇒ (currencyServiceRef, context.getService(currencyServiceRef)) } match { case Some((reference, diService)) ⇒ diService.getDependencyValidator.foreach { validator ⇒ val invalid = DependencyInjection.validate(validator, this) if (invalid.nonEmpty) throw new IllegalArgumentException("Illegal DI keys found: " + invalid.mkString(",")) } context.ungetService(reference) Some(diService.getDependencyInjection()) case None ⇒ log.warn("DI service not found.") None } diReady match { case Some(di) ⇒ DependencyInjection.reset() DependencyInjection(di, false) case None ⇒ log.warn("Skip DI initialization in test environment.") } // Start component actors hierarchy val f = Future { Activator.startStopLock.synchronized { App.watch(Activator).once.makeBeforeStop('logic_Activator__WaitingForContext) { // This hook is hold Activator.stop() while initialization is incomplete. App.watch(context).waitForStart(Timeout.normal) // Initialization complete. App.watch(context).off() } on { // Execute autoexec val autoexec = new File(Logic.scriptContainer, "autoexec.scala") if (autoexec.exists()) try { val content = Loader(autoexec) val unique = Script.unique(content) log.info(s"Evaluate autoexec script with unique id ${unique}.") Script[Unit](content, unique, false).run() } catch { case e: Throwable ⇒ log.error("Error while executing autoexec.scala: " + e.getMessage(), e) } // Start logic Logic.actor } } } f onFailure { case e: Throwable ⇒ log.error("Error while starting Logic: " + e.getMessage(), e) } f onComplete { case _ ⇒ App.watch(context).on() } // Prevents stop Core bundle before this one. App.watch(core.Activator).once.makeBeforeStop('logic_Activator__LockCore) { if (!App.isDevelopmentMode) App.watch(Activator).waitForStop(Timeout.short) } } /** Stop bundle. */ def stop(context: BundleContext) = Activator.startStopLock.synchronized { log.debug("Stop TABuddy Desktop logic.") Logic ! App.Message.Inconsistent(Logic, None) App.watch(Activator) off {} App.watch(Logic).waitForStop(Timeout.long) try { // Stop component actors. val inbox = Inbox.create(App.system) inbox.watch(Logic) Logic ! PoisonPill if (inbox.receive(Timeout.long).isInstanceOf[Terminated]) log.debug("Logic actors hierarchy is terminated.") else log.fatal("Unable to shutdown Logic actors hierarchy.") } catch { case e if App.system == null ⇒ log.debug("Skip Akka cleanup: ecosystem is already shut down.") } Activator.dispose() } override def toString = "logic.Activator" } /** * Disposable manager. There is always only one singleton per class loader. */ object Activator extends Disposable.Manager with XLoggable { @volatile private var disposable = Seq[WeakReference[Disposable]]() private val disposableLock = new Object private val startStopLock = new Object /** Register the disposable instance. */ def register(disposable: Disposable) = disposableLock.synchronized { this.disposable = this.disposable :+ WeakReference(disposable) } /** Dispose all registered instances. */ protected def dispose() = disposableLock.synchronized { log.debug(s"Dispose ${disposable.size} instance(s).") disposable.reverse.foreach(_.get.foreach { disposable ⇒ callDispose(disposable) }) disposable = null } override def toString = "logic.Activator[Singleton]" }
digimead/digi-TABuddy-desktop
part-logic/src/main/scala/org/digimead/tabuddy/desktop/logic/Activator.scala
Scala
agpl-3.0
7,406
import java.util.Objects import org.infinispan.distribution.group.{Group, Grouper} import org.infinispan.manager.DefaultCacheManager object InfinispanGroupApi { def main(args: Array[String]): Unit = { val manager = new DefaultCacheManager("infinispan.xml") val cacheAsCustomizedKeyClass = manager.getCache[KeyClass, KeyClass]("cacheAsCustomizedKeyClass") val cacheAsGrouper = manager.getCache[NoGroupKeyClass, NoGroupKeyClass]("cacheAsGrouper") val cacheAsGrouperSimple = manager.getCache[String, String]("cacheAsGrouperSimple") try { val groupKeyValues = Array(KeyClass("10000", "Sato", 20), KeyClass("10000", "Tanaka", 25), KeyClass("10000", "Suzuki", 30), KeyClass("20000", "Momimoto", 22), KeyClass("20000", "Hanada", 27), KeyClass("20000", "Yamamoto", 19), KeyClass("30000", "Ken", 22), KeyClass("30000", "Mike", 23), KeyClass("30000", "Jusmine", 21), KeyClass("40000", "hoge", 20), KeyClass("40000", "foo", 20), KeyClass("40000", "bar", 20), KeyClass("50000", "Java", 18), KeyClass("50000", "Scala", 10), KeyClass("50000", "Clojure", 6) ) for (keyValue <- groupKeyValues) cacheAsCustomizedKeyClass.put(keyValue, keyValue) for (keyValue <- groupKeyValues) { val dm = cacheAsCustomizedKeyClass.getAdvancedCache.getDistributionManager println(s"PrimaryLocation: ${dm.getPrimaryLocation(keyValue)}, Locate:${dm.locate(keyValue)}") println(s" $keyValue:${cacheAsCustomizedKeyClass.get(keyValue)}") } println("====================") val keyValues = Array(NoGroupKeyClass("10000", "Sato", 20), NoGroupKeyClass("10000", "Tanaka", 25), NoGroupKeyClass("10000", "Suzuki", 30), NoGroupKeyClass("20000", "Momimoto", 22), NoGroupKeyClass("20000", "Hanada", 27), NoGroupKeyClass("20000", "Yamamoto", 19), NoGroupKeyClass("30000", "Ken", 22), NoGroupKeyClass("30000", "Mike", 23), NoGroupKeyClass("30000", "Jusmine", 21), NoGroupKeyClass("40000", "hoge", 20), NoGroupKeyClass("40000", "foo", 20), NoGroupKeyClass("40000", "bar", 20), NoGroupKeyClass("50000", "Java", 18), NoGroupKeyClass("50000", "Scala", 10), NoGroupKeyClass("50000", "Clojure", 6) ) for (keyValue <- keyValues) cacheAsGrouper.put(keyValue, keyValue) for (keyValue <- keyValues) { val dm = cacheAsGrouper.getAdvancedCache.getDistributionManager println(s"PrimaryLocation: ${dm.getPrimaryLocation(keyValue)}, Locate:${dm.locate(keyValue)}") println(s" $keyValue:${cacheAsGrouper.get(keyValue)}") } println("====================") val simpleKeyValues = Array(("10001", "Sato"), ("10002", "Tanaka"), ("10003", "Suzuki"), ("20001", "Momimoto"), ("20002", "Hanada"), ("20003", "Yamamoto"), ("30001", "Ken"), ("30002", "Mike"), ("30003", "Jusmine"), ("40001", "hoge"), ("40002", "foo"), ("40003", "bar"), ("50001", "Java"), ("50002", "Scala"), ("50003", "Clojure") ) for ((key, value) <- simpleKeyValues) cacheAsGrouperSimple.put(key, value) for ((key, _) <- simpleKeyValues) { val dm = cacheAsGrouperSimple.getAdvancedCache.getDistributionManager println(s"PrimaryLocation: ${dm.getPrimaryLocation(key)}, Locate:${dm.locate(key)}") println(s" $key:${cacheAsGrouperSimple.get(key)}") } } finally { cacheAsCustomizedKeyClass.stop() cacheAsGrouper.stop() cacheAsGrouperSimple.stop() manager.stop() } } } /** 自分でGroup制御を行うキークラス **/ object KeyClass { def apply(code: String, name: String, age: Int): KeyClass = new KeyClass(code, name, age) } @SerialVersionUID(1L) class KeyClass(val code: String, val name: String, val age: Int) extends Serializable { @Group def getCode: String = code override def equals(other: Any): Boolean = other match { case o: KeyClass => code == o.code && name == o.name && age == o.age case _ => false } override def hashCode: Int = Objects.hashCode(code, name, age) override def toString: String = s"KeyClass => code:[$code], name:[$name], age:[$age]" } /** 外部のGrouperでGroup制御を行うキークラス **/ object NoGroupKeyClass { def apply(code: String, name: String, age: Int): NoGroupKeyClass = new NoGroupKeyClass(code, name, age) } @SerialVersionUID(1L) class NoGroupKeyClass(val code: String, val name: String, val age: Int) extends Serializable { override def equals(other: Any): Boolean = other match { case o: NoGroupKeyClass => code == o.code && name == o.name && age == o.age case _ => false } override def hashCode: Int = Objects.hashCode(code, name, age) override def toString: String = s"NoGroupKey => code:[$code], name:[$name], age:[$age]" } class MyGrouper extends Grouper[NoGroupKeyClass] { def computeGroup(key: NoGroupKeyClass, group: String): String = key.code def getKeyType: Class[NoGroupKeyClass] = classOf[NoGroupKeyClass] } class SimpleGrouper extends Grouper[String] { def computeGroup(key: String, group: String): String = key.head.toString def getKeyType: Class[String] = classOf[String] }
kazuhira-r/infinispan-examples
infinispan-group-api/src/main/scala/InfinispanGroupApi.scala
Scala
mit
5,708
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.connector.catalog import org.apache.spark.sql.AnalysisException import org.apache.spark.sql.catalyst.{FunctionIdentifier, TableIdentifier} import org.apache.spark.sql.catalyst.catalog.BucketSpec import org.apache.spark.sql.catalyst.parser.CatalystSqlParser import org.apache.spark.sql.connector.expressions.{BucketTransform, IdentityTransform, LogicalExpressions, Transform} import org.apache.spark.sql.internal.SQLConf /** * Conversion helpers for working with v2 [[CatalogPlugin]]. */ private[sql] object CatalogV2Implicits { import LogicalExpressions._ implicit class PartitionTypeHelper(colNames: Seq[String]) { def asTransforms: Array[Transform] = { colNames.map(col => identity(reference(Seq(col)))).toArray } } implicit class BucketSpecHelper(spec: BucketSpec) { def asTransform: BucketTransform = { if (spec.sortColumnNames.nonEmpty) { throw new AnalysisException( s"Cannot convert bucketing with sort columns to a transform: $spec") } val references = spec.bucketColumnNames.map(col => reference(Seq(col))) bucket(spec.numBuckets, references.toArray) } } implicit class TransformHelper(transforms: Seq[Transform]) { def asPartitionColumns: Seq[String] = { val (idTransforms, nonIdTransforms) = transforms.partition(_.isInstanceOf[IdentityTransform]) if (nonIdTransforms.nonEmpty) { throw new AnalysisException("Transforms cannot be converted to partition columns: " + nonIdTransforms.map(_.describe).mkString(", ")) } idTransforms.map(_.asInstanceOf[IdentityTransform]).map(_.reference).map { ref => val parts = ref.fieldNames if (parts.size > 1) { throw new AnalysisException(s"Cannot partition by nested column: $ref") } else { parts(0) } } } } implicit class CatalogHelper(plugin: CatalogPlugin) { def asTableCatalog: TableCatalog = plugin match { case tableCatalog: TableCatalog => tableCatalog case _ => throw new AnalysisException(s"Cannot use catalog ${plugin.name}: not a TableCatalog") } def asNamespaceCatalog: SupportsNamespaces = plugin match { case namespaceCatalog: SupportsNamespaces => namespaceCatalog case _ => throw new AnalysisException( s"Cannot use catalog ${plugin.name}: does not support namespaces") } } implicit class NamespaceHelper(namespace: Array[String]) { def quoted: String = namespace.map(quoteIfNeeded).mkString(".") } implicit class IdentifierHelper(ident: Identifier) { def quoted: String = { if (ident.namespace.nonEmpty) { ident.namespace.map(quoteIfNeeded).mkString(".") + "." + quoteIfNeeded(ident.name) } else { quoteIfNeeded(ident.name) } } def asMultipartIdentifier: Seq[String] = ident.namespace :+ ident.name def asTableIdentifier: TableIdentifier = ident.namespace match { case ns if ns.isEmpty => TableIdentifier(ident.name) case Array(dbName) => TableIdentifier(ident.name, Some(dbName)) case _ => throw new AnalysisException( s"$quoted is not a valid TableIdentifier as it has more than 2 name parts.") } def asFunctionIdentifier: FunctionIdentifier = ident.namespace() match { case ns if ns.isEmpty => FunctionIdentifier(ident.name()) case Array(dbName) => FunctionIdentifier(ident.name(), Some(dbName)) case _ => throw new AnalysisException( s"$quoted is not a valid FunctionIdentifier as it has more than 2 name parts.") } } implicit class MultipartIdentifierHelper(parts: Seq[String]) { if (parts.isEmpty) { throw new AnalysisException("multi-part identifier cannot be empty.") } def asIdentifier: Identifier = Identifier.of(parts.init.toArray, parts.last) def asTableIdentifier: TableIdentifier = parts match { case Seq(tblName) => TableIdentifier(tblName) case Seq(dbName, tblName) => TableIdentifier(tblName, Some(dbName)) case _ => throw new AnalysisException( s"$quoted is not a valid TableIdentifier as it has more than 2 name parts.") } def quoted: String = parts.map(quoteIfNeeded).mkString(".") } def quoteIfNeeded(part: String): String = { if (part.contains(".") || part.contains("`")) { s"`${part.replace("`", "``")}`" } else { part } } private lazy val catalystSqlParser = new CatalystSqlParser(SQLConf.get) def parseColumnPath(name: String): Seq[String] = { catalystSqlParser.parseMultipartIdentifier(name) } }
dbtsai/spark
sql/catalyst/src/main/scala/org/apache/spark/sql/connector/catalog/CatalogV2Implicits.scala
Scala
apache-2.0
5,470
package structures package laws trait SemigroupLaws[A] { implicit val typeClass: Semigroup[A] import Semigroup.ops._ def combineAssociativity(x: A, y: A, z: A): IsEqual[A] = ((x |+| y) |+| z) =?= (x |+| (y |+| z)) } object SemigroupLaws { def apply[A: Semigroup]: SemigroupLaws[A] = new SemigroupLaws[A] { val typeClass = Semigroup[A] } }
mpilquist/Structures
laws/src/main/scala/structures/laws/SemigroupLaws.scala
Scala
bsd-3-clause
362
/** ____ __ ____ ____ ____,,___ ____ __ __ ____ * ( _ \\ /__\\ (_ )(_ _)( ___)/ __) ( _ \\( )( )( _ \\ Read * ) / /(__)\\ / /_ _)(_ )__) \\__ \\ )___/ )(__)( ) _ < README.txt * (_)\\_)(__)(__)(____)(____)(____)(___/ (__) (______)(____/ LICENSE.txt */ import com.mongodb.casbah.Imports.ObjectId import com.mongodb.casbah.MongoConnection import play.api.Application import play.api.mvc.{Request, WithFilters} import razie.db.RTable import razie.db.RazMongo import razie.wiki.Services import razie.wiki.WikiConfig import razie.wiki.model._ import razie.wiki.parser.WikiParserT /** customize some global handling errors */ object Global extends WithFilters { override def beforeStart(app: Application) { Services.config = new WikiConfig { override def getTheme (user:Option[WikiUser], request:Option[Request[_]]) = "light" override def reloadUrlMap = {} } /************** MONGO INIT *************/ RazMongo.setInstance { lazy val conn = MongoConnection("localhost") /** the actual database - done this way to run upgrades before other code uses it */ com.mongodb.casbah.commons.conversions.scala.RegisterConversionHelpers() com.mongodb.casbah.commons.conversions.scala.RegisterJodaTimeConversionHelpers() // authenticate val db = conn("wikireactor") if (!db.authenticate("user", "password")) { throw new Exception("Cannot authenticate. Login failed.") } db } // OPTIONAL - customize the reactor/wiki/parser Services.mkReactor = { (realm, fallBacks, we)=> new MyReactor(realm, fallBacks, we) } // create the default page if (!Wikis.find(WID.fromPath("Admin:Sample1").get).isDefined) { razie.db.tx { implicit txn => new WikiEntry("Admin", "Sample1", "Sample1", "md", """ Congratulations - you made it work! This is a sample first page. Try to create a new page: [[Admin:Sample1-1]]. """, new ObjectId()).create } } } } /** OPTIONAL: my own reactor - customize the customizables */ class MyReactor (realm:String, fallBacks:List[Reactor], we:Option[WikiEntry]) extends Reactor (realm, Nil, we) { /** my wiki - used to compose my own parser */ class MyWikiInst (realm:String, fallBacks:List[WikiInst]) extends WikiInst(realm, fallBacks) { class MyWikiParserCls(val realm: String) extends WikiParserT { } override def mkParser = new MyWikiParserCls(realm) } override val wiki: WikiInst = new MyWikiInst(realm, Nil) }
razie/diesel-hydra
samples/sample1/app/Global.scala
Scala
apache-2.0
2,549
/** * Copyright (C) 2010-2011 LShift Ltd. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package net.lshift.diffa.kernel.differencing import org.hibernate.cfg.Configuration import org.junit.Assert._ import net.lshift.diffa.kernel.util.DateUtils._ import net.lshift.diffa.kernel.util.FullDateTimes._ import net.lshift.diffa.kernel.events._ import collection.mutable.{ListBuffer, HashMap} import net.lshift.diffa.kernel.participants._ import net.lshift.diffa.kernel.indexing.{LuceneVersionCorrelationStore, LuceneVersionCorrelationStoreFactory} import scala.collection.JavaConversions._ import org.apache.lucene.store.{MMapDirectory, FSDirectory, RAMDirectory} import java.io.File import org.junit.runner.RunWith import org.junit.experimental.theories.{DataPoints, Theory, DataPoint, Theories} import org.easymock.EasyMock import org.joda.time.{LocalDate, DateTime} import net.lshift.diffa.adapter.scanning._ import net.lshift.diffa.kernel.config.system.SystemConfigStore import org.slf4j.LoggerFactory import net.lshift.diffa.kernel.diag.DiagnosticsManager import net.lshift.diffa.kernel.util.{CategoryChange, UpstreamEndpoint} import net.lshift.diffa.kernel.config._ import org.junit.{Ignore, Before, Test} import net.lshift.diffa.kernel.frontend.DomainPairDef /** * Test cases for the Hibernate backed VersionCorrelationStore. */ @RunWith(classOf[Theories]) class LuceneVersionCorrelationStoreTest { import LuceneVersionCorrelationStoreTest._ private val emptyAttributes:Map[String, TypedAttribute] = Map() private val emptyStrAttributes:Map[String, String] = Map() val log = LoggerFactory.getLogger(getClass) val store = stores(pair) val otherStore = stores(otherPair) val storeWithUnicodeOrder = stores(pairWithUnicodeOrder) @Before def cleanupStore { store.reset otherStore.reset storeWithUnicodeOrder.reset } @Test def matchedPairs = { val writer = store.openWriter() writer.storeUpstreamVersion(VersionID(pair, "id1"), emptyAttributes, DEC_31_2009, "upstreamVsn", None) writer.storeDownstreamVersion(VersionID(pair, "id1"), emptyAttributes, DEC_31_2009, "upstreamVsn", "downstreamVsn", None) writer.flush() val unmatched = store.unmatchedVersions(Seq(), Seq(), None) assertEquals(0, unmatched.size) } @Test def rollbackChanges = { val writer = store.openWriter() writer.storeUpstreamVersion(VersionID(pair, "id1"), emptyAttributes, DEC_31_2009, "upstreamVsn", None) writer.flush writer.storeDownstreamVersion(VersionID(pair, "id1"), emptyAttributes, DEC_31_2009, "upstreamVsn", "downstreamVsn", None) writer.rollback() val unmatched = store.unmatchedVersions(Seq(), Seq(), None) assertEquals(1, unmatched.size) assertEquals("id1", unmatched(0).id) } @Test def versionsShouldBeDeleteable = { val writer = store.openWriter() val id = VersionID(pair, "id1") writer.storeUpstreamVersion(id, emptyAttributes, DEC_31_2009, "uvsn", None) writer.flush def verifyUnmatched(expectation:Int, writer:ExtendedVersionCorrelationWriter) = { val unmatched = store.unmatchedVersions(Seq(), Seq(), None) assertEquals(expectation, unmatched.size) } verifyUnmatched(1, writer) writer.clearUpstreamVersion(id, None) writer.flush() verifyUnmatched(0, writer) writer.clearTombstones() verifyUnmatched(0, writer) } @Test def identicalVersionsShouldNotUpdateMaterialTimestamp { val writer = store.openWriter() val id = VersionID(pair, "id1") writer.storeUpstreamVersion(id, dateTimeAttributes, JUL_1_2010_1, "v1", None) val meaninglessUpdateTimestamp = JUL_1_2010_1.plusMinutes(1) writer.storeUpstreamVersion(id, dateTimeAttributes, meaninglessUpdateTimestamp , "v1", None) writer.flush() validateLastMaterialUpdate(id, JUL_1_2010_1) val meaningfulUpdateTimestamp1 = JUL_1_2010_1.plusMinutes(2) writer.storeUpstreamVersion(id, dateTimeAttributes, meaningfulUpdateTimestamp1 , "v2", None) writer.flush() validateLastMaterialUpdate(id, meaningfulUpdateTimestamp1) val meaningfulUpdateTimestamp2 = JUL_1_2010_1.plusMinutes(3) writer.storeUpstreamVersion(id, excludedByLaterDateTimeAttributes, meaningfulUpdateTimestamp2 , "v2", None) writer.flush() validateLastMaterialUpdate(id, meaningfulUpdateTimestamp2) } @Test def loadTest = { val writer = store.openWriter() val iterations = System.getProperty("lucene.loadtest.iterations","10000").toInt val start = System.currentTimeMillis() for (i <- 0 to iterations) { writer.storeUpstreamVersion(VersionID(pair, "id-" + i), dateTimeAttributes, JUL_1_2010_1, "v-" + i, None) if (i % 1000 == 0) { log.info("%sth iteration".format(i)) } } writer.flush() val end = System.currentTimeMillis() val time = (end - start) / 1000 log.info("Writer load test: %s s".format(time)) } private def validateLastMaterialUpdate(id:VersionID, expected:DateTime) = { val c1 = store.retrieveCurrentCorrelation(id).get assertEquals(expected, c1.lastUpdate) } @Test def constrainedMatchedPairsWithDifferentCategories = { val writer = store.openWriter() writer.storeUpstreamVersion(VersionID(pair, "id1"), dateTimeAttributes, JUL_1_2010_1, "upstreamVsn", None) writer.storeDownstreamVersion(VersionID(pair, "id1"), intAttributes, JUL_1_2010_1, "upstreamVsn", "downstreamVsn", None) writer.flush() val unmatched = store.unmatchedVersions(dateTimeConstraints, intConstraints, None) assertEquals(0, unmatched.size) } @Test def unmatchedPairFromUpstream = { val writer = store.openWriter() val timestamp = new DateTime() writer.storeUpstreamVersion(VersionID(pair, "id2"), emptyAttributes, DEC_31_2009, "upstreamVsn", None) writer.flush() val unmatched = store.unmatchedVersions(Seq(), Seq(), None) assertEquals(1, unmatched.size) assertCorrelationEquals(new Correlation(null, pair, "id2", emptyStrAttributes, emptyStrAttributes, DEC_31_2009, timestamp, "upstreamVsn", null, null, false), unmatched(0)) } @Theory def constrainedAndIncludedUnmatchedPairFromUpstream(system:AttributeSystem) = { val timestamp = new DateTime() val writer = store.openWriter() writer.storeUpstreamVersion(VersionID(pair, "id2"), system.includedAttrs, DEC_31_2009, "upstreamVsn", None) writer.flush() val unmatched = store.unmatchedVersions(system.constraints, system.constraints, None) assertEquals(1, unmatched.size) assertCorrelationEquals(new Correlation(null, pair, "id2", system.includedStrAttrs, emptyStrAttributes, DEC_31_2009, timestamp, "upstreamVsn", null, null, false), unmatched(0)) } @Theory def constrainedAndExcludedUnmatchedPairFromUpstream(system:AttributeSystem) = { val timestamp = new DateTime() val writer = store.openWriter() writer.storeUpstreamVersion(VersionID(pair, "id2"), system.excludedAttrs, DEC_31_2009, "upstreamVsn", None) writer.flush() val unmatched = store.unmatchedVersions(system.constraints, system.constraints, None) assertEquals(0, unmatched.size) } @Test def unmatchedPairFromUpstreamShouldBeIndicatedInReturnValue { val timestamp = new DateTime() val writer = store.openWriter() val corr = writer.storeUpstreamVersion(VersionID(pair, "id2"), emptyAttributes, DEC_31_2009, "upstreamVsn", None) writer.flush() assertCorrelationEquals(new Correlation(null, pair, "id2", emptyStrAttributes, emptyStrAttributes, DEC_31_2009, timestamp, "upstreamVsn", null, null, false), corr) } @Test def unmatchedPairFromDownstream = { val writer = store.openWriter() val timestamp = new DateTime() writer.storeDownstreamVersion(VersionID(pair, "id3"), emptyAttributes, DEC_31_2009, "upstreamVsn", "downstreamVsn", None) writer.flush() val unmatched = store.unmatchedVersions(Seq(), Seq(), None) assertEquals(1, unmatched.size) assertCorrelationEquals(new Correlation(null, pair, "id3", emptyStrAttributes, emptyStrAttributes, DEC_31_2009, timestamp, null, "upstreamVsn", "downstreamVsn", false), unmatched(0)) } @Test def unmatchedPairFromDownstreamShouldBeIndicatedInReturnValue { val timestamp = new DateTime() val writer = store.openWriter() val corr = writer.storeDownstreamVersion(VersionID(pair, "id3"), emptyAttributes, DEC_31_2009, "upstreamVsn", "downstreamVsn", None) writer.flush() assertCorrelationEquals(new Correlation(null, pair, "id3", emptyStrAttributes, emptyStrAttributes, DEC_31_2009, timestamp, null, "upstreamVsn", "downstreamVsn", false), corr) } @Test def matchedPairsAfterChanges = { val writer = store.openWriter() writer.storeUpstreamVersion(VersionID(pair, "id4"), emptyAttributes, DEC_31_2009, "upstreamVsnA", None) writer.storeUpstreamVersion(VersionID(pair, "id4"), emptyAttributes, DEC_31_2009, "upstreamVsnB", None) writer.storeDownstreamVersion(VersionID(pair, "id4"), emptyAttributes, DEC_31_2009, "upstreamVsnA", "downstreamVsnA", None) writer.storeDownstreamVersion(VersionID(pair, "id4"), emptyAttributes, DEC_31_2009, "upstreamVsnB", "downstreamVsnB", None) writer.flush() val unmatched = store.unmatchedVersions(Seq(), Seq(), None) assertEquals(0, unmatched.size) } @Test def unmatchedPairsAfterChanges = { val timestamp = new DateTime() val writer = store.openWriter() writer.storeUpstreamVersion(VersionID(pair, "id5"), emptyAttributes,DEC_31_2009, "upstreamVsnA", None) writer.storeDownstreamVersion(VersionID(pair, "id5"), emptyAttributes, DEC_31_2009, "upstreamVsnA", "downstreamVsnA", None) writer.storeUpstreamVersion(VersionID(pair, "id5"), emptyAttributes, DEC_31_2009, "upstreamVsnB", None) writer.flush() val unmatched = store.unmatchedVersions(Seq(), Seq(), None) assertEquals(1, unmatched.size) assertCorrelationEquals(new Correlation(null, pair, "id5", emptyStrAttributes, emptyStrAttributes, DEC_31_2009, timestamp, "upstreamVsnB", "upstreamVsnA", "downstreamVsnA", false), unmatched(0)) } @Test def unmatchedPairsAfterUpstreamRemoved = { val timestamp = new DateTime() val writer = store.openWriter() writer.storeUpstreamVersion(VersionID(pair, "id5"), emptyAttributes,DEC_31_2009, "upstreamVsnA", None) writer.storeDownstreamVersion(VersionID(pair, "id5"), emptyAttributes, DEC_31_2009, "upstreamVsnA", "downstreamVsnA", None) writer.clearUpstreamVersion(VersionID(pair, "id5"), None) writer.flush() val unmatched = store.unmatchedVersions(Seq(), Seq(), None) assertEquals(1, unmatched.size) assertCorrelationEquals(new Correlation(null, pair, "id5", emptyStrAttributes, emptyStrAttributes, DEC_31_2009, timestamp, null, "upstreamVsnA", "downstreamVsnA", false), unmatched(0)) } @Test def unmatchedPairsAfterDownstreamRemoved = { val timestamp = new DateTime() val writer = store.openWriter() writer.storeUpstreamVersion(VersionID(pair, "id5"), emptyAttributes,DEC_31_2009, "upstreamVsnA", None) writer.storeDownstreamVersion(VersionID(pair, "id5"), emptyAttributes, DEC_31_2009, "upstreamVsnA", "downstreamVsnA", None) writer.clearDownstreamVersion(VersionID(pair, "id5"), None) writer.flush() val unmatched = store.unmatchedVersions(Seq(), Seq(), None) assertEquals(1, unmatched.size) assertCorrelationEquals(new Correlation(null, pair, "id5", emptyStrAttributes, emptyStrAttributes, DEC_31_2009, timestamp, "upstreamVsnA", null, null, false), unmatched(0)) } @Test def unmatchedPairsAfterChangesShouldBeIndicatedInReturnValue = { val timestamp = new DateTime() val writer = store.openWriter() writer.storeUpstreamVersion(VersionID(pair, "id5"), emptyAttributes, DEC_31_2009, "upstreamVsnA", None) writer.storeDownstreamVersion(VersionID(pair, "id5"), emptyAttributes, DEC_31_2009, "upstreamVsnA", "downstreamVsnA", None) val corr = writer.storeUpstreamVersion(VersionID(pair, "id5"), emptyAttributes, DEC_31_2009, "upstreamVsnB", None) writer.flush() assertCorrelationEquals(new Correlation(null, pair, "id5", emptyStrAttributes, emptyStrAttributes, DEC_31_2009, timestamp, "upstreamVsnB", "upstreamVsnA", "downstreamVsnA", false), corr) } @Test def deletingSource = { val writer = store.openWriter() writer.storeUpstreamVersion(VersionID(pair, "id6"), bizDateTimeMap(DEC_1_2009), DEC_1_2009, "upstreamVsn-id6", None) writer.storeUpstreamVersion(VersionID(pair, "id7"), bizDateTimeMap(DEC_1_2009), DEC_1_2009, "upstreamVsn-id7", None) val corr = writer.clearUpstreamVersion(VersionID(pair, "id6"), None) writer.flush() assertCorrelationEquals(new Correlation(null, pair, "id6", null, null, null, null, null, null, null, true), corr) val collector = new Collector store.queryUpstreams(List(new TimeRangeConstraint("bizDateTime", DEC_1_2009, endOfDay(DEC_1_2009))), collector.collectUpstream) assertEquals( List(CollectedUpstreamDetail(VersionID(pair, "id7"), AttributesUtil.toUntypedMap(bizDateTimeMap(DEC_1_2009)), DEC_1_2009, "upstreamVsn-id7")), collector.upstreamObjs.toList) } @Theory def deletingSourceThatIsMatched(system:AttributeSystem) = { val writer = store.openWriter() writer.storeUpstreamVersion(VersionID(pair, "id6"), system.includedAttrs, DEC_1_2009, "upstreamVsn-id6", None) writer.storeDownstreamVersion(VersionID(pair, "id6"), system.includedAttrs, DEC_1_2009, "upstreamVsn-id6", "downstreamVsn-id6", None) writer.clearUpstreamVersion(VersionID(pair, "id6"), None) writer.flush() val collector = new Collector store.queryUpstreams(List(new TimeRangeConstraint("bizDateTime", DEC_1_2009, endOfDay(DEC_1_2009))), collector.collectUpstream) assertEquals(0, collector.upstreamObjs.size) } @Test def deletingDest = { val writer1 = store.openWriter() writer1.storeDownstreamVersion(VersionID(pair, "id6"), bizDateTimeMap(DEC_1_2009), DEC_1_2009, "upstreamVsn-id6", "downstreamVsn-id6", None) writer1.storeDownstreamVersion(VersionID(pair, "id7"), bizDateTimeMap(DEC_1_2009), DEC_1_2009, "upstreamVsn-id7", "downstreamVsn-id7", None) writer1.flush() val writer2 = store.openWriter() val corr = writer2.clearDownstreamVersion(VersionID(pair, "id6"), None) writer2.flush() assertCorrelationEquals(new Correlation(null, pair, "id6", null, null, null, null, null, null, null, true), corr) val collector = new Collector val digests = store.queryDownstreams(List(new TimeRangeConstraint("bizDateTime", DEC_1_2009, endOfDay(DEC_1_2009))), collector.collectDownstream) assertEquals( List(CollectedDownstreamDetail(VersionID(pair, "id7"), AttributesUtil.toUntypedMap(bizDateTimeMap(DEC_1_2009)), DEC_1_2009, "upstreamVsn-id7", "downstreamVsn-id7")), collector.downstreamObjs.toList) } @Theory def deletingDestThatIsMatched(system:AttributeSystem) = { val writer = store.openWriter() writer.storeUpstreamVersion(VersionID(pair, "id6"), system.includedAttrs, DEC_1_2009, "upstreamVsn-id6", None) writer.storeDownstreamVersion(VersionID(pair, "id6"), system.includedAttrs, DEC_1_2009, "upstreamVsn-id6", "downstreamVsn-id6", None) writer.clearDownstreamVersion(VersionID(pair, "id6"), None) writer.flush() val collector = new Collector store.queryDownstreams(List(new TimeRangeConstraint("bizDate", DEC_1_2009, endOfDay(DEC_1_2009))), collector.collectDownstream) assertEquals(0, collector.downstreamObjs.size) } @Theory def queryUpstreamRangeExcludesExcluded(system:AttributeSystem) = { val writer = store.openWriter() writer.storeUpstreamVersion(VersionID(pair, "id1"), system.includedAttrs, DEC_31_2009, "upstreamVsn-id1", None) writer.storeUpstreamVersion(VersionID(pair, "id2"), system.excludedAttrs, DEC_31_2009, "upstreamVsn-id2", None) writer.flush() val collector = new Collector val digests = store.queryUpstreams(system.constraints, collector.collectUpstream) assertEquals( List(CollectedUpstreamDetail(VersionID(pair, "id1"), AttributesUtil.toUntypedMap(system.includedAttrs), DEC_31_2009, "upstreamVsn-id1")), collector.upstreamObjs.toList) } @Theory def queryDownstreamRangeExcludesExcluded(system:AttributeSystem) = { val writer = store.openWriter() writer.storeDownstreamVersion(VersionID(pair, "id1"), system.includedAttrs, DEC_31_2009, "upstreamVsn-id1", "downstreamVsn-id1", None) writer.storeDownstreamVersion(VersionID(pair, "id2"), system.excludedAttrs, DEC_31_2009, "upstreamVsn-id2", "downstreamVsn-id1", None) writer.flush() val collector = new Collector val digests = store.queryDownstreams(system.constraints, collector.collectDownstream) assertEquals( List(CollectedDownstreamDetail(VersionID(pair, "id1"), AttributesUtil.toUntypedMap(system.includedAttrs), DEC_31_2009, "upstreamVsn-id1", "downstreamVsn-id1")), collector.downstreamObjs.toList) } @Test def queryUpstreamRangeReturnsInIDOrderWithAsciiOrdering = { val writer = store.openWriter() writer.storeUpstreamVersion(VersionID(pair, "id7"), bizDateTimeMap(DEC_2_2009), DEC_2_2009, "upstreamVsn-id7", None) writer.storeUpstreamVersion(VersionID(pair, "id6"), bizDateTimeMap(DEC_1_2009), DEC_1_2009, "upstreamVsn-id6", None) writer.flush() val collector = new Collector val digests = store.queryUpstreams(List(), collector.collectUpstream) assertEquals( List( CollectedUpstreamDetail(VersionID(pair, "id6"), AttributesUtil.toUntypedMap(bizDateTimeMap(DEC_1_2009)), DEC_1_2009, "upstreamVsn-id6"), CollectedUpstreamDetail(VersionID(pair, "id7"), AttributesUtil.toUntypedMap(bizDateTimeMap(DEC_2_2009)), DEC_2_2009, "upstreamVsn-id7")), collector.upstreamObjs.toList) } @Test def queryDownstreamRangeReturnsInIDOrderWithAsciiOrdering = { val writer = store.openWriter() writer.storeDownstreamVersion(VersionID(pair, "id7"), bizDateTimeMap(DEC_2_2009), DEC_2_2009, "upstreamVsn-id7", "downstreamVsn-id7", None) writer.storeDownstreamVersion(VersionID(pair, "id6"), bizDateTimeMap(DEC_1_2009), DEC_1_2009, "upstreamVsn-id6", "downstreamVsn-id6", None) writer.flush() val collector = new Collector val digests = store.queryDownstreams(List(), collector.collectDownstream) assertEquals( List( CollectedDownstreamDetail(VersionID(pair, "id6"), AttributesUtil.toUntypedMap(bizDateTimeMap(DEC_1_2009)), DEC_1_2009, "upstreamVsn-id6", "downstreamVsn-id6"), CollectedDownstreamDetail(VersionID(pair, "id7"), AttributesUtil.toUntypedMap(bizDateTimeMap(DEC_2_2009)), DEC_2_2009, "upstreamVsn-id7", "downstreamVsn-id7")), collector.downstreamObjs.toList) } @Test def queryUpstreamRangeCanReturnResultsWithUnicodeCollation = { val writer = storeWithUnicodeOrder.openWriter() writer.storeUpstreamVersion(VersionID(pairWithUnicodeOrder, "FooBarWithSuffix"), bizDateTimeMap(DEC_2_2009), DEC_2_2009, "upstreamVsn-id7", None) writer.storeUpstreamVersion(VersionID(pairWithUnicodeOrder, "foo"), bizDateTimeMap(DEC_1_2009), DEC_1_2009, "upstreamVsn-id6", None) writer.flush() val collector = new Collector val digests = storeWithUnicodeOrder.queryUpstreams(List(), collector.collectUpstream) assertEquals( List( CollectedUpstreamDetail(VersionID(pairWithUnicodeOrder, "foo"), AttributesUtil.toUntypedMap(bizDateTimeMap(DEC_1_2009)), DEC_1_2009, "upstreamVsn-id6"), CollectedUpstreamDetail(VersionID(pairWithUnicodeOrder, "FooBarWithSuffix"), AttributesUtil.toUntypedMap(bizDateTimeMap(DEC_2_2009)), DEC_2_2009, "upstreamVsn-id7")), collector.upstreamObjs.toList) } @Test def queryDownstreamRangeCanReturnResultsWithUnicodeCollation = { val writer = storeWithUnicodeOrder.openWriter() writer.storeDownstreamVersion(VersionID(pairWithUnicodeOrder, "foo"), bizDateTimeMap(DEC_1_2009), DEC_1_2009, "upstreamVsn-id6", "downstreamVsn-id6", None) writer.storeDownstreamVersion(VersionID(pairWithUnicodeOrder, "FooBarWithSuffix"),bizDateTimeMap(DEC_2_2009), DEC_2_2009, "upstreamVsn-id7", "downstreamVsn-id7", None) writer.flush() val collector = new Collector val digests = storeWithUnicodeOrder.queryDownstreams(List(), collector.collectDownstream) assertEquals( List( CollectedDownstreamDetail(VersionID(pairWithUnicodeOrder, "foo"), AttributesUtil.toUntypedMap(bizDateTimeMap(DEC_1_2009)), DEC_1_2009, "upstreamVsn-id6", "downstreamVsn-id6"), CollectedDownstreamDetail(VersionID(pairWithUnicodeOrder, "FooBarWithSuffix"), AttributesUtil.toUntypedMap(bizDateTimeMap(DEC_2_2009)), DEC_2_2009, "upstreamVsn-id7", "downstreamVsn-id7")), collector.downstreamObjs.toList) } @Test def storedUpstreamShouldBeRetrievable = { val timestamp = new DateTime() val writer = store.openWriter() writer.storeUpstreamVersion(VersionID(pair, "id23"), emptyAttributes, DEC_1_2009, "upstreamVsn-id23", None) writer.flush() val corr = store.retrieveCurrentCorrelation(VersionID(pair, "id23")).getOrElse(null) assertCorrelationEquals( new Correlation(null, pair, "id23", emptyStrAttributes, null, DEC_1_2009, timestamp, "upstreamVsn-id23", null, null, false), corr) } @Test def storedDownstreamShouldBeRetrievable = { val timestamp = new DateTime() val writer = store.openWriter() writer.storeDownstreamVersion(VersionID(pair, "id23"), emptyAttributes, DEC_1_2009, "upstreamVsn-id23", "downstreamVsn-id23", None) writer.flush() val corr = store.retrieveCurrentCorrelation(VersionID(pair, "id23")).getOrElse(null) assertCorrelationEquals( new Correlation(null, pair, "id23", null, emptyStrAttributes, DEC_1_2009, timestamp, null, "upstreamVsn-id23", "downstreamVsn-id23", false), corr) } @Test def storedMatchShouldBeRetrievable = { val timestamp = new DateTime() val writer = store.openWriter() writer.storeUpstreamVersion(VersionID(pair, "id23"), emptyAttributes, DEC_1_2009, "upstreamVsn-id23", None) writer.storeDownstreamVersion(VersionID(pair, "id23"), emptyAttributes, DEC_1_2009, "upstreamVsn-id23", "downstreamVsn-id23", None) writer.flush() val corr = store.retrieveCurrentCorrelation(VersionID(pair, "id23")).getOrElse(null) assertCorrelationEquals( new Correlation(null, pair, "id23", emptyStrAttributes, emptyStrAttributes, DEC_1_2009, timestamp, "upstreamVsn-id23", "upstreamVsn-id23", "downstreamVsn-id23", true), corr) } @Test def unknownCorrelationShouldNotBeRetrievable = { val corr = store.retrieveCurrentCorrelation(VersionID(pair, "id99-missing")) assertEquals(None, corr) } @Test def storesMustBeIsolatedByPairKey = { val writer = store.openWriter() val otherWriter = otherStore.openWriter() otherWriter.storeUpstreamVersion(VersionID(otherPair, "123456789"), emptyAttributes, DEC_1_2009, "up-123456789", None) otherWriter.storeDownstreamVersion(VersionID(otherPair, "123456789"), emptyAttributes, DEC_1_2009, "up-123456789", "down-123456789", None) otherWriter.flush() assertCorrelationEquals( new Correlation(null, otherPair, "123456789", Map[String,String](), Map[String,String](), DEC_1_2009, null, "up-123456789", "up-123456789", "down-123456789", true), otherStore.retrieveCurrentCorrelation(VersionID(otherPair, "123456789")).getOrElse(null)) writer.storeUpstreamVersion(VersionID(pair, "123456789"), emptyAttributes, DEC_1_2009, "up-987654321", None) writer.flush() assertCorrelationEquals( new Correlation(null, pair, "123456789", Map[String,String](), Map[String,String](), DEC_1_2009, null, "up-987654321", null, null, false), store.retrieveCurrentCorrelation(VersionID(pair, "123456789")).getOrElse(null)) // re-check other store assertCorrelationEquals( new Correlation(null, otherPair, "123456789", Map[String,String](), Map[String,String](), DEC_1_2009, null, "up-123456789", "up-123456789", "down-123456789", true), otherStore.retrieveCurrentCorrelation(VersionID(otherPair, "123456789")).getOrElse(null)) } @Test def flushingWriterMustClearBuffers { val writer = store.openWriter() assertFalse(writer.isDirty) writer.storeUpstreamVersion(VersionID(pair, "id23"), emptyAttributes, DEC_1_2009, "upstreamVsn-id23", None) assertTrue(writer.isDirty) writer.storeDownstreamVersion(VersionID(pair, "id23"), emptyAttributes, DEC_1_2009, "upstreamVsn-id23", "downstreamVsn-id23", None) assertTrue(writer.isDirty) writer.flush() assertFalse(writer.isDirty) writer.clearUpstreamVersion(VersionID(pair, "id23"), None) assertTrue(writer.isDirty) writer.flush() assertFalse(writer.isDirty) } @Test def writerMustFlushWhenMaxBufferSizeIsReached { val writer = store.openWriter() assertFalse(writer.isDirty) for (i <- 1 to 9999) { writer.storeUpstreamVersion(VersionID(pair, "id" + i), emptyAttributes, DEC_1_2009, "upstreamVsn-id" + i, None) assertTrue(writer.isDirty) } writer.storeUpstreamVersion(VersionID(pair, "id10000"), emptyAttributes, DEC_1_2009, "upstreamVsn-id10000", None) // should be flushed implicitly at this point assertFalse(writer.isDirty) } @Test def storeShouldClearWhenRemoved = { val writer = store.openWriter() writer.storeUpstreamVersion(VersionID(pair, "id1"), emptyAttributes, DEC_31_2009, "upstreamVsn", None) writer.storeDownstreamVersion(VersionID(pair, "id2"), emptyAttributes, DEC_31_2009, "upstreamVsn", "downstreamVsn", None) writer.flush() assertEquals(2, store.unmatchedVersions(Seq(), Seq(), None).length) stores.remove(pair) val reopenedStore = stores(pair) assertEquals(0, reopenedStore.unmatchedVersions(Seq(), Seq(), None).length) } @Test def shouldAllowCategoriesToBeAddedWhenEmpty() { store.ensureUpgradeable(UpstreamEndpoint, Seq(CategoryChange("newSet", None, Some(new SetCategoryDescriptor(Set("aaa")))))) } @Test def shouldPreventAddingCategoryWhenDataExists() { val writer = store.openWriter() writer.storeUpstreamVersion(VersionID(pair, "id1"), emptyAttributes, DEC_31_2009, "upstreamVsn", None) writer.flush() try { store.ensureUpgradeable(UpstreamEndpoint, Seq(CategoryChange("newSet", None, Some(new SetCategoryDescriptor(Set("aaa")))))) fail("Expected IncompatibleCategoryChangeException") } catch { case e:IncompatibleCategoryChangeException => assertEquals( "Change to category newSet is not allowed: Cannot add a category as existing data is stored for pair PairRef(%s,%s)".format(pair.name, pair.space), e.getMessage) } } @Test def shouldAllowAdditionOfSetCategoryValue() { val writer = store.openWriter() writer.storeUpstreamVersion(VersionID(pair, "id1"), Map("someSet" -> StringAttribute("aaa")), DEC_31_2009, "upstreamVsn", None) writer.flush() store.ensureUpgradeable(UpstreamEndpoint, Seq(CategoryChange("someSet", Some(new SetCategoryDescriptor(Set("aaa"))), Some(new SetCategoryDescriptor(Set("aaa", "bbb")))))) } @Test def shouldNotAllowRemovalOfUsedSetCategoryValue() { val writer = store.openWriter() writer.storeUpstreamVersion(VersionID(pair, "id1"), Map("someSet" -> StringAttribute("aaa")), DEC_31_2009, "upstreamVsn", None) writer.flush() try { store.ensureUpgradeable(UpstreamEndpoint, Seq(CategoryChange("someSet", Some(new SetCategoryDescriptor(Set("aaa", "bbb"))), Some(new SetCategoryDescriptor(Set("bbb")))))) fail("Expected IncompatibleCategoryChangeException") } catch { case e:IncompatibleCategoryChangeException => assertEquals( "Change to category someSet is not allowed: Updated category bounds do not cover all stored values for pair PairRef(%s,%s)".format(pair.name, pair.space), e.getMessage) } } @Test def shouldAllowRemovalOfUnusedSetCategoryValue() { val writer = store.openWriter() writer.storeUpstreamVersion(VersionID(pair, "id1"), Map("someSet" -> StringAttribute("aaa")), DEC_31_2009, "upstreamVsn", None) writer.flush() store.ensureUpgradeable(UpstreamEndpoint, Seq(CategoryChange("someSet", Some(new SetCategoryDescriptor(Set("aaa", "bbb"))), Some(new SetCategoryDescriptor(Set("aaa")))))) } @Test def shouldAllowChangeOfAttributeTypeWhenDataIsNotPresent() { store.ensureUpgradeable(UpstreamEndpoint, Seq(CategoryChange("someSet", Some(new SetCategoryDescriptor(Set("aaa", "bbb"))), Some(new RangeCategoryDescriptor("date"))))) } @Test def shouldNotAllowChangeOfAttributeTypeWhenDataIsPresent() { val writer = store.openWriter() writer.storeUpstreamVersion(VersionID(pair, "id1"), Map("someSet" -> StringAttribute("aaa")), DEC_31_2009, "upstreamVsn", None) writer.flush() try { store.ensureUpgradeable(UpstreamEndpoint, Seq(CategoryChange("someSet", Some(new SetCategoryDescriptor(Set("aaa", "bbb"))), Some(new RangeCategoryDescriptor("date"))))) fail("Expected IncompatibleCategoryChangeException") } catch { case e:IncompatibleCategoryChangeException => assertEquals( "Change to category someSet is not allowed: Cannot change category type as existing data is stored for pair PairRef(%s,%s)".format(pair.name, pair.space), e.getMessage) } } @Test def shouldNotAllowChangeOfRangeAttributeDataTypeWhenDataIsPresent() { val writer = store.openWriter() writer.storeUpstreamVersion(VersionID(pair, "id1"), Map("someDate" -> DateAttribute(DEC_31_2009.toLocalDate)), DEC_31_2009, "upstreamVsn", None) writer.flush() try { store.ensureUpgradeable(UpstreamEndpoint, Seq(CategoryChange("someDate", Some(new RangeCategoryDescriptor("date")), Some(new RangeCategoryDescriptor("datetime"))))) fail("Expected IncompatibleCategoryChangeException") } catch { case e:IncompatibleCategoryChangeException => assertEquals( "Change to category someDate is not allowed: Cannot change category type as existing data is stored for pair PairRef(%s,%s)".format(pair.name, pair.space), e.getMessage) } } private def assertCorrelationEquals(expected:Correlation, actual:Correlation) { if (expected == null) { assertNull(actual) } else { assertNotNull(actual) assertEquals(expected.id, actual.id) assertEquals(expected.pairing, actual.pairing) assertEquals(expected.upstreamVsn, actual.upstreamVsn) assertEquals(expected.downstreamUVsn, actual.downstreamUVsn) assertEquals(expected.downstreamDVsn, actual.downstreamDVsn) assertEquals(expected.upstreamAttributes, actual.upstreamAttributes) assertEquals(expected.downstreamAttributes, actual.downstreamAttributes) assertEquals(expected.isMatched, actual.isMatched) } } } case class CollectedUpstreamDetail(id:VersionID, attributes:Map[String, String], lastUpdate:DateTime, vsn:String) case class CollectedDownstreamDetail(id:VersionID, attributes:Map[String, String], lastUpdate:DateTime, uvsn:String, dvsn:String) class Collector { val upstreamObjs = new ListBuffer[CollectedUpstreamDetail] val downstreamObjs = new ListBuffer[CollectedDownstreamDetail] def collectUpstream(id:VersionID, attributes:Map[String, String], lastUpdate:DateTime, vsn:String) = { upstreamObjs += CollectedUpstreamDetail(id, attributes, lastUpdate, vsn) } def collectDownstream(id:VersionID, attributes:Map[String, String], lastUpdate:DateTime, uvsn:String, dvsn:String) = { downstreamObjs += CollectedDownstreamDetail(id, attributes, lastUpdate, uvsn, dvsn) } } object LuceneVersionCorrelationStoreTest { val space = System.currentTimeMillis() val pair = PairRef(name="pair", space = space) val otherPair = PairRef(name="other-pair",space = space) val pairWithUnicodeOrder = PairRef(name="pair-with-unicode-ordering",space = space) val dummyConfigStore = EasyMock.createMock(classOf[SystemConfigStore]) val dummyDomainConfigStore = EasyMock.createMock(classOf[DomainConfigStore]) EasyMock.expect(dummyConfigStore. maybeSystemConfigOption(VersionCorrelationStore.schemaVersionKey)). andStubReturn(Some(VersionCorrelationStore.currentSchemaVersion.toString)) Map(pair -> AsciiCollationOrdering.name, pairWithUnicodeOrder -> UnicodeCollationOrdering.name).foreach { case (pair, collation) => EasyMock.expect(dummyDomainConfigStore.getPairDef(pair) ).andStubReturn( DomainPairDef( key = pair.name, space = space, upstreamName = "%s-dummyUpstream".format(pair.name), downstreamName = "%s-dummyDownstream".format(pair.name)) ) Seq( "dummyUpstream", "dummyDownstream").foreach { sideName: String => EasyMock.expect(dummyDomainConfigStore.getEndpoint(space, "%s-%s".format(pair.name, sideName)) ).andStubReturn(Endpoint(collation=collation)) } } EasyMock.replay(dummyConfigStore) EasyMock.replay(dummyDomainConfigStore) val dummyDiagnostics = EasyMock.createNiceMock(classOf[DiagnosticsManager]) EasyMock.replay(dummyDiagnostics) val stores = new LuceneVersionCorrelationStoreFactory("target", dummyConfigStore, dummyDomainConfigStore, dummyDiagnostics) // Helper methods for various constraint/attribute scenarios def bizDateTimeSeq(d:DateTime) = Seq(d.toString()) def bizDateTimeMap(d:DateTime) = Map("bizDateTime" -> DateTimeAttribute(d)) def bizDateSeq(d:LocalDate) = Seq(d.toString()) def bizDateMap(d:LocalDate) = Map("bizDate" -> DateAttribute(d)) def intMap(i:Int) = Map("someInt" -> IntegerAttribute(i)) def stringMap(s:String) = Map("someString" -> StringAttribute(s)) // Standard attribute/constraint definitions private val dateTimeAttributes = bizDateTimeMap(JUL_1_2010_1) private val excludedByEarlierDateTimeAttributes = bizDateTimeMap(FEB_15_2010) private val excludedByLaterDateTimeAttributes = bizDateTimeMap(AUG_11_2010_1) private val dateTimeConstraints = Seq(new TimeRangeConstraint("bizDateTime", JUL_2010, END_JUL_2010)) private val unboundedLowerDateTimeConstraint = Seq(new TimeRangeConstraint("bizDateTime", null, END_JUL_2010)) private val unboundedUpperDateTimeConstraint = Seq(new TimeRangeConstraint("bizDateTime", JUL_2010, null)) private val dateAttributes = bizDateMap(JUL_1_2010.toLocalDate) private val excludedByEarlierDateAttributes = bizDateMap(FEB_15_2010.toLocalDate) private val excludedByLaterDateAttributes = bizDateMap(AUG_11_2010.toLocalDate) private val dateConstraints = Seq(new DateRangeConstraint("bizDate", JUL_1_2010.toLocalDate, JUL_31_2010.toLocalDate)) private val unboundedLowerDateConstraint = Seq(new DateRangeConstraint("bizDate", null, JUL_31_2010.toLocalDate)) private val unboundedUpperDateConstraint = Seq(new DateRangeConstraint("bizDate", JUL_1_2010.toLocalDate, null)) private val intAttributes = intMap(2500) private val excludedIntAttributes = intMap(20000) private val intConstraints = Seq(new IntegerRangeConstraint("someInt", 2000, 2999)) private val stringAttributes = stringMap("abc") private val excludedStringAttributes = stringMap("def") private val stringConstraints = Seq(new StringPrefixConstraint("someString", "ab")) private val setConstraints = Seq(new SetConstraint("someString", Set("abc","abc123","abcdef"))) // Defines a testable combination of constraints/attributes the store should be able to handle case class AttributeSystem(constraints:Seq[ScanConstraint], includedAttrs:Map[String, TypedAttribute], excludedAttrs:Map[String, TypedAttribute]) { def includedStrAttrs = includedAttrs.map { case (k, v) => k -> v.value }.toMap def excludedStrAttrs = excludedAttrs.map { case (k, v) => k -> v.value }.toMap } @DataPoints def dateTimes = Array( AttributeSystem(dateTimeConstraints, dateTimeAttributes, excludedByLaterDateTimeAttributes), AttributeSystem(dateTimeConstraints, dateTimeAttributes, excludedByEarlierDateTimeAttributes) ) @DataPoints def unboundedDateTimes = Array( AttributeSystem(unboundedLowerDateTimeConstraint, dateTimeAttributes, excludedByLaterDateTimeAttributes), AttributeSystem(unboundedUpperDateTimeConstraint, dateTimeAttributes, excludedByEarlierDateTimeAttributes) ) @DataPoints def dates = Array( AttributeSystem(dateConstraints, dateAttributes, excludedByLaterDateAttributes), AttributeSystem(dateConstraints, dateAttributes, excludedByEarlierDateAttributes) ) @DataPoints def unboundedDates = Array( AttributeSystem(unboundedLowerDateConstraint, dateAttributes, excludedByLaterDateAttributes), AttributeSystem(unboundedUpperDateConstraint, dateAttributes, excludedByEarlierDateAttributes) ) @DataPoint def ints = AttributeSystem(intConstraints, intAttributes, excludedIntAttributes) @DataPoint def strings = AttributeSystem(stringConstraints, stringAttributes, excludedStringAttributes) @DataPoint def set = AttributeSystem(setConstraints, stringAttributes, excludedStringAttributes) @DataPoints def setAndDateTimes = Array( AttributeSystem(dateTimeConstraints ++ setConstraints, dateTimeAttributes ++ stringAttributes, excludedByLaterDateTimeAttributes ++ excludedStringAttributes), AttributeSystem(dateTimeConstraints ++ setConstraints, dateTimeAttributes ++ stringAttributes, dateTimeAttributes ++ excludedStringAttributes), AttributeSystem(dateTimeConstraints ++ setConstraints, dateTimeAttributes ++ stringAttributes, excludedByLaterDateTimeAttributes ++ stringAttributes) ) }
lshift/diffa
kernel/src/test/scala/net/lshift/diffa/kernel/differencing/LuceneVersionCorrelationStoreTest.scala
Scala
apache-2.0
38,380
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package com.bwsw.sj.engine.core.testutils.benchmark.loader.kafka import com.bwsw.common.KafkaClient import com.bwsw.sj.common.utils.benchmark.KafkaDataSender import com.bwsw.sj.engine.core.testutils.benchmark.loader.{BenchmarkDataSender, SenderFactory} import com.typesafe.config.Config /** * Provides methods for sending data into Kafka topic for test some application * * @param config configuration of data sender * @param messagesCountMultiplier multiplies count of messages sending by send() * @author Pavel Tomskikh */ class KafkaBenchmarkDataSender(config: KafkaBenchmarkDataSenderConfig, messagesCountMultiplier: Double) extends BenchmarkDataSender[KafkaBenchmarkDataSenderParameters] { override val warmingUpParameters = KafkaBenchmarkDataSenderParameters(10, 10) private val client: KafkaClient = new KafkaClient(Array(config.zooKeeperAddress)) private val sender = new KafkaDataSender(config.kafkaAddress) private var currentMessageSize: Long = 0 private var currentStorageSize: Long = 0 clearStorage() /** * Removes data from Kafka topic */ override def clearStorage(): Unit = { deleteTopic() createTopic() } /** * Sends data into Kafka topic * * @param parameters sending data parameters */ override def send(parameters: KafkaBenchmarkDataSenderParameters): Unit = { if (parameters.messageSize != currentMessageSize) { clearStorage() currentStorageSize = 0 currentMessageSize = parameters.messageSize } if (parameters.messagesCount > currentStorageSize) { val appendedMessages = parameters.messagesCount - currentStorageSize sender.send( messageSize = currentMessageSize, messages = (appendedMessages * messagesCountMultiplier).toInt, words = config.words, separator = " ", topic = config.topic) currentStorageSize = parameters.messagesCount } } override def iterator: Iterator[KafkaBenchmarkDataSenderParameters] = { config.messageSizes.flatMap { messageSize => config.messagesCounts.map { messagesCount => KafkaBenchmarkDataSenderParameters(messageSize, messagesCount) } }.iterator } /** * Closes connection with Kafka */ override def stop(): Unit = client.close() /** * Creates topic if it does not exists */ private def createTopic(): Unit = { if (!client.topicExists(config.topic)) { client.createTopic(config.topic, 1, 1) while (!client.topicExists(config.topic)) Thread.sleep(100) } } /** * Deletes topic if it exists */ private def deleteTopic(): Unit = { if (client.topicExists(config.topic)) { client.deleteTopic(config.topic) while (client.topicExists(config.topic)) Thread.sleep(100) } } } object KafkaBenchmarkDataSender extends KafkaBenchmarkDataSenderFactory class KafkaBenchmarkDataSenderFactory(messagesCountMultiplier: Double = 1.1) extends SenderFactory[KafkaBenchmarkDataSenderParameters, KafkaBenchmarkDataSenderConfig] { override def create(config: Config): (KafkaBenchmarkDataSender, KafkaBenchmarkDataSenderConfig) = { val senderConfig = new KafkaBenchmarkDataSenderConfig(config) val sender = new KafkaBenchmarkDataSender(senderConfig, messagesCountMultiplier) (sender, senderConfig) } }
bwsw/sj-platform
core/sj-engine-core/src/main/scala/com/bwsw/sj/engine/core/testutils/benchmark/loader/kafka/KafkaBenchmarkDataSender.scala
Scala
apache-2.0
4,178
package com.outr.arango import com.outr.arango.model.ArangoCode case class ArangoError(error: Boolean, code: Int, errorNum: Option[Int], errorMessage: String) { lazy val errorCode: ArangoCode = ArangoCode(errorNum.getOrElse(ArangoCode.Failed.code)) def is(code: ArangoCode): Boolean = code == errorCode override def toString: String = s"error: $error, message: $errorMessage, code: $errorCode" }
outr/arangodb-scala
driver/src/main/scala/com/outr/arango/ArangoError.scala
Scala
mit
405
/** * Copyright (C) 2011 Orbeon, Inc. * * This program is free software; you can redistribute it and/or modify it under the terms of the * GNU Lesser General Public License as published by the Free Software Foundation; either version * 2.1 of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; * without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * See the GNU Lesser General Public License for more details. * * The full text of the license is available at http://www.gnu.org/copyleft/lesser.html */ package org.orbeon.oxf.fb import collection.mutable import org.orbeon.oxf.fr.FormRunner._ import org.orbeon.oxf.properties.Properties import org.orbeon.oxf.util.ScalaUtils._ import org.orbeon.oxf.xforms.action.XFormsAPI._ import org.orbeon.saxon.om.NodeInfo import org.orbeon.scaxon.XML._ /* * Form Builder: operations on grids. */ trait GridOps extends ContainerOps { case class Cell(td: NodeInfo, rowspan: Int, missing: Boolean) { def originalRowspan = getNormalizedRowspan(td) def originalRowspan_= (newRowSpan: Int): Unit = ensureAttribute(td, "rowspan", newRowSpan.toString) } // Get the first enclosing repeated grid or legacy repeat def getContainingGrid(descendantOrSelf: NodeInfo, includeSelf: Boolean = false) = findAncestorContainers(descendantOrSelf, includeSelf) filter IsGrid head // Extract the rowspan of a td (default is 1 if there is no attribute) private def getNormalizedRowspan(td: NodeInfo) = td attValueOpt "rowspan" map (_.toInt) getOrElse 1 // For the previous row of prepared cells, and a new row of tds, return the new row of prepared cells private def newCellsRow(previousRow: Seq[Cell], tds: Seq[NodeInfo]): Seq[Cell] = previousRow match { case Seq() ⇒ // First row: start with initial rowspans tds map (td ⇒ Cell(td, getNormalizedRowspan(td), missing = false)) case _ ⇒ // Subsequent rows val tdsIterator = tds.toIterator previousRow map { case Cell(_, 1, _) ⇒ val td = tdsIterator.next() Cell(td, getNormalizedRowspan(td), missing = false) case Cell(td, rowspan, _) ⇒ Cell(td, rowspan - 1, missing = true) } } // Get cell/rowspan information for the given grid row def getRowCells(tr: NodeInfo): Seq[Cell] = { // All trs up to and including the current tr val trs = (tr precedingSibling "*:tr").reverse :+ tr // For each row, the Seq of tds val rows = trs map (_ \\ "*:td") // Return the final row of prepared cells rows.foldLeft(Seq[Cell]())(newCellsRow) } // Get cell/rowspan information for all the rows in the grid def getAllRowCells(grid: NodeInfo): Seq[Seq[Cell]] = { // All trs up to and including the current tr val trs = grid \\ "*:tr" // For each row, the Seq of tds val rows = trs map (_ \\ "*:td") // Accumulate the result for each row as we go val result = mutable.Buffer[Seq[Cell]]() rows.foldLeft(Seq[Cell]()) { (previousRow, tds) ⇒ val newRow = newCellsRow(previousRow, tds) result += newRow newRow } result } // Width of the grid in columns def getGridSize(grid: NodeInfo) = (grid \\ "*:tr")(0) \\ "*:td" size def newTdElement(grid: NodeInfo, id: String, rowspan: Option[Int] = None): NodeInfo = rowspan match { case Some(rowspan) ⇒ <xh:td xmlns:xh="http://www.w3.org/1999/xhtml" id={id} rowspan={rowspan.toString}/> case _ ⇒ <xh:td xmlns:xh="http://www.w3.org/1999/xhtml" id={id}/> } private def trAtRowPos(gridId: String, rowPos: Int): NodeInfo = { val grid = containerById(gridId) val trs = grid \\ "*:tr" // Reason for the modulo: the value of rowPos sent by client is on the flatten iterations / rows; // it is not just row position in the first iteration. trs(rowPos % trs.length) } // Insert a row below def insertRowBelow(gridId: String, rowPos: Int): NodeInfo = insertRowBelow(trAtRowPos(gridId, rowPos)) def insertRowBelow(tr: NodeInfo): NodeInfo = { // NOTE: This algorithm expands rowspans that span over the current row, but not rowspans that end with the // current row. val grid = getContainingGrid(tr) val rowCells = getRowCells(tr) // Number of cells that end at the current row val newCellCount = rowCells count (cell ⇒ cell.rowspan == 1) // Increment rowspan of cells that don't end at the current row rowCells foreach { cell ⇒ if (cell.rowspan > 1) cell.originalRowspan += 1 } // Insert the new row val result = insert(into = grid, after = tr, origin = newRow(grid, newCellCount)).headOption debugDumpDocumentForGrids("insert row below", grid) result orNull // bad, but insert() is not always able to return the inserted item at this time } // Insert a row above def insertRowAbove(gridId: String, rowPos: Int): NodeInfo = insertRowAbove(trAtRowPos(gridId, rowPos)) def insertRowAbove(tr: NodeInfo): NodeInfo = tr precedingSibling "*:tr" headOption match { case Some(prevRow) ⇒ // Do as if this was an insert below the previous row // This makes things simpler as we can reuse insertRowBelow, but maybe another logic could make sense too insertRowBelow(prevRow) case None ⇒ // Insert as first row of the table val grid = getContainingGrid(tr) val result = insert(into = grid, before = tr, origin = newRow(grid, getGridSize(grid))).head debugDumpDocumentForGrids("insert row above", grid) result } private def newRow(grid: NodeInfo, size: Int): NodeInfo = { // Get as many fresh ids as there are tds val ids = nextIds(grid, "tmp", size).toIterator <xh:tr xmlns:xh="http://www.w3.org/1999/xhtml">{ 1 to size map (_ ⇒ <xh:td id={ids.next()}/>) }</xh:tr> } // Delete a row and contained controls def deleteRow(gridId: String, rowPos: Int): Unit = deleteRow(trAtRowPos(gridId, rowPos)) def deleteRow(tr: NodeInfo): Unit = { val doc = tr.getDocumentRoot val allRowCells = getAllRowCells(getContainingGrid(tr)) val posy = tr precedingSibling "*:tr" size val rowCells = allRowCells(posy) val nextRowCells = if (allRowCells.size > posy + 1) Some(allRowCells(posy + 1)) else None // Find all tds to delete val tdsToDelete = tr \\\\ "*:td" // Find the new td to select if we are removing the currently selected td val newTdToSelect = findNewTdToSelect(tr, tdsToDelete) // Decrement rowspans if needed rowCells.zipWithIndex foreach { case (cell, posx) ⇒ if (cell.originalRowspan > 1) { if (cell.missing) { // This cell is part of a rowspan that starts in a previous row, so decrement cell.originalRowspan -= 1 } else if (nextRowCells.isDefined) { // This cell is the start of a rowspan, and we are deleting it, so add a td in the next row // TODO XXX: issue: as we insert tds, we can't rely on Cell info unless it is updated ⇒ } } } // Delete all controls in the row tdsToDelete foreach (deleteCellContent(_)) // Delete row and its content delete(tr) // Update templates updateTemplatesCheckContainers(doc, findAncestorRepeatNames(tr).to[Set]) // Adjust selected td if needed newTdToSelect foreach selectTd debugDumpDocumentForGrids("delete row", tr) } // Whether this is the last grid in the section // NOTE: Use this until we implement the new selection system allowing moving stuff around freely def isLastGridInSection(grid: NodeInfo) = childrenGrids(findAncestorContainers(grid).head).size == 1 private def tdAtColPos(gridId: String, colPos: Int): NodeInfo = { val grid = containerById(gridId) val firstRow = (grid \\ "*:tr").head (firstRow \\ "*:td")(colPos) } def maxGridColumns = Properties.instance.getPropertySet.getInteger("oxf.fb.grid.max-columns", 4) // Insert a column to the right def insertColRight(gridId: String, colPos: Int): Unit = insertColRight(tdAtColPos(gridId, colPos)) def insertColRight(firstRowTd: NodeInfo): Unit = { val grid = getContainingGrid(firstRowTd) if (getGridSize(grid) < maxGridColumns) { val allRowCells = getAllRowCells(grid) val pos = firstRowTd precedingSibling "*:td" size val ids = nextIds(grid, "tmp", allRowCells.size).toIterator allRowCells foreach { cells ⇒ val cell = cells(pos) // For now insert same rowspans as previous column, but could also insert full column as an option if (! cell.missing) { insert(into = cell.td parent *, after = cell.td, origin = newTdElement(grid, ids.next(), if (cell.rowspan > 1) Some(cell.rowspan) else None)) } } debugDumpDocumentForGrids("insert col right", grid) } } // Insert a column to the left def insertColLeft(gridId: String, colPos: Int): Unit = insertColLeft(tdAtColPos(gridId, colPos)) def insertColLeft(firstRowTd: NodeInfo): Unit = { val grid = getContainingGrid(firstRowTd) if (getGridSize(grid) < maxGridColumns) { val pos = firstRowTd precedingSibling "*:td" size if (pos > 0) { // Do as if this was an insert to the right of the previous column // This makes things simpler as we can reuse insertColRight, but maybe another logic could make sense too insertColRight(firstRowTd precedingSibling "*:td" head) } else { // First column: just insert plain tds as the first row val trs = grid \\ "*:tr" val ids = nextIds(grid, "tmp", trs.size).toIterator trs foreach { tr ⇒ insert(into = tr, origin = newTdElement(grid, ids.next())) } debugDumpDocumentForGrids("insert col left", grid) } } } // Find a column's tds def getColTds(td: NodeInfo) = { val rows = getContainingGrid(td) \\ "*:tr" val (x, _) = tdCoordinates(td) rows map (row ⇒ (row \\ "*:td")(x)) } // Insert a column and contained controls def deleteCol(gridId: String, colPos: Int): Unit = deleteCol(tdAtColPos(gridId, colPos)) def deleteCol(firstRowTd: NodeInfo): Unit = { val doc = firstRowTd.getDocumentRoot val grid = getContainingGrid(firstRowTd) val allRowCells = getAllRowCells(grid) val pos = firstRowTd precedingSibling "*:td" size // Find all tds to delete val tdsToDelete = allRowCells map (_(pos)) filterNot (_.missing) map (_.td) // Find the new td to select if we are removing the currently selected td val newTdToSelect = findNewTdToSelect(firstRowTd, tdsToDelete) // Delete the concrete td at this column position in each row tdsToDelete foreach { td ⇒ deleteCellContent(td) delete(td) } // Update templates updateTemplatesCheckContainers(doc, findAncestorRepeatNames(firstRowTd).to[Set]) // Adjust selected td if needed newTdToSelect foreach selectTd debugDumpDocumentForGrids("delete col", grid) } def controlsInCol(gridId: String, colPos: Int): Int = controlsInCol(tdAtColPos(gridId, colPos)) def controlsInCol(firstRowTd: NodeInfo): Int = { val grid = getContainingGrid(firstRowTd) val allRowCells = getAllRowCells(grid) val (x, _) = tdCoordinates(firstRowTd: NodeInfo, allRowCells: Seq[Seq[Cell]]) allRowCells map (_(x)) filterNot (_.missing) count (cell ⇒ hasChildren(cell.td)) } def controlsInRow(gridId: String, rowPos: Int): Int = { val row = trAtRowPos(gridId, rowPos) (row \\ "*:td" \\ *).length } private def selectedCellVar = asNodeInfo(topLevelModel("fr-form-model").get.getVariable("selected-cell")) // Find the currently selected grid td if any def findSelectedTd(inDoc: NodeInfo) = findInViewTryIndex(inDoc, selectedCellVar.stringValue) // Make the given grid td selected def selectTd(newTd: NodeInfo): Unit = setvalue(selectedCellVar, newTd \\@ "id" stringValue) // Whether a call to ensureEmptyTd() will succeed def willEnsureEmptyTdSucceed(inDoc: NodeInfo): Boolean = findSelectedTd(inDoc) match { case Some(currentTd) ⇒ if (currentTd \\ * nonEmpty) currentTd followingSibling "*:td" match { case Seq(followingTd, _*) if followingTd \\ * nonEmpty ⇒ false case _ ⇒ true } else true case None ⇒ false } // Try to ensure that there is an empty td after the current location, inserting a new row if possible def ensureEmptyTd(inDoc: NodeInfo): Option[NodeInfo] = { findSelectedTd(inDoc) flatMap { currentTd ⇒ if (currentTd \\ * nonEmpty) { // There is an element in the current td, figure out what to do currentTd followingSibling "*:td" match { case Seq(followingTd, _*) if followingTd \\ * isEmpty ⇒ // Next td exists is empty: move to that one selectTd(followingTd) Some(followingTd) case Seq(followingTd, _*) ⇒ // Next td exists but is not empty: NOP for now None case _ ⇒ // We are the last cell of the row val nextTr = currentTd.getParent followingSibling "*:tr" take 1 val nextTrFirstTd = nextTr \\ "*:td" take 1 val newTd = if (nextTr.isEmpty || (nextTrFirstTd \\ *).nonEmpty) // The first cell of the next row is occupied, or there is no next row: insert new row insertRowBelow(currentTd.getParent) \\ "*:td" head else // There is a next row, and its first cell is empty: move to that one nextTrFirstTd.head selectTd(newTd) Some(newTd) } } else Some(currentTd) } } // @mi/@max can be simple AVTs, i.e. AVTs which cover the whole attribute, e.g. "{my-min}" // The main reason to do this instead of making @min/@max plain XPath expressions is that @max also supports the // literal "none" (and "unbounded" for backward compatibility). // NOTE: This doesn't check that the expression is syntactically correct, in particular it doesn't check that // curly brackets are absent or escaped within the AVT. private val SimpleAVTRegex = """^\\{(.+)\\}$""".r private def trimSimpleAVT(s: String) = s match { case SimpleAVTRegex(v) ⇒ v.replaceAllLiterally("{{", "{").replaceAllLiterally("}}", "}") case v ⇒ v } // NOTE: Value can be a simple AVT def getNormalizedMin(doc: NodeInfo, gridName: String) = findControlByName(doc, gridName) flatMap (_ attValueOpt "min") map trimSimpleAVT getOrElse "0" private val NoMaximum = Set("none", "unbounded") // NOTE: Value can be a simple AVT def getNormalizedMax(doc: NodeInfo, gridName: String) = findControlByName(doc, gridName) flatMap (_ attValueOpt "max") filterNot NoMaximum map trimSimpleAVT // XForms callers: get the grid's normalized max attribute, the empty sequence if no maximum def getNormalizedMaxOrEmpty(doc: NodeInfo, gridName: String) = getNormalizedMax(doc, gridName).orNull // Convert a min/max value to a value suitable to be written to the @min/@max attributes. // // - blank value → None // - non-positive integer value → None // - positive integer value → Some(int: String) // - any other value → Some("{expression}") def minMaxForAttribute(s: String) = s.trimAllToOpt flatMap { value ⇒ try { val int = value.toInt int > 0 option int.toString } catch { case _: NumberFormatException ⇒ val escaped = value.replaceAllLiterally("{", "{{").replaceAllLiterally("}", "}}") Some(s"{$escaped}") } } // Get the x/y position of a td given Cell information private def tdCoordinates(td: NodeInfo, cells: Seq[Seq[Cell]]): (Int, Int) = { // Search rows first, then cols // Another solution would be to store the position directly into Cell val y = td parent * precedingSibling "*:tr" size val x = cells(y) indexWhere (_.td == td) (x, y) } // Get the x/y position of a td given Cell information def tdCoordinates(td: NodeInfo): (Int, Int) = tdCoordinates(td, getAllRowCells(getContainingGrid(td))) // Whether there will be controls to delete if the cell is expanded def expandCellTouchesControl(td: NodeInfo): Boolean = { val allRowCells = getAllRowCells(getContainingGrid(td)) val (x, y) = tdCoordinates(td, allRowCells) val cell = allRowCells(y)(x) hasChildren(allRowCells(y + cell.rowspan)(x).td) } // Vertically expand the given cell def expandCell(td: NodeInfo): Unit = { val allRowCells = getAllRowCells(getContainingGrid(td)) val (x, y) = tdCoordinates(td, allRowCells) val cell = allRowCells(y)(x) val cellBelow = allRowCells(y + cell.rowspan)(x) // Increment rowspan cell.originalRowspan += cellBelow.originalRowspan // Delete cell below delete(cellBelow.td) } // Vertically shrink the given cell def shrinkCell(td: NodeInfo): Unit = { val grid = getContainingGrid(td) val allRowCells = getAllRowCells(grid) val (x, y) = tdCoordinates(td, allRowCells) val cell = allRowCells(y)(x) // Decrement rowspan attribute cell.originalRowspan -= 1 // Insert new td val posyToInsertInto = y + cell.rowspan - 1 val rowBelow = allRowCells(posyToInsertInto) val trToInsertInto = grid \\ "*:tr" apply posyToInsertInto val tdToInsertAfter = rowBelow.slice(0, x).reverse find (! _.missing) map (_.td) toSeq insert(into = trToInsertInto, after = tdToInsertAfter, origin = newTdElement(grid, nextId(grid, "tmp"))) } def initializeGrids(doc: NodeInfo): Unit = { // 1. Annotate all the grid tds of the given document with unique ids, if they don't have them already // We do this so that ids are stable as we move things around, otherwise if the XForms document is recreated // new automatic ids are generated for objects without id. def annotate(token: String, elements: Seq[NodeInfo]) = { // Get as many fresh ids as there are tds val ids = nextIds(doc, token, elements.size).toIterator // Add the missing ids elements foreach (ensureAttribute(_, "id", ids.next())) } // All grids and grid tds with no existing id val bodyElement = findFRBodyElement(doc) annotate("tmp", bodyElement \\\\ "*:grid" \\\\ "*:td" filterNot hasId) annotate("tmp", bodyElement \\\\ "*:grid" filterNot hasId) // 2. Select the first td if any bodyElement \\\\ "*:grid" \\\\ "*:td" take 1 foreach selectTd } def canDeleteGrid(grid: NodeInfo): Boolean = canDeleteContainer(grid) def deleteGridById(gridId: String) = deleteContainerById(canDeleteGrid, gridId) def canDeleteRow(grid: NodeInfo): Boolean = (grid \\ "*:tr").length > 1 def canDeleteCol(grid: NodeInfo): Boolean = ((grid \\ "*:tr").head \\ "*:td").length > 1 private val DeleteTests = List( "grid" → canDeleteGrid _, "row" → canDeleteRow _, "col" → canDeleteCol _ ) // Return all classes that need to be added to an editable grid def gridCanDoClasses(gridId: String): Seq[String] = { val grid = containerById(gridId) val deleteClasses = DeleteTests collect { case (what, test) if test(grid) ⇒ "fb-can-delete-" + what } val insertClasses = getGridSize(grid) < maxGridColumns list "fb-can-add-col" "fr-editable" :: deleteClasses ::: insertClasses } // Find the new td to select if we are removing the currently selected td def findNewTdToSelect(inDoc: NodeInfo, tdsToDelete: Seq[NodeInfo]) = findSelectedTd(inDoc) match { case Some(selectedTd) if tdsToDelete contains selectedTd ⇒ // Prefer trying following before preceding, as things move up and left when deleting // NOTE: Could improve this by favoring things "at same level", e.g. stay in grid if possible, then // stay in section, etc. (followingTd(selectedTd) filterNot (tdsToDelete contains _) headOption) orElse (precedingTds(selectedTd) filterNot (tdsToDelete contains _) headOption) case _ ⇒ None } // Return a td's preceding tds in the hierarchy of containers def precedingTds(td: NodeInfo) = { val preceding = td preceding "*:td" preceding intersect (findAncestorContainers(td).last descendant "*:td") } // Return a td's following tds in the hierarchy of containers def followingTd(td: NodeInfo) = { val following = td following "*:td" following intersect (findAncestorContainers(td).last descendant "*:td") } }
joansmith/orbeon-forms
src/main/scala/org/orbeon/oxf/fb/GridOps.scala
Scala
lgpl-2.1
20,810
object Test extends App { trait A { def f[T[_]](x : T[Int]) : T[Any] } class B extends A { def f[T[+_]](x : T[Int]) : T[Any] = x } class P[Y](var y : Y) val p = new P(1) val palias = (new B():A).f[P](p) palias.y = "hello" val z: Int = p.y }
AlexSikia/dotty
tests/untried/neg/t2066b.scala
Scala
bsd-3-clause
258
package houseprices.csv import houseprices.postcodes.ClasspathSource import houseprices.postcodes.Postcode import houseprices.postcodes.PostcodeRepo import houseprices.PricePaid import houseprices.Address import houseprices.postcodes.FileSource import org.slf4j.LoggerFactory class PricePaidCsvProcessor(val csvInputFile: String) { val log = LoggerFactory.getLogger(getClass) lazy val postcodeRepo = PostcodeRepo lazy val csv = FileSource(csvInputFile, "iso-8859-1").getLines() var currentLineNumber = 0 def foreach(rowProcessor: PricePaid => Unit): Unit = { for (row <- csv) processRow(row, rowProcessor) } private def processRow(rowString: String, pricePaidProcessor: PricePaid => Unit) = { try { currentLineNumber = currentLineNumber + 1 val cols = rowString.split(",").map(_.replaceAll("\\"", "")) val id = cols(0) val price = cols(1).toInt val date = cols(2) val postcode = Postcode(cols(3)) val primary = cols(7) val secondary = cols(8) val street = cols(9) val locality = cols(10) val town = cols(11) val district = cols(12) val county = cols(13) val location = postcodeRepo.latLon(postcode) pricePaidProcessor(PricePaid(id, price, date, Address(postcode.value, primary, secondary, street, locality, town, district, county, location))) } catch { case e: Exception => { log.info("lineNumber = {}", currentLineNumber) log.error(e.getMessage) } } } }
ayubmalik/houseprices
houseprices-core/src/main/scala/houseprices/csv/PricePaidCsvProcessor.scala
Scala
mit
1,519
object Test extends dotty.runtime.LegacyApp { val f = () => 5 def test(g: => Int): Unit = { val gFunc = g _ val isSameClosureClass = gFunc.getClass == f.getClass val isSame = gFunc eq f println("Is same closure class: "+isSameClosureClass+" is same closure: "+isSame) } test(f()) }
folone/dotty
tests/pending/run/t1247.scala
Scala
bsd-3-clause
307
/* * Copyright 2017 FOLIO Co., Ltd. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.folio_sec.reladomo.scala_api import com.gs.fw.common.mithra.{ MithraManager, MithraManagerProvider, TransactionalCommand } object TransactionProvider extends TransactionProvider { override def mithraManager: MithraManager = MithraManagerProvider.getMithraManager } /** * Utility which provides a Reladomo transaction. */ trait TransactionProvider { def mithraManager: MithraManager def withTransaction[A](action: Transaction => A): A = { // Keep this as-is for Scala 2.11 val command = new TransactionalCommand[A] { override def executeTransaction(tx: Transaction): A = action.apply(tx) } // MithraManager internally depends on the ThreadLocal<MithraTransaction> object for the current thread. mithraManager.executeTransactionalCommand(command) } }
folio-sec/reladomo-scala
reladomo-scala-common/src/main/scala/com/folio_sec/reladomo/scala_api/TransactionProvider.scala
Scala
apache-2.0
1,407
package mesosphere.marathon.tasks import com.codahale.metrics.MetricRegistry import mesosphere.marathon.MarathonSpec import mesosphere.marathon.Protos.Constraint import mesosphere.marathon.state.AppDefinition import mesosphere.marathon.state.PathId.StringPathId import mesosphere.marathon.tasks.TaskQueue.QueuedTask import scala.collection.immutable.Seq import scala.concurrent.duration.Deadline class TaskQueueTest extends MarathonSpec { val app1 = AppDefinition(id = "app1".toPath, constraints = Set.empty) val app2 = AppDefinition(id = "app2".toPath, constraints = Set(buildConstraint("hostname", "UNIQUE"), buildConstraint("rack_id", "CLUSTER", "rack-1"))) val app3 = AppDefinition(id = "app3".toPath, constraints = Set(buildConstraint("hostname", "UNIQUE"))) var queue: TaskQueue = null before { val metricRegistry = new MetricRegistry queue = new TaskQueue() } def buildConstraint(field: String, operator: String, value: String = ""): Constraint = { Constraint.newBuilder() .setField(field) .setOperator(Constraint.Operator.valueOf(operator)) .setValue(value) .build() } test("Priority") { queue.add(app1) queue.add(app2) queue.add(app3) assert(app2 == queue.poll().get.app, s"Should return $app2") assert(app3 == queue.poll().get.app, s"Should return $app3") assert(app1 == queue.poll().get.app, s"Should return $app1") } test("Retain") { queue.add(app1) queue.add(app2) queue.add(app3) assert(queue.list.size == 3, "Queue should contain 3 elements.") queue.retain { case QueuedTask(app, _) => app.id == app2.id } assert(queue.list.size == 1, "Queue should contain 1 elements.") } test("RemoveAll") { queue.add(app1) queue.add(app2) queue.add(app3) val res = queue.removeAll().map(_.app) assert(Vector(app2, app3, app1) == res, s"Should return all elements in correct order.") assert(queue.queue.isEmpty, "TaskQueue should be empty.") } test("AddAll") { val queue = new TaskQueue queue.addAll(Seq( QueuedTask(app1, Deadline.now), QueuedTask(app2, Deadline.now), QueuedTask(app3, Deadline.now) )) assert(queue.list.size == 3, "Queue should contain 3 elements.") assert(queue.count(app1) == 1, s"Queue should contain $app1.") assert(queue.count(app2) == 1, s"Queue should contain $app2.") assert(queue.count(app3) == 1, s"Queue should contain $app3.") } }
sttts/marathon
src/test/scala/mesosphere/marathon/tasks/TaskQueueTest.scala
Scala
apache-2.0
2,463
package scadla.utils.thread import scala.language.postfixOps import squants.space.LengthConversions._ /* Radius of ISO metric sizes. _x_y stands of x.y mm. */ object ISO { val M1 = (1 mm) / 2.0 val M1_2 = (1.2 mm) / 2.0 val M1_6 = (1.6 mm) / 2.0 val M2 = (2 mm) / 2.0 val M2_5 = (2.5 mm) / 2.0 val M3 = (3 mm) / 2.0 val M4 = (4 mm) / 2.0 val M5 = (5 mm) / 2.0 val M6 = (6 mm) / 2.0 val M8 = (8 mm) / 2.0 val M10 = (10 mm) / 2.0 val M12 = (12 mm) / 2.0 val M16 = (16 mm) / 2.0 val M20 = (20 mm) / 2.0 val M24 = (24 mm) / 2.0 val M30 = (30 mm) / 2.0 val M36 = (36 mm) / 2.0 val M42 = (42 mm) / 2.0 val M48 = (48 mm) / 2.0 val M56 = (56 mm) / 2.0 val M64 = (64 mm) / 2.0 }
dzufferey/scadla
src/main/scala/scadla/utils/thread/ISO.scala
Scala
apache-2.0
764
package scalax.collection.constrained /** * Predefined constraints that may be passed to constrained `Graph`s. * * @author Peter Empen */ package object constraints { }
opyate/scala-graph
constrained/src/main/scala/scalax/collection/constrained/constraints/package.scala
Scala
bsd-3-clause
174
package com.temportalist.morphadditions.common import com.temportalist.origin.api.common.proxy.IProxy import net.minecraft.entity.player.EntityPlayer import net.minecraft.tileentity.TileEntity import net.minecraft.world.World /** * * * @author TheTemportalist */ class ProxyCommon extends IProxy { override def register(): Unit = {} override def getClientElement(ID: Int, player: EntityPlayer, world: World, x: Int, y: Int, z: Int, tileEntity: TileEntity): AnyRef = null override def getServerElement(ID: Int, player: EntityPlayer, world: World, x: Int, y: Int, z: Int, tileEntity: TileEntity): AnyRef = null def tickPlayer(player: MorphedPlayer): Unit = { player.tick() //if (player.getCoolDown < 0) player.syncCoolDown() } }
TheTemportalist/MorphAdditions
src/main/scala/com/temportalist/morphadditions/common/ProxyCommon.scala
Scala
apache-2.0
753
/** * Copyright (C) 2016 Nicola Justus <[email protected]> * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ package de.thm.move.util import javafx.beans.property.ObjectProperty import javafx.event.{Event, EventHandler} import javafx.scene.Node import javafx.scene.control.{ChoiceBox, Label, MenuItem} import javafx.scene.input.{KeyCode, KeyEvent} import de.thm.move.implicits.FxHandlerImplicits._ import de.thm.move.views.anchors.Anchor import de.thm.move.views.shapes.{MovableShape, ResizableShape} /** General utils for working with JavaFx. */ object JFxUtils { /** Adds the given listener to the selectionProperty of the given ChoiceBox. * The eventHandler only gets the new value and discards the old value. */ def onChoiceboxChanged[A](box:ChoiceBox[A])(eventHandler: A => Unit): Unit = { box.getSelectionModel. selectedItemProperty.addListener { (_:A, newA:A) => eventHandler(newA) } } def binAnchorsLayoutToNodeLayout(node:Node)(anchors:Anchor*): Unit = { anchors.foreach { anchor => anchor.layoutXProperty().bind(node.layoutXProperty()) anchor.layoutYProperty().bind(node.layoutYProperty()) } } /** Copies the value of property2 into property1 */ def copyProperty[A](property1:ObjectProperty[A], property2:ObjectProperty[A]): Unit = property1.set(property2.get) /** Checks if the parent of given Node n is a MovableShape and * if it is the given function fn is called with the parent. If the parent * isn't a MovableShape the function fn is called with the given Node n. */ def withParentMovableElement[A](n:Node with MovableShape)(fn: MovableShape => A):A = (n, n.getParent) match { case (_,ms:MovableShape) => fn(ms) case (ms:MovableShape,_) => fn(ms) } def withResizableElement[A](n:Node)(fn: ResizableShape => A):A = (n, n.getParent) match { case (_,ms:ResizableShape) => fn(ms) case (ms:ResizableShape,_) => fn(ms) case _ => throw new IllegalArgumentException(s"that's not a resizableShape: $n") } /** Creates an EventHandler, calls inside handle() the given function fn and consumes the event afterwards. */ def withConsumedEvent[A <: Event](fn: A => Unit): EventHandler[A] = new EventHandler[A]() { override def handle(event: A): Unit = { fn(event) event.consume() } } /** Creates a new EventHandler[A] in which fn get's executed if predicate returns true. */ def filteredEventHandler[A <: Event](predicate: A => Boolean)(fn: => Unit): EventHandler[A] = new EventHandler[A]() { override def handle(a:A):Unit = if(predicate(a)) fn } /** A predicate for filtering a KeyEvent-Stream by the pressed KeyCode. */ val byKeyCode: KeyCode => KeyEvent => Boolean = code => kv => kv.getCode == code def addFontIcon(elem:MenuItem, iconIdent:String): MenuItem = { val lbl = new Label(iconIdent) lbl.getStyleClass().add("toolbar-button") elem.setGraphic(lbl) elem } }
THM-MoTE/MoVE
src/main/scala/de/thm/move/util/JFxUtils.scala
Scala
mpl-2.0
3,141
package com.landoop.streamreactor.connect.hive.source.config import java.util.Collections import cats.data.NonEmptyList import com.landoop.streamreactor.connect.hive.{DatabaseName, HadoopConfiguration, TableName, Topic} import com.landoop.streamreactor.connect.hive.kerberos.Kerberos import scala.collection.JavaConverters._ case class ProjectionField(name: String, alias: String) case class HiveSourceConfig(dbName: DatabaseName, kerberos: Option[Kerberos], hadoopConfiguration: HadoopConfiguration, tableOptions: Set[SourceTableOptions] = Set.empty, pollSize: Int = 1024) case class SourceTableOptions( tableName: TableName, topic: Topic, projection: Option[NonEmptyList[ProjectionField]] = None, limit: Int = Int.MaxValue ) object HiveSourceConfig { def fromProps(props: Map[String, String]): HiveSourceConfig = { val config = HiveSourceConfigDefBuilder(props.asJava) val tables = config.getKCQL.map { kcql => val fields = Option(kcql.getFields) .getOrElse(Collections.emptyList) .asScala .toList .map { field => ProjectionField(field.getName, field.getAlias) } val projection = fields match { case Nil => None case ProjectionField("*", "*") :: Nil => None case _ => NonEmptyList.fromList(fields) } SourceTableOptions( TableName(kcql.getSource), Topic(kcql.getTarget), projection, limit = if (kcql.getLimit < 1) Int.MaxValue else kcql.getLimit ) } HiveSourceConfig( dbName = DatabaseName(props(HiveSourceConfigConstants.DatabaseNameKey)), tableOptions = tables, kerberos = Kerberos.from(config, HiveSourceConfigConstants), hadoopConfiguration = HadoopConfiguration.from(config, HiveSourceConfigConstants), pollSize = props .getOrElse(HiveSourceConfigConstants.PollSizeKey, 1024) .toString .toInt ) } }
datamountaineer/stream-reactor
kafka-connect-hive-1.1/src/main/scala/com/landoop/streamreactor/connect/hive/source/config/HiveSourceConfig.scala
Scala
apache-2.0
2,119
/* * Copyright (c) 2015, Michael Lewis * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ package com.waioeka.sbt import com.waioeka.sbt.runner.CucumberRunner import cucumber.api.scala.{ScalaDsl, EN} import org.scalatest.Matchers class CucumberTestSuite extends CucumberRunner /** * MultiplicationSteps * */ class MultiplicationSteps extends ScalaDsl with EN with Matchers { var x : Int = 0 var y : Int = 0 var z : Int = 0 Given("""^a variable x with value (\d+)$""") { (arg0: Int) => x = arg0 } Given("""^a variable y with value (\d+)$""") { (arg0: Int) => y = arg0 } When("""^I multiply x \* y$""") { () => z = x * y } Then("""^I get (\d+)$""") { (arg0: Int) => z should be (arg0) } }
rrramiro/cucumber
cucumber-runner/src/test/scala/com/waioeka/sbt/MultiplicationSteps.scala
Scala
bsd-2-clause
2,007
/* * Copyright (C) 2014 - 2017 Contributors as noted in the AUTHORS.md file * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as * published by the Free Software Foundation, either version 3 of the * License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package com.wegtam.tensei.agent.helpers import java.time.{ LocalDate, LocalDateTime, LocalTime, OffsetDateTime } import argonaut._ import Argonaut._ /** * Contains codecs for argonaut to handle several classes from `java.time`. */ object ArgonautJavaTime { /** * A codec for decoding and encoding [[java.time.LocalDate]] instances. * * @return An argonaut json codec. */ implicit def LocalDateCodec: CodecJson[LocalDate] = CodecJson( (t: LocalDate) => jString(t.toString), c => for { t <- c.as[String] } yield LocalDate.parse(t) ) /** * A codec for decoding and encoding [[java.time.OffsetDateTime]] instances. * * @return An argonaut json codec. */ implicit def OffsetDateTimeCodec: CodecJson[OffsetDateTime] = CodecJson( (t: OffsetDateTime) => jString(t.toString), c => for { t <- c.as[String] } yield OffsetDateTime.parse(t) ) /** * A codec for decoding and encoding [[java.time.LocalDateTime]] instances. * * @return An argonaut json codec. */ implicit def LocalDateTimeCodec: CodecJson[LocalDateTime] = CodecJson( (t: LocalDateTime) => jString(t.toString), c => for { t <- c.as[String] } yield LocalDateTime.parse(t) ) /** * A codec for decoding and encoding [[java.time.LocalTime]] instances. * * @return An argonaut json codec. */ implicit def LocalTimeCodec: CodecJson[LocalTime] = CodecJson( (t: LocalTime) => jString(t.toString), c => for { t <- c.as[String] } yield LocalTime.parse(t) ) }
Tensei-Data/tensei-agent
src/main/scala/com/wegtam/tensei/agent/helpers/ArgonautJavaTime.scala
Scala
agpl-3.0
2,365
package jumpmicro.jmsangriagraphql.impl.startup import akka.actor.{Props} import jumpmicro.jmsangriagraphql.impl.actor.StartWebServerActor import jumpmicro.shared.util.boilerplate.StartupAkkaActorsBoilerplate import org.log4s.getLogger //: ------------------------------------------------------------------------------------- //: Copyright © 2017 Philip Andrew https://github.com/PhilAndrew All Rights Reserved. //: Released under the MIT License, refer to the project website for licence information. //: ------------------------------------------------------------------------------------- import acyclic.skipped class StartupAkkaActors extends StartupAkkaActorsBoilerplate { private[this] val logger = getLogger // Add your Akka Actors here and they will start when this OSGi component loads def akkaActors = Seq(Props[StartWebServerActor]) }
PhilAndrew/JumpMicro
JMSangriaGraphql/src/main/scala/jumpmicro/jmsangriagraphql/impl/startup/StartupAkkaActors.scala
Scala
mit
861
/** * Licensed to Big Data Genomics (BDG) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The BDG licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.bdgenomics.adam.util import org.apache.hadoop.fs.{ FileSystem, Path } import org.apache.parquet.avro.AvroParquetReader import org.apache.avro.generic.IndexedRecord import org.apache.spark.SparkContext import org.bdgenomics.utils.misc.HadoopUtil class ParquetFileTraversable[T <: IndexedRecord](sc: SparkContext, file: Path) extends Traversable[T] { def this(sc: SparkContext, file: String) = this(sc, new Path(file)) private val fs = file.getFileSystem(sc.hadoopConfiguration) val paths: List[Path] = { if (!fs.exists(file)) { throw new IllegalArgumentException("The path %s does not exist".format(file)) } val status = fs.getFileStatus(file) var paths = List[Path]() if (HadoopUtil.isDirectory(status)) { val files = fs.listStatus(file) files.foreach { file => if (file.getPath.getName.contains("part")) { paths ::= file.getPath } } } else if (fs.isFile(file)) { paths ::= file } else { throw new IllegalArgumentException("The path '%s' is neither file nor directory".format(file)) } paths } override def foreach[U](f: (T) => U) { for (path <- paths) { val parquetReader = new AvroParquetReader[T](path) var record = null.asInstanceOf[T] do { record = parquetReader.read() if (record != null.asInstanceOf[T]) { f(record) } } while (record != null.asInstanceOf[T]) parquetReader.close() } } }
tdanford/adam
adam-core/src/main/scala/org/bdgenomics/adam/util/ParquetFileTraversable.scala
Scala
apache-2.0
2,301
package be.cmpg.cancer import scopt.OptionParser import java.io.File import scala.collection.mutable.HashMap object MutualExclusivityPrepareInput extends App { case class InputConfig( maf: Option[File] = None, expression: Map[String, File] = Map(), seedGenesMutations: Int = 1, acceptedCorrelationQVal: Double = 0.05, acceptedGisticThreshold: Int = 2, maxQtyMutations: Int = 500, maxQtyCopyNumber: Int = 500, output: String = "SSAME_input") val helper = new CancerHelper val parser = new scopt.OptionParser[InputConfig]("SSA.ME.") { opt[String]('o', "outputPrefix") action { (x, c) => c.copy(output = x) } text ("The name to be used in the output files (XXX.m2 and XXX.glst).") opt[Int]('s', "seedGenesMutations") action { (x, c) => c.copy(seedGenesMutations = x) } text ("The number of mutated samples required in a gene to be included (default: 1)") opt[Int]("maxQtyMutations") action { (x, c) => c.copy(maxQtyMutations = x) } text ("Maximum number of mutations for a sample. If more, the sample is removed. (default: 500)") opt[Int]("maxQtyCopyNumber") action { (x, c) => c.copy(maxQtyCopyNumber = x) } text ("Maximum number of copy number variations for a sample. If more, the sample is removed. (default: 500)") opt[File]('m', "maf") action { (x, c) => c.copy(maf = Some(x)) } text ("Mutation .maf file") opt[Map[String, File]]('e', "expression") action { (x, c) => c.copy(expression = x) } text ("expression file and GISTIC files (... -e corr=<file1>,gistic=<file2> OR" + " -e gistic=<file1>,exp=<file2>,cnv_thresholds=<file3> ...)." + " gistic folder should be a GISTIC output folder containing the files: all_thresholded.by_genes.txt, amp_genes.conf_99.txt and del_genes.conf_99.txt. " + " Correlation file should be tab delimited file (gene, corr, p-value, q-value).") opt[Double]("acceptedCorrelationQVal") action { (x, c) => c.copy(acceptedCorrelationQVal = x) } text ("q-value of the correlation to take into account (default: 0.05)") opt[Double]("acceptedGisticThreshold") action { (x, c) => c.copy(acceptedCorrelationQVal = x) } text ("Absolute value threshold to accept a gistic call using all_thresholded.by_genes.txt file (default: 2)") } parser.parse(args, InputConfig()) match { case Some(config) => val genePatientMatrix = { //val genePatientMatrix = new HashMap[PolimorphismKey, Polimorphism] val mutationMatrix = if (config.maf.isDefined) { println("Loading mutation file...") helper.loadMaf(config.maf.get, maxQtyMutations = config.maxQtyMutations) } else { Map[PolimorphismKey, Polimorphism]() } val copyNumberMatrix = if (config.expression.size >= 2) { println("Loading expression...") helper.loadExpression(config.expression, config.acceptedCorrelationQVal, config.acceptedGisticThreshold, config.maxQtyCopyNumber) } else { Map[PolimorphismKey, Polimorphism]() } copyNumberMatrix ++ mutationMatrix //genePatientMatrix.toMap } helper.printMutationMatrixFiles(config.output, config.seedGenesMutations, genePatientMatrix) println("Mutation matrix file printed: "+config.output) case None => parser.showUsageAsError // arguments are bad, error message will have been displayed } }
spulido99/SSA
src/main/scala/be/cmpg/cancer/MutualExclusivityPrepareInput.scala
Scala
gpl-2.0
3,560
/* Copyright 2009-2016 EPFL, Lausanne */ package leon package genc package ir import PrimitiveTypes._ /* * Collection of operators for IR with their precedence from the Scala language perspective. */ private[genc] object Operators { // NOTE It is possible to have more than one "From" or several "To", but this is not expected! // (It will compile but ungracefully do not what is expected...) // // NOTE It is also expected that ToIntegral is combined with FromIntegral. // // NOTE The subset of operators supported here has luckily the same precedence // rules in Scala/Java and C. We base the numbering here on the C one: // http://en.cppreference.com/w/c/language/operator_precedence#Literals sealed trait Operator { this: From with To => val symbol: String val precedence: Int override def toString = symbol } sealed trait From trait FromIntegral extends From trait FromLogical extends From trait FromPairOfT extends From // twice the same argument type, includes both FromIntegral and FromLogical sealed trait To trait ToIntegral extends To trait ToLogical extends To trait Integral extends FromIntegral with ToIntegral trait Logical extends FromLogical with ToLogical trait Ordered extends FromIntegral with ToLogical abstract class UnaryOperator(val symbol: String, val precedence: Int) extends Operator { this: From with To => } abstract class BinaryOperator(val symbol: String, val precedence: Int) extends Operator { this: From with To => } case object Plus extends BinaryOperator("+", 4) with Integral case object UMinus extends UnaryOperator("-", 2) with Integral case object Minus extends BinaryOperator("-", 4) with Integral case object Times extends BinaryOperator("*", 3) with Integral case object Div extends BinaryOperator("/", 3) with Integral case object Modulo extends BinaryOperator("%", 3) with Integral case object LessThan extends BinaryOperator("<", 6) with Ordered case object LessEquals extends BinaryOperator("<=", 6) with Ordered case object GreaterEquals extends BinaryOperator(">=", 6) with Ordered case object GreaterThan extends BinaryOperator(">", 6) with Ordered case object Equals extends BinaryOperator("==", 7) with FromPairOfT with ToLogical case object NotEquals extends BinaryOperator("!=", 7) with FromPairOfT with ToLogical case object Not extends UnaryOperator("!", 2) with Logical case object And extends BinaryOperator("&&", 11) with Logical case object Or extends BinaryOperator("||", 12) with Logical case object BNot extends UnaryOperator("~", 2) with Integral case object BAnd extends BinaryOperator("&", 8) with Integral // NOTE to avoid warning from compilers, case object BXor extends BinaryOperator("^", 8) with Integral // we make sure to add parenthesis case object BOr extends BinaryOperator("|", 8) with Integral // for those three operators... // ... even though it's safe no to add parenthesis. case object BLeftShift extends BinaryOperator("<<", 5) with Integral case object BRightShift extends BinaryOperator(">>", 5) with Integral }
epfl-lara/leon
src/main/scala/leon/genc/ir/Operators.scala
Scala
gpl-3.0
3,141
package com.datasift.dropwizard.scala.jdbi.tweak import org.scalatest.FlatSpec /** * Tests [[com.datasift.dropwizard.scala.jdbi.tweak.OptionContainerFactory]] */ class OptionContainerFactorySpec extends FlatSpec { val factory = new OptionContainerFactory "OptionContainerFactory for Ints" should "Accepts Options" in { assert(factory.accepts(classOf[Option[Int]])) } it should "not accept Lists" in { assert(!factory.accepts(classOf[List[Int]])) } it should "build a None by default" in { assert(factory.newContainerBuilderFor(classOf[Int]) .build() === None) } it should "Builds a Some of an Int on demand" in { assert(factory.newContainerBuilderFor(classOf[Int]).add(123) .build() === Some(123)) } it should "Builds a Some of the last Int on demand" in { assert(factory.newContainerBuilderFor(classOf[Int]).add(123).add(456) .build() === Some(456)) } "OptionContainerFactory for Strings" should "accept Options" in { assert(factory.accepts(classOf[Option[String]])) } it should "Doesn't accept Lists" in { assert(!factory.accepts(classOf[List[String]])) } it should "Builds a None by default" in { assert(factory.newContainerBuilderFor(classOf[String]) .build() === None) } it should "Builds a Some of a String on demand" in { assert(factory.newContainerBuilderFor(classOf[String]).add("abc") .build() === Some("abc")) } it should "Builds a Some of the last String on demand" in { assert(factory.newContainerBuilderFor(classOf[String]).add("abc").add("def") .build() === Some("def")) } }
datasift/dropwizard-scala
jdbi/src/test/scala/com/datasift/dropwizard/scala/jdbi/tweak/OptionContainerFactorySpec.scala
Scala
apache-2.0
1,622
package core import akka.actor.{ActorSystem, PoisonPill, Props} import akka.cluster.Cluster import akka.contrib.pattern.{ClusterSingletonManager, ClusterSingletonProxy} import akka.pattern.ask import com.typesafe.config.ConfigFactory import core.stress.SimpleClusterListener.IsRemoved import core.stress._ import org.slf4j.LoggerFactory import scala.concurrent.Await import scala.concurrent.duration._ /** * Core is type containing the ``system: ActorSystem`` member. This enables us to use it in our * apps as well as in our tests. */ trait Core { implicit def system: ActorSystem } /** * This trait implements ``Core`` by starting the required ``ActorSystem`` and registering the * termination handler to stop the system when the JVM exits. */ trait BootedCore extends Core { this: App => val log = LoggerFactory.getLogger(getClass) val nodePort: Int = Option(System.getProperty("nodePort")).getOrElse("2551").toInt val conf = ConfigFactory.parseString("akka.remote.netty.tcp.port=" + nodePort) .withFallback(ConfigFactory.load()) .withFallback(ConfigFactory.load("aws.conf")) /** * Construct the ActorSystem we will use in our application */ implicit lazy val system = ActorSystem("ClusterSystem", conf) val listener = system.actorOf(Props(classOf[SimpleClusterListener])) /** * Ensure that the constructed ActorSystem is shut down when the JVM shuts down */ sys.addShutdownHook({ Cluster(system).leave(Cluster(system).selfAddress) blockUntilRemoved(10) system.shutdown() }) implicit val timeout: akka.util.Timeout = 3 seconds import scala.concurrent.ExecutionContext.Implicits.global private def blockUntilRemoved(retriesLeft: Long): Unit = { if (retriesLeft > 0) { val futureResp = listener ? IsRemoved(Cluster(system).selfAddress) val dd = futureResp.map { resp => if (!resp.asInstanceOf[Boolean]) { Thread.sleep(1000) blockUntilRemoved(retriesLeft - 1) } else { log.info("Node removed itself from the cluster!") } } Await.result(dd, atMost = 3 seconds) } } } /** * This trait contains the actors that make up our application; it can be mixed in with * ``BootedCore`` for running code or ``TestKit`` for unit and integration tests. */ trait CoreActors { this: Core => system.actorOf( ClusterSingletonManager.props( singletonProps = Props(classOf[JournaledActor]), singletonName = "writer", terminationMessage = PoisonPill, role = None), "singleton") val writer = system.actorOf( ClusterSingletonProxy.props( singletonPath = "/user/singleton/writer", role = None), name = "writerProxy") val reader = system.actorOf(Props[JournaledView]) val reportCollector = system.actorOf(Props(new ReportCollector)) val tester = system.actorOf(Props(new StressTester(writer, reader, reportCollector))) }
kciesielski/akka-journal-stress
src/main/scala/core/core.scala
Scala
apache-2.0
2,937
/* * Copyright (C) 2017. RandomCoder <[email protected]> * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as * published by the Free Software Foundation, either version 3 of the * License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * */ package uk.co.randomcoding.cucumber.generator import java.io.{File, FileWriter} import scala.xml.{NodeSeq, XML} package object writer { def writeHtml(html: NodeSeq, targetFile: File) = { val writer = new FileWriter(targetFile) writer.write("<!DOCTYPE html>\\n") XML.write(writer, html.head, "UTF-8", false, null) writer.close() } }
randomcoder/gherkin-converter
src/main/scala/uk/co/randomcoding/cucumber/generator/writer/package.scala
Scala
agpl-3.0
1,101
package cilib import scalaz._ import scalaz.Ordering._ import scalaz.std.anyVal._ import scalaz.syntax.equal._ sealed trait Fit { def fold[Z](penalty: Adjusted => Z, valid: Feasible => Z, infeasible: Infeasible => Z): Z = this match { case p @ Adjusted(_,_) => penalty(p) case v @ Feasible(_) => valid(v) case x @ Infeasible(_,_) => infeasible(x) } } final case class Feasible(v: Double) extends Fit final case class Infeasible(v: Double, violations: Int) extends Fit { def adjust(f: Double => Double) = Adjusted(this, f(v)) } final case class Adjusted private[cilib] (original: Infeasible, adjust: Double) extends Fit @annotation.implicitNotFound("No instance of Fitness[${F},${A}] is available in current scope.") trait Fitness[F[_],A] { def fitness(a: F[A]): Maybe[Objective[A]] } abstract class Comparison(val opt: Opt) { def apply[F[_],A](a: F[A], b: F[A])(implicit F: Fitness[F,A]): F[A] } object Comparison { def compare[F[_],A](x: F[A], y: F[A])(implicit F: Fitness[F,A]): Reader[Comparison, F[A]] = Reader { _.apply(x, y) } def dominance(o: Opt) = new Comparison(o) { def apply[F[_],A](a: F[A], b: F[A])(implicit F: Fitness[F,A]) = { def fromOrdering(f1: Objective[A], f2: Objective[A]): F[A] = o.order(f1, f2) match { case LT => b case GT => a case EQ => a // ??? No difference? } val result = for { f1 <- F.fitness(a) f2 <- F.fitness(b) // c1 <- f1.violations // c2 <- f2.violations } yield { val c1l = f1.violations.length val c2l = f2.violations.length // Both feasible (i.e: no constraint violations) compare Fit if (c1l == 0 && c2l == 0) { /*println("no constraints violated") ;*/ fromOrdering(f1, f2) } else if (c1l == 0) a else if (c2l == 0) b else if (c1l < c2l) a else if (c2l < c1l) b else fromOrdering(f1, f2) } result.getOrElse(a) // ??? } } // Dominance is the generalised form of normal quality comparisons, taking constraint violations into account def quality(o: Opt) = dominance(o) def fittest[F[_],A](x: F[A], y: F[A])(implicit F: Fitness[F,A]): Reader[Comparison, Boolean] = Reader(a => scalaz.Maybe.maybeOrder(a.opt.objectiveOrder[A]).order(F.fitness(x), F.fitness(y)) === GT) } sealed trait Opt { def objectiveOrder[A]: Order[Objective[A]] def order[A](x: Objective[A], y: Objective[A]): Ordering = objectiveOrder[A].order(x, y) } final case object Min extends Opt { private val D = Order[Double].reverseOrder private def fitCompare(x: Fit, y: Fit) = (x, y) match { case (Adjusted(Infeasible(_,_), a), Adjusted(Infeasible(_,_), b)) => D.order(a, b) case (Adjusted(Infeasible(_,_), a), Feasible(b)) => D.order(a, b) case (Adjusted(Infeasible(_,_), _), Infeasible(_,_)) => GT case (Feasible(a), Adjusted(Infeasible(_, _), b)) => D.order(a, b) case (Feasible(_), Infeasible(_,_)) => GT case (Feasible(a), Feasible(b)) => { /*println("in feasible") ;*/ D.order(a, b) } case (Infeasible(_,_), Adjusted(_,_)) => LT case (Infeasible(_,_), Feasible(_)) => LT case (Infeasible(a,as), Infeasible(b,bs)) => if (as < bs) LT else if (as > bs) GT else EQ } def objectiveOrder[A] = new Order[Objective[A]] { def order(x: Objective[A], y: Objective[A]) = (x, y) match { case (Single(f1,_), Single(f2,_)) => fitCompare(f1, f2) case (Multi(xs), Multi(ys)) => val z = xs.zip(ys) val x = z.forall { case (a,b) => val r = fitCompare(a.f, b.f) r == LT || r == EQ } val y = z.exists { case (a,b) => fitCompare(a.f, b.f) == LT } if (x && y) LT else if (x) EQ else GT case _ => sys.error("Cannot compare multiple objective against a single objective") } } } final case object Max extends Opt { private val D = Order[Double] protected def fitCompare(x: Fit, y: Fit) = (x, y) match { case (Adjusted(_, a), Adjusted(_, b)) => D.order(a, b) case (Adjusted(_, a), Feasible(b)) => D.order(a, b) case (Adjusted(_,_), Infeasible(_,_)) => GT case (Feasible(a), Adjusted(_, b)) => D.order(a, b) case (Feasible(a), Feasible(b)) => D.order(a, b) case (Feasible(_), Infeasible(_,_)) => GT case (Infeasible(_,_), Adjusted(_,_)) => LT case (Infeasible(_,_), Feasible(_)) => LT case (Infeasible(a, as), Infeasible(b, bs)) => if (as < bs) GT else if (as > bs) LT else EQ } def objectiveOrder[A] = new Order[Objective[A]] { def order(x: Objective[A], y: Objective[A]) = (x, y) match { case (Single(f1, v1), Single(f2, v2)) => fitCompare(f1, f2) case (Multi(xs), Multi(ys)) => val z = xs.zip(ys) val x = z.forall { case (a,b) => val r = fitCompare(a.f, b.f) r == GT || r == EQ } val y = z.exists { case (a,b) => fitCompare(a.f, b.f) == GT } if (x && y) GT else if (x) EQ else LT case _ => sys.error("Cannot compare multiple objective against a single objective") } } }
robgarden/cilib
core/src/main/scala/cilib/Fitness.scala
Scala
gpl-3.0
5,244
/* * Copyright 2011-2017 Chris de Vreeze * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package eu.cdevreeze.yaidom.convert import java.nio.charset.Charset import scala.collection.immutable import eu.cdevreeze.yaidom.core.QName import eu.cdevreeze.yaidom.core.QNameProvider import eu.cdevreeze.yaidom.core.Scope import eu.cdevreeze.yaidom.core.XmlDeclaration import eu.cdevreeze.yaidom.simple.Comment import eu.cdevreeze.yaidom.simple.ConverterToDocument import eu.cdevreeze.yaidom.simple.ConverterToElem import eu.cdevreeze.yaidom.simple.Document import eu.cdevreeze.yaidom.simple.Elem import eu.cdevreeze.yaidom.simple.EntityRef import eu.cdevreeze.yaidom.simple.Node import eu.cdevreeze.yaidom.simple.ProcessingInstruction import eu.cdevreeze.yaidom.simple.Text /** * Converter from Scala XML nodes to yaidom nodes, in particular from `scala.xml.Elem` to [[eu.cdevreeze.yaidom.simple.Elem]] and * from `scala.xml.Document` to [[eu.cdevreeze.yaidom.simple.Document]]. * * This converter is handy when one wants to use XML literals (as offered by standard Scala XML) in combination with yaidom. * * This converter regards the input more like an "Elem" than an "ElemBuilder", in that scopes instead of namespace * declarations are extracted from input "elements", and in that conversions to yaidom Elems do not take any additional parent * scope parameter. On the other hand, Scala XML NamespaceBindings try to be a bit of both yaidom Scopes and yaidom Declarations. * * '''Beware that conversions from Scala XML Elems to yaidom Elems will fail if the Scala XML Elem uses namespaces in element and/or * attribute names that have not been declared!''' * * @author Chris de Vreeze */ trait ScalaXmlToYaidomConversions extends ConverterToDocument[scala.xml.Document] with ConverterToElem[scala.xml.Elem] { /** * Converts an `scala.xml.Document` to a [[eu.cdevreeze.yaidom.simple.Document]]. The resulting yaidom Document has no document URI. * * If the input Scala XML Document is not namespace-valid, an exception will be thrown. */ final def convertToDocument(v: scala.xml.Document): Document = { val xmlVersionOption = v.version val xmlDeclOption = xmlVersionOption map { xmlVersion => XmlDeclaration.fromVersion(xmlVersion). withEncodingOption(v.encoding.map(cs => Charset.forName(cs))). withStandaloneOption(v.standAlone) } Document( uriOption = None, xmlDeclarationOption = xmlDeclOption, children = v.children.toVector flatMap { case e: scala.xml.Elem => Some(convertToElem(v.docElem.asInstanceOf[scala.xml.Elem])) case pi: scala.xml.ProcInstr => Some(convertToProcessingInstruction(pi)) case c: scala.xml.Comment => Some(convertToComment(c)) case _ => None }) } /** * Converts an `scala.xml.Elem` to an [[eu.cdevreeze.yaidom.simple.Elem]]. * * If the input Scala XML Elem is not namespace-valid, an exception will be thrown. * * The result must be the same as `simple.Elem(ScalaXmlElem(v))`. */ final def convertToElem(v: scala.xml.Elem): Elem = { val qname: QName = toQName(v) val attributes: immutable.IndexedSeq[(QName, String)] = extractAttributes(v.attributes) val scope: Scope = extractScope(v.scope) // Recursive (not tail-recursive) val childSeq = v.child.toIndexedSeq flatMap { (n: scala.xml.Node) => convertToNodeOption(n) } new Elem( qname = qname, attributes = attributes, scope = scope, children = childSeq) } /** * Converts an `scala.xml.Node` to an optional [[eu.cdevreeze.yaidom.simple.Node]]. */ final def convertToNodeOption(v: scala.xml.Node): Option[Node] = { v match { case e: scala.xml.Elem => Some(convertToElem(e)) case cdata: scala.xml.PCData => Some(convertToCData(cdata)) case t: scala.xml.Text => Some(convertToText(t)) case at: scala.xml.Atom[_] => // Possibly an evaluated "parameter" in an XML literal Some(Text(text = at.data.toString, isCData = false)) case pi: scala.xml.ProcInstr => Some(convertToProcessingInstruction(pi)) case er: scala.xml.EntityRef => Some(convertToEntityRef(er)) case c: scala.xml.Comment => Some(convertToComment(c)) case _ => None } } /** Converts an `scala.xml.Text` to a [[eu.cdevreeze.yaidom.simple.Text]] */ final def convertToText(v: scala.xml.Text): Text = Text(text = v.data, isCData = false) /** Converts an `scala.xml.PCData` to a [[eu.cdevreeze.yaidom.simple.Text]] */ final def convertToCData(v: scala.xml.PCData): Text = Text(text = v.data, isCData = true) /** Converts an `scala.xml.ProcInstr` to a [[eu.cdevreeze.yaidom.simple.ProcessingInstruction]] */ final def convertToProcessingInstruction(v: scala.xml.ProcInstr): ProcessingInstruction = ProcessingInstruction(v.target, v.proctext) /** Converts an `scala.xml.EntityRef` to a [[eu.cdevreeze.yaidom.simple.EntityRef]] */ final def convertToEntityRef(v: scala.xml.EntityRef): EntityRef = EntityRef(v.entityName) /** Converts an `scala.xml.Comment` to a [[eu.cdevreeze.yaidom.simple.Comment]] */ final def convertToComment(v: scala.xml.Comment): Comment = Comment(v.commentText) /** Converts attributes, given as `scala.xml.MetaData`, to an `immutable.IndexedSeq[(QName, String)]`. */ final def extractAttributes(attrs: scala.xml.MetaData): immutable.IndexedSeq[(QName, String)] = { attrs.toIndexedSeq map { (attr: scala.xml.MetaData) => val attrValue = attr.value val attrValueText = if (attrValue.size >= 1) attrValue(0).text else "" (toQName(attr) -> attrValueText) } } /** * Converts the `scala.xml.NamespaceBinding` to a yaidom `Scope`. * * This implementation is brittle because of bug: SI 6939: Namespace binding (xmlns) is duplicated if a child redefines a prefix. * (see https://issues.scala-lang.org/browse/SI-6939 and https://github.com/scala/scala/pull/1858). Still, this implementation * tries to work around that bug. */ // scalastyle:off null final def extractScope(scope: scala.xml.NamespaceBinding): Scope = { if ((scope eq null) || (scope.uri eq null) || (scope == scala.xml.TopScope)) { Scope.Empty } else { val prefix = if (scope.prefix eq null) "" else scope.prefix // Recursive call (not tail-recursive), and working around the above-mentioned bug val parentScope = extractScope(scope.parent) if (scope.uri.isEmpty) { // Namespace undeclaration (which, looking at the NamespaceBinding API doc, seems not to exist) // Works for the default namespace too (knowing that "edited" prefix is not null but can be empty) parentScope -- Set(prefix) } else { // Works for namespace overrides too parentScope ++ Scope.from(prefix -> scope.uri) } } } /** Extracts the `QName` of an `scala.xml.Elem` */ // scalastyle:off null final def toQName(v: scala.xml.Elem)(implicit qnameProvider: QNameProvider): QName = { if (v.prefix eq null) qnameProvider.getUnprefixedQName(v.label) else qnameProvider.getQName(v.prefix, v.label) } /** Extracts the `QName` of an attribute as `scala.xml.MetaData`. */ final def toQName(v: scala.xml.MetaData)(implicit qnameProvider: QNameProvider): QName = { if (v.isPrefixed) qnameProvider.parseQName(v.prefixedKey) else qnameProvider.parseQName(v.key) } }
dvreeze/yaidom
shared/src/main/scala/eu/cdevreeze/yaidom/convert/ScalaXmlToYaidomConversions.scala
Scala
apache-2.0
8,005
object SCL5737 { object Test extends Enumeration { type Test = Value val Bar, Baz, Qux = Value } class Test2 { val map: Map[Test.Value, Int] = /*start*/Map(Test.Bar -> 1, Test.Baz -> 2, Test.Qux -> 3)/*end*/ } } //Map[SCL5737.Test.Value, Int]
ilinum/intellij-scala
testdata/typeInference/bugs5/SCL5737.scala
Scala
apache-2.0
249
package models import helpers.UnitSpec import org.joda.time.DateTime import org.joda.time.format.DateTimeFormat import scala.language.postfixOps import scala.util.Try import uk.gov.dvla.vehicles.presentation.common.views.models.DayMonthYear class DayMonthYearSpec extends UnitSpec { val validFourDigitYear = 1984 "DayMonthYear" should { "return the correct 'yyyy-MM-dd' date format" in { val dmy = DayMonthYear(1, 1, validFourDigitYear) dmy.`yyyy-MM-dd` shouldEqual validFourDigitYear.toString + "-01-01" } "Format to dd/MM/yyyy of 26-6-validFourDigitYear should give 26/06/validFourDigitYear" in { val dmy = DayMonthYear(26, 6, validFourDigitYear) dmy.`dd/MM/yyyy` shouldEqual "26/06/" + validFourDigitYear.toString } "Format to yyyy-MM-dd of 26-6-validFourDigitYear should give validFourDigitYear-06-26" in { val dmy = DayMonthYear(26, 6, validFourDigitYear) dmy.`yyyy-MM-dd` shouldEqual validFourDigitYear + "-06-26" } "Format to yyyy-MM-dd of empty DayMonthYear should give empty" in { val dmy = DayMonthYear(0, 0, 0) dmy.`yyyy-MM-dd` should equal("") } "Format to yyyy-MM-dd'T'HH:mm:00" in { val dmy = DayMonthYear(30, 5, validFourDigitYear, Some(9), Some(45)) dmy.`yyyy-MM-dd'T'HH:mm:00` should equal(validFourDigitYear + "-05-30T09:45:00") } "format as '23 September, 2013' " in { val dmy = DayMonthYear(23, 9, validFourDigitYear) dmy.`dd month, yyyy` shouldEqual "23 September, " + validFourDigitYear } """accept format "01 September, 2001" """ in { Try(DateTimeFormat.forPattern("dd MMMM, yyyy").parseDateTime("01 September, 2001")).isSuccess should equal(true) } """accept format "01 September 2001" """ in { Try(DateTimeFormat.forPattern("dd MMMM yyyy").parseDateTime("01 September 2001")).isSuccess should equal(true) } """reject format "31 February 2001" """ in { Try(DateTimeFormat.forPattern("dd MMMM yyyy").parseDateTime("31 February 2001")).isFailure should equal(true) } "convert to a valid date time" in { val year = validFourDigitYear val month = 11 val day = 25 val dayMonthYear = DayMonthYear(day = day, month = month, year = year) dayMonthYear.toDateTime.isEmpty should equal(false) // Indicates we get a Some[T] back from the Option[T] dayMonthYear.toDateTime.get should equal(new DateTime(year, month, day, 0, 0)) } "not convert to a valid date time when DayMonthYear contains invalid day" in { val dayMonthYear = DayMonthYear(day = 32, month = 11, year = validFourDigitYear) dayMonthYear.toDateTime.isEmpty should equal(true) // Indicates we get a None back from the Option[T] } "not convert to a valid date time when DayMonthYear contains invalid month" in { val dayMonthYear = DayMonthYear(day = 25, month = 13, year = validFourDigitYear) dayMonthYear.toDateTime.isEmpty should equal(true) } "not convert to a valid date time when DayMonthYear contains invalid year" in { val tooBigYear : Int = org.joda.time.Years.MAX_VALUE.getYears + 1 val dayMonthYear = DayMonthYear(day = 25, month = 11, year = tooBigYear) dayMonthYear.toDateTime.isEmpty should equal(true) } } "compareTo" should { "return less than a date 1 year in the future" in { val present = DayMonthYear(1, 1, validFourDigitYear) val futureDate = DayMonthYear(1, 1, validFourDigitYear + 1) present < futureDate shouldEqual true present > futureDate shouldEqual false } "return less than a date 1 month in the future" in { val present = DayMonthYear(1, 1, validFourDigitYear) val futureDate = DayMonthYear(1, 2, validFourDigitYear) present < futureDate shouldEqual true present > futureDate shouldEqual false } "return less than a date 1 day in the future" in { val present = DayMonthYear(1, 1, validFourDigitYear) val futureDate = DayMonthYear(2, 1, validFourDigitYear) present < futureDate shouldEqual true present > futureDate shouldEqual false } "return less than a date 1 hour in the future" in { val present = DayMonthYear(1, 1, validFourDigitYear, Some(0)) val futureDate = DayMonthYear(1, 1, validFourDigitYear, Some(1)) present < futureDate shouldEqual true present > futureDate shouldEqual false } "return less than a date 1 minute in the future" in { val present = DayMonthYear(1, 1, validFourDigitYear, Some(0), Some(0)) val futureDate = DayMonthYear(1, 1, validFourDigitYear, Some(0), Some(1)) present < futureDate shouldEqual true present > futureDate shouldEqual false } "return less than a date when hour not specified in present" in { val present = DayMonthYear(1, 1, validFourDigitYear, None) val futureDate = DayMonthYear(1, 1, validFourDigitYear, Some(1)) present < futureDate shouldEqual true present > futureDate shouldEqual false } "return less than a date when minute not specified in present" in { val present = DayMonthYear(1, 1, validFourDigitYear, Some(0), None) val futureDate = DayMonthYear(1, 1, validFourDigitYear, Some(0), Some(1)) present < futureDate shouldEqual true present > futureDate shouldEqual false } "return false when dates are equal" in { val present = DayMonthYear(1, 1, validFourDigitYear, Some(0), Some(0)) val futureDate = DayMonthYear(1, 1, validFourDigitYear, Some(0), Some(0)) present.compare(futureDate) shouldEqual 0 present < futureDate shouldEqual false present > futureDate shouldEqual false } "return false when dates are equal but no hours" in { val present = DayMonthYear(1, 1, validFourDigitYear, None) val futureDate = DayMonthYear(1, 1, validFourDigitYear, None) present.compare(futureDate) shouldEqual 0 present < futureDate shouldEqual false present > futureDate shouldEqual false } "return false when dates are equal but no minutes" in { val present = DayMonthYear(1, 1, validFourDigitYear, Some(0), None) val futureDate = DayMonthYear(1, 1, validFourDigitYear, Some(0), None) present.compare(futureDate) shouldEqual 0 present < futureDate shouldEqual false present > futureDate shouldEqual false } "return greater than a date when hour not specified in future" in { val present = DayMonthYear(1, 1, validFourDigitYear, Some(1)) val futureDate = DayMonthYear(1, 1, validFourDigitYear, None) present < futureDate shouldEqual false present > futureDate shouldEqual true } "return greater than a date when minute not specified in future" in { val present = DayMonthYear(1, 1, validFourDigitYear, Some(0), Some(1)) val futureDate = DayMonthYear(1, 1, validFourDigitYear, Some(0), None) present < futureDate shouldEqual false present > futureDate shouldEqual true } } "toDateTime" should { "return None given an invalid date" in { DayMonthYear(32, 13, validFourDigitYear).toDateTime match { case Some(_) => fail("should not have parsed") case None => } } "return Some given a valid date" in { DayMonthYear(1, 1, validFourDigitYear).toDateTime match { case Some(_) => case None => fail("should have parsed") } } } "minus" should { "return unchanged if DateTime is invalid" in { val dmy = DayMonthYear(32, 13, validFourDigitYear) (dmy - 1 day) shouldEqual dmy } "subtract day" in { val dmy = DayMonthYear(26, 6, validFourDigitYear) (dmy - 1 day) shouldEqual DayMonthYear(25, 6, validFourDigitYear) } "subtract week" in { val dmy = DayMonthYear(26, 6, validFourDigitYear) (dmy - 1 week) shouldEqual DayMonthYear(19, 6, validFourDigitYear) } "subtract week with change in month" in { val dmy = DayMonthYear(26, 6, validFourDigitYear) (dmy - 1 week) shouldEqual DayMonthYear(19, 6, validFourDigitYear) } "subtract week with change in month and year" in { val dmy = DayMonthYear(1, 1, validFourDigitYear) (dmy - 1 week) shouldEqual DayMonthYear(25, 12, validFourDigitYear - 1) } "subtract month" in { val dmy = DayMonthYear(26, 6, validFourDigitYear) (dmy - 1 month) shouldEqual DayMonthYear(26, 5, validFourDigitYear) } "subtract month with chnage in day" in { val dmy = DayMonthYear(31, 10, validFourDigitYear) (dmy - 1 month) shouldEqual DayMonthYear(30, 9, validFourDigitYear) } "subtract days" in { val dmy = DayMonthYear(26, 6, validFourDigitYear) (dmy - 6 days) shouldEqual DayMonthYear(20, 6, validFourDigitYear) (dmy - 6 day) shouldEqual DayMonthYear(20, 6, validFourDigitYear) } "subtract weeks" in { val dmy = DayMonthYear(26, 6, validFourDigitYear) (dmy - 2 weeks) shouldEqual DayMonthYear(12, 6, validFourDigitYear) } "subtract weeks with change in month" in { val dmy = DayMonthYear(8, 12, validFourDigitYear) (dmy - 2 weeks) shouldEqual DayMonthYear(24, 11, validFourDigitYear) } "subtract months without year decrement" in { val dmy = DayMonthYear(26, 6, validFourDigitYear) (dmy - 5 months) shouldEqual DayMonthYear(26, 1, validFourDigitYear) } "subtract months giving year decrement" in { val dmy = DayMonthYear(26, 6, validFourDigitYear) (dmy - 6 months) shouldEqual DayMonthYear(26, 12, validFourDigitYear - 1) } "subtract years" in { val dmy = DayMonthYear(26, 6, validFourDigitYear) (dmy - 1 years) shouldEqual DayMonthYear(26, 6, validFourDigitYear - 1) } //complex subtration - confirmed via date calculator http://www.timeanddate.com/date/dateadd.html "subtract days, months, weeks and year with a change in each" in { val dmy = DayMonthYear(26, 6, validFourDigitYear) ((((dmy - 60 days) - 5 weeks) - 13 months) - 2 year) shouldEqual DayMonthYear(23, 2, validFourDigitYear - 3) } } "from" should { "accept a Joda DateTime" in { val dmy = DayMonthYear.from(new DateTime(validFourDigitYear, 2, 23, 0, 0)) dmy.day should equal(23) dmy.month should equal(2) dmy.year should equal(validFourDigitYear) } } "withTime" should { "include time" in { val dmyWithTime = DayMonthYear(23, 9, validFourDigitYear).withTime(hour = 14, minutes = 55) dmyWithTime shouldEqual DayMonthYear(23, 9, validFourDigitYear, Some(14), Some(55)) } } }
dvla/vehicles-online
test/models/DayMonthYearSpec.scala
Scala
mit
10,652
package fpgatidbits.streams import Chisel._ // combinational fork for one stream -> two streams // fork functions can be customized class StreamFork[Ti <: Data, ToA <: Data, ToB <: Data] (genIn: Ti, genA: ToA, genB: ToB, forkA: Ti => ToA, forkB: Ti => ToB) extends Module { val io = new Bundle { val in = Decoupled(genIn).flip val outA = Decoupled(genA) val outB = Decoupled(genB) } io.in.ready := io.outA.ready & io.outB.ready io.outA.bits := forkA(io.in.bits) io.outB.bits := forkB(io.in.bits) io.outA.valid := io.in.valid & io.outB.ready io.outB.valid := io.in.valid & io.outA.ready } // convenience constructor for making two identical copies of the stream object StreamCopy { def apply[T <: Data] (in: DecoupledIO[T], outA: DecoupledIO[T], outB: DecoupledIO[T]) = { val m = Module(new StreamFork( genIn = in.bits, genA = outA.bits, genB = outB.bits, forkA = {x: T => x}, forkB = {x: T => x} )).io in <> m.in m.outA <> outA m.outB <> outB } def apply[T <: Data] (in: DecoupledIO[T], out: Seq[DecoupledIO[T]]) = { for(o <- out) { o.bits := in.bits o.valid := in.valid & out.filterNot(_ == o).map(_.ready).reduce(_&_) } in.ready := out.map(_.ready).reduce(_ & _) } }
maltanar/fpga-tidbits
src/main/scala/fpgatidbits/streams/StreamFork.scala
Scala
bsd-2-clause
1,275
package io.vamp.workflow_driver import akka.actor.ActorSystem import akka.pattern.ask import com.typesafe.scalalogging.LazyLogging import io.vamp.common.Config import io.vamp.common.akka.CommonSupportForActors import io.vamp.common.akka.IoC._ import io.vamp.common.notification.Notification import io.vamp.common.vitals.InfoRequest import io.vamp.container_driver.{ ContainerDriverActor, Docker } import io.vamp.model.artifact.Workflow.Status import io.vamp.model.artifact.Workflow.Status.RestartingPhase import io.vamp.model.artifact._ import io.vamp.model.reader.{ MegaByte, Quantity } import io.vamp.model.resolver.WorkflowValueResolver import io.vamp.persistence.{ ArtifactSupport, KeyValueStoreActor, PersistenceActor } import io.vamp.pulse.notification.PulseFailureNotifier import io.vamp.workflow_driver.WorkflowDriverActor.{ GetScheduled, Schedule, Unschedule } import io.vamp.workflow_driver.notification.WorkflowDriverNotificationProvider import scala.concurrent.Future import scala.language.postfixOps object WorkflowDriver { val root = "workflows" val config = "vamp.workflow-driver" val workflowConfig = s"$config.workflow" val deployablesConfig = s"$workflowConfig.deployables" def path(workflow: Workflow) = root :: workflow.name :: Nil } trait WorkflowDriver extends ArtifactSupport with PulseFailureNotifier with CommonSupportForActors with WorkflowDriverNotificationProvider with WorkflowValueResolver with LazyLogging { import WorkflowDriver._ implicit def actorSystem: ActorSystem implicit val timeout = ContainerDriverActor.timeout() val defaultScale = DefaultScale( Quantity.of(Config.double(s"$workflowConfig.scale.cpu")()), MegaByte.of(Config.string(s"$workflowConfig.scale.memory")()), Config.int(s"$workflowConfig.scale.instances")() ) def defaultArguments() = Config.stringList("vamp.operation.deployment.arguments")().map(Argument(_)) val deployables: Map[String, String] = Config.list(deployablesConfig)().collect { case config: Map[_, _] ⇒ config.asInstanceOf[Map[String, String]]("type").trim → config.asInstanceOf[Map[String, String]]("breed").trim } toMap def receive = { case InfoRequest ⇒ reply(info) case GetScheduled(workflows) ⇒ request(workflows) case Schedule(workflow, data) ⇒ reply((schedule(data) orElse { case _ ⇒ Future.successful(false) }: PartialFunction[Workflow, Future[Any]])(workflow)) case Unschedule(workflow) ⇒ reply((unschedule() orElse { case _ ⇒ Future.successful(false) }: PartialFunction[Workflow, Future[Any]])(workflow)) } protected def info: Future[Map[_, _]] protected def request(workflows: List[Workflow]): Unit protected def schedule(data: Any): PartialFunction[Workflow, Future[Any]] protected def unschedule(): PartialFunction[Workflow, Future[Any]] protected def enrich(workflow: Workflow, data: Any): Future[Workflow] = { artifactFor[DefaultBreed](workflow.breed, force = true).flatMap { breed ⇒ (deployables.get(breed.deployable.defaultType()) match { case Some(reference) ⇒ artifactFor[DefaultBreed](reference) case _ ⇒ Future.successful(breed) }).flatMap { executor ⇒ val environmentVariables = (executor.environmentVariables ++ breed.environmentVariables ++ workflow.environmentVariables). map(env ⇒ env.name → resolveEnvironmentVariable(workflow, data)(env)).toMap.values.toList val scale = workflow.scale.getOrElse(defaultScale).asInstanceOf[DefaultScale] val network = workflow.network.getOrElse(Docker.network()) val arguments = (defaultArguments ++ executor.arguments ++ breed.arguments ++ workflow.arguments).map(arg ⇒ arg.key → arg).toMap.values.toList val healthChecks = if (breed.healthChecks.isEmpty) executor.healthChecks else breed.healthChecks val workflowBreed = breed.copy( deployable = executor.deployable, ports = executor.ports, environmentVariables = environmentVariables, healthChecks = healthChecks ) for { _ ← actorFor[PersistenceActor] ? PersistenceActor.UpdateWorkflowEnvironmentVariables(workflow, environmentVariables) _ ← actorFor[PersistenceActor] ? PersistenceActor.UpdateWorkflowScale(workflow, scale) _ ← actorFor[PersistenceActor] ? PersistenceActor.UpdateWorkflowNetwork(workflow, network) _ ← actorFor[PersistenceActor] ? PersistenceActor.UpdateWorkflowArguments(workflow, arguments) _ ← actorFor[PersistenceActor] ? PersistenceActor.UpdateWorkflowBreed(workflow, workflowBreed) kv ← actorFor[KeyValueStoreActor] ? KeyValueStoreActor.Get(WorkflowDriver.path(workflow)) _ ← kv match { case Some(_) ⇒ Future.successful(kv) case _ ⇒ actorFor[KeyValueStoreActor] ? KeyValueStoreActor.Set(WorkflowDriver.path(workflow), Option(breed.deployable.definition)) } } yield workflow.copy( breed = workflowBreed, scale = Option(scale), arguments = arguments, network = Option(network), environmentVariables = environmentVariables ) } } } protected def runnable(workflow: Workflow) = { logger.info("WorkflowDriver - The workflow status is {}", workflow.status) val result = workflow.status match { case Status.Starting | Status.Running | Status.Restarting(Some(RestartingPhase.Starting)) ⇒ true case _ ⇒ false } logger.info("WorkflowDriver - result is {}", result) logger.info("WorkflowDriver - normal comparison is {}", (workflow.status == Status.Starting)) result } override def failure(failure: Any, `class`: Class[_ <: Notification] = errorNotificationClass): Exception = super[PulseFailureNotifier].failure(failure, `class`) }
magneticio/vamp
workflow_driver/src/main/scala/io/vamp/workflow_driver/WorkflowDriver.scala
Scala
apache-2.0
5,891
package mbilski.spray.hmac class AuthenticationSpec extends BaseSpec { property("returns account when account exists and hashes match") { forAll {(uuid: String, secret: String, uri: String) => whenever(notEmpty(uuid, secret, uri)) { val hash = Signer.generate(secret, uri, Signer.timestamp) val auth = Auth(Some(Account(uuid, secret)), Some(secret)) auth.authenticate(HmacData(uuid, hash), uri).get should be(Account(uuid, secret)) } } } property("returns none when account or secret does not exist") { forAll { (uuid: String, secret: String, uri: String) => whenever(notEmpty(uuid, secret, uri)) { val hash = Signer.generate(secret, uri, Signer.timestamp) Auth(None, None).authenticate(HmacData(uuid, hash), uri) should be(None) Auth(Some(Account(uuid, secret)), None).authenticate(HmacData(uuid, hash), uri) should be(None) Auth(None, Some(secret)).authenticate(HmacData(uuid, hash), uri) should be(None) } } } }
mbilski/spray-hmac
src/test/scala/mbilski/spray/hmac/AuthenticationSpec.scala
Scala
apache-2.0
1,023
package org.zalando.jsonapi.json import org.scalactic.TypeCheckedTripleEquals import org.scalatest.{ EitherValues, WordSpec } import org.zalando.jsonapi.JsonapiRootObjectWriter import org.zalando.jsonapi.json.sprayhttpx.SprayJsonapiSupport import org.zalando.jsonapi.model.{ JsonApiObject, JsonApiProperty, RootObject } import spray.httpx.marshalling._ import spray.httpx.unmarshalling._ trait JsonapiSupportSpec extends WordSpec with TypeCheckedTripleEquals with EitherValues with SprayJsonapiSupport { def jsonapiSupportClassName: String implicit def jsonapiRootObjectMarshaller: Marshaller[RootObject] implicit def jsonapiRootObjectUnmarshaller: Unmarshaller[RootObject] s"$jsonapiSupportClassName" must { trait Context { case class Foo(bar: String) val rootObject = RootObject(jsonApi = Some(List(JsonApiProperty("foo", JsonApiObject.StringValue("bar"))))) val foo = Foo("bar") } "allow marshalling a Jsonapi root object with the correct content type" in new Context { val httpEntityString = """HttpEntity(application/vnd.api+json; charset=UTF-8,{"jsonapi":{"foo":"bar"}})""" assert(marshal(rootObject).right.value.toString === httpEntityString) } "allow marshalling a value of a type that can be converted to a Jsonapi root object" in new Context { implicit val fooWriter = new JsonapiRootObjectWriter[Foo] { override def toJsonapi(a: Foo) = rootObject } assert(marshal(foo).right.value.toString === """HttpEntity(application/vnd.api+json; charset=UTF-8,{"jsonapi":{"foo":"bar"}})""") } // TODO Fix Play framework unmarshalling problem (issue #25 on github) // "allow unmarshalling a Json to a root object with the correct content type" in new Context { // val httpEntity = HttpEntity(`application/vnd.api+json`, """{"jsonapi":{"foo":"bar"}}""") // assert(httpEntity.as[RootObject] === Right(rootObject)) // } // // "allow unmarshalling Jsonapi root object to a value type" in new Context { // implicit val fooReader = new JsonapiRootObjectReader[Foo] { // override def fromJsonapi(ro: RootObject) = foo // } // val httpEntity = HttpEntity(`application/vnd.api+json`, """{"jsonapi":{"foo":"bar"}}""") // assert(httpEntity.as[Foo] === Right(foo)) // } } }
texvex/scala-jsonapi
src/test/scala/org/zalando/jsonapi/json/JsonapiSupportSpec.scala
Scala
mit
2,365
package io.eels.component.jdbc.dialect import java.sql.{ResultSetMetaData, Types} import com.sksamuel.exts.Logging import io.eels.Row import io.eels.schema._ class GenericJdbcDialect extends JdbcDialect with Logging { // generic jdbc will just return the values as is override def sanitize(value: Any): Any = value override def toJdbcType(field: Field): String = toJdbcType(field.dataType) def toJdbcType(dataType: DataType): String = dataType match { case BigIntType => "int" case BinaryType => "binary" case BooleanType => "boolean" case CharType(size) => s"char($size)" case DateType => "date" case DecimalType(precision, scale) => s"decimal(${precision.value}, ${scale.value})" case DoubleType => "double" case FloatType => "float" case EnumType(_, _) => "varchar(255)" case IntType(_) => "int" case LongType(_) => "bigint" case ShortType(_) => "smallint" case StringType => "text" case TimestampMillisType => "timestamp" case TimestampMicrosType => sys.error("Not supported by JDBC") case VarcharType(size) => if (size > 0) s"varchar($size)" else { logger.warn(s"Invalid size $size specified for varchar; defaulting to 255") "varchar(255)" } case _ => sys.error(s"Unsupported data type with JDBC Sink: $dataType") } private def decimalType(column: Int, metadata: ResultSetMetaData): DecimalType = { val precision = metadata.getPrecision(column) val scale = metadata.getScale(column) require(scale <= precision, "Scale must be less than precision") DecimalType(precision, scale) } override def fromJdbcType(column: Int, metadata: ResultSetMetaData): DataType = { metadata.getColumnType(column) match { case Types.BIGINT => LongType.Signed case Types.BINARY => BinaryType case Types.BIT => BooleanType case Types.BLOB => BinaryType case Types.BOOLEAN => BooleanType case Types.CHAR => CharType(metadata.getPrecision(column)) case Types.CLOB => StringType case Types.DATALINK => throw new UnsupportedOperationException() case Types.DATE => DateType case Types.DECIMAL => decimalType(column, metadata) case Types.DISTINCT => throw new UnsupportedOperationException() case Types.DOUBLE => DoubleType case Types.FLOAT => FloatType case Types.INTEGER => IntType.Signed case Types.JAVA_OBJECT => BinaryType case Types.LONGNVARCHAR => StringType case Types.LONGVARBINARY => BinaryType case Types.LONGVARCHAR => StringType case Types.NCHAR => StringType case Types.NCLOB => StringType case Types.NULL => StringType case Types.NUMERIC => decimalType(column, metadata) case Types.NVARCHAR => StringType case Types.OTHER => StringType case Types.REAL => DoubleType case Types.REF => StringType case Types.ROWID => LongType.Signed case Types.SMALLINT => ShortType.Signed case Types.SQLXML => StringType case Types.STRUCT => StringType case Types.TIME => TimeMillisType case Types.TIMESTAMP => TimestampMillisType case Types.TINYINT => ShortType.Signed case Types.VARBINARY => BinaryType case Types.VARCHAR => VarcharType(metadata.getPrecision(column)) case other => logger.warn(s"Unknown jdbc type $other; defaulting to StringType") StringType } } override def create(schema: StructType, table: String): String = { val columns = schema.fields.map { it => s"${it.name} ${toJdbcType(it)}" }.mkString("(", ",", ")") s"CREATE TABLE $table $columns" } override def insertQuery(schema: StructType, table: String): String = { val columns = schema.fieldNames().mkString(",") val parameters = List.fill(schema.fields.size)("?").mkString(",") s"INSERT INTO $table ($columns) VALUES ($parameters)" } override def insert(row: Row, table: String): String = { // todo use proper statements val columns = row.schema.fieldNames().mkString(",") val values = row.values.mkString("'", "','", "'") s"INSERT INTO $table ($columns) VALUES ($values)" } }
eel-lib/eel
eel-core/src/main/scala/io/eels/component/jdbc/dialect/GenericJdbcDialect.scala
Scala
mit
4,161
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.catalyst.util import org.apache.spark.SparkFunSuite import org.apache.spark.sql.catalyst.util.StringUtils._ class StringUtilsSuite extends SparkFunSuite { test("escapeLikeRegex") { assert(escapeLikeRegex("abdef") === "(?s)\\\\Qa\\\\E\\\\Qb\\\\E\\\\Qd\\\\E\\\\Qe\\\\E\\\\Qf\\\\E") assert(escapeLikeRegex("a\\\\__b") === "(?s)\\\\Qa\\\\E_.\\\\Qb\\\\E") assert(escapeLikeRegex("a_%b") === "(?s)\\\\Qa\\\\E..*\\\\Qb\\\\E") assert(escapeLikeRegex("a%\\\\%b") === "(?s)\\\\Qa\\\\E.*%\\\\Qb\\\\E") assert(escapeLikeRegex("a%") === "(?s)\\\\Qa\\\\E.*") assert(escapeLikeRegex("**") === "(?s)\\\\Q*\\\\E\\\\Q*\\\\E") assert(escapeLikeRegex("a_b") === "(?s)\\\\Qa\\\\E.\\\\Qb\\\\E") } }
ArvinDevel/onlineAggregationOnSparkV2
sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/util/StringUtilsSuite.scala
Scala
apache-2.0
1,475
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.flink.table.planner.runtime.batch.sql.join import org.apache.flink.table.api.{TableSchema, Types} import org.apache.flink.table.planner.factories.TestValuesTableFactory import org.apache.flink.table.planner.runtime.utils.{BatchTestBase, InMemoryLookupableTableSource} import org.apache.flink.types.Row import org.junit.Assert.assertEquals import org.junit.runner.RunWith import org.junit.runners.Parameterized import org.junit.{After, Before, Test} import java.lang.{Boolean => JBoolean} import java.util import scala.collection.JavaConversions._ @RunWith(classOf[Parameterized]) class LookupJoinITCase(legacyTableSource: Boolean, isAsyncMode: Boolean) extends BatchTestBase { val data = List( rowOf(1L, 12L, "Julian"), rowOf(2L, 15L, "Hello"), rowOf(3L, 15L, "Fabian"), rowOf(8L, 11L, "Hello world"), rowOf(9L, 12L, "Hello world!")) val dataWithNull = List( rowOf(null, 15L, "Hello"), rowOf(3L, 15L, "Fabian"), rowOf(null, 11L, "Hello world"), rowOf(9L, 12L, "Hello world!")) val userData = List( rowOf(11, 1L, "Julian"), rowOf(22, 2L, "Jark"), rowOf(33, 3L, "Fabian")) val userDataWithNull = List( rowOf(11, 1L, "Julian"), rowOf(22, null, "Hello"), rowOf(33, 3L, "Fabian"), rowOf(44, null, "Hello world")) @Before override def before() { super.before() createScanTable("T", data) createScanTable("nullableT", dataWithNull) createLookupTable("userTable", userData) createLookupTable("userTableWithNull", userDataWithNull) // TODO: enable object reuse until [FLINK-12351] is fixed. env.getConfig.disableObjectReuse() } @After override def after(): Unit = { if (legacyTableSource) { assertEquals(0, InMemoryLookupableTableSource.RESOURCE_COUNTER.get()) } else { assertEquals(0, TestValuesTableFactory.RESOURCE_COUNTER.get()) } } private def createLookupTable(tableName: String, data: List[Row]): Unit = { if (legacyTableSource) { val userSchema = TableSchema.builder() .field("age", Types.INT) .field("id", Types.LONG) .field("name", Types.STRING) .build() InMemoryLookupableTableSource.createTemporaryTable( tEnv, isAsyncMode, data, userSchema, tableName, isBounded = true) } else { val dataId = TestValuesTableFactory.registerData(data) tEnv.executeSql( s""" |CREATE TABLE $tableName ( | `age` INT, | `id` BIGINT, | `name` STRING |) WITH ( | 'connector' = 'values', | 'data-id' = '$dataId', | 'async' = '$isAsyncMode', | 'bounded' = 'true' |) |""".stripMargin) } } private def createScanTable(tableName: String, data: List[Row]): Unit = { val dataId = TestValuesTableFactory.registerData(data) tEnv.executeSql( s""" |CREATE TABLE $tableName ( | `id` BIGINT, | `len` BIGINT, | `content` STRING, | `proctime` AS PROCTIME() |) WITH ( | 'connector' = 'values', | 'data-id' = '$dataId', | 'bounded' = 'true' |) |""".stripMargin) } @Test def testLeftJoinTemporalTableWithLocalPredicate(): Unit = { val sql = s"SELECT T.id, T.len, T.content, D.name, D.age FROM T LEFT JOIN userTable " + "for system_time as of T.proctime AS D ON T.id = D.id " + "AND T.len > 1 AND D.age > 20 AND D.name = 'Fabian' " + "WHERE T.id > 1" val expected = Seq( BatchTestBase.row(2, 15, "Hello", null, null), BatchTestBase.row(3, 15, "Fabian", "Fabian", 33), BatchTestBase.row(8, 11, "Hello world", null, null), BatchTestBase.row(9, 12, "Hello world!", null, null)) checkResult(sql, expected) } @Test def testJoinTemporalTable(): Unit = { val sql = s"SELECT T.id, T.len, T.content, D.name FROM T JOIN userTable " + "for system_time as of T.proctime AS D ON T.id = D.id" val expected = Seq( BatchTestBase.row(1, 12, "Julian", "Julian"), BatchTestBase.row(2, 15, "Hello", "Jark"), BatchTestBase.row(3, 15, "Fabian", "Fabian")) checkResult(sql, expected) } @Test def testJoinTemporalTableWithPushDown(): Unit = { val sql = s"SELECT T.id, T.len, T.content, D.name FROM T JOIN userTable " + "for system_time as of T.proctime AS D ON T.id = D.id AND D.age > 20" val expected = Seq( BatchTestBase.row(2, 15, "Hello", "Jark"), BatchTestBase.row(3, 15, "Fabian", "Fabian")) checkResult(sql, expected) } @Test def testJoinTemporalTableWithNonEqualFilter(): Unit = { val sql = s"SELECT T.id, T.len, T.content, D.name, D.age FROM T JOIN userTable " + "for system_time as of T.proctime AS D ON T.id = D.id WHERE T.len <= D.age" val expected = Seq( BatchTestBase.row(2, 15, "Hello", "Jark", 22), BatchTestBase.row(3, 15, "Fabian", "Fabian", 33)) checkResult(sql, expected) } @Test def testJoinTemporalTableOnMultiFields(): Unit = { val sql = s"SELECT T.id, T.len, D.name FROM T JOIN userTable " + "for system_time as of T.proctime AS D ON T.id = D.id AND T.content = D.name" val expected = Seq( BatchTestBase.row(1, 12, "Julian"), BatchTestBase.row(3, 15, "Fabian")) checkResult(sql, expected) } @Test def testJoinTemporalTableOnMultiFieldsWithUdf(): Unit = { val sql = s"SELECT T.id, T.len, D.name FROM T JOIN userTable " + "for system_time as of T.proctime AS D ON mod(T.id, 4) = D.id AND T.content = D.name" val expected = Seq( BatchTestBase.row(1, 12, "Julian"), BatchTestBase.row(3, 15, "Fabian")) checkResult(sql, expected) } @Test def testJoinTemporalTableOnMultiKeyFields(): Unit = { val sql = s"SELECT T.id, T.len, D.name FROM T JOIN userTable " + "for system_time as of T.proctime AS D ON T.content = D.name AND T.id = D.id" val expected = Seq( BatchTestBase.row(1, 12, "Julian"), BatchTestBase.row(3, 15, "Fabian")) checkResult(sql, expected) } @Test def testLeftJoinTemporalTable(): Unit = { val sql = s"SELECT T.id, T.len, D.name, D.age FROM T LEFT JOIN userTable " + "for system_time as of T.proctime AS D ON T.id = D.id" val expected = Seq( BatchTestBase.row(1, 12, "Julian", 11), BatchTestBase.row(2, 15, "Jark", 22), BatchTestBase.row(3, 15, "Fabian", 33), BatchTestBase.row(8, 11, null, null), BatchTestBase.row(9, 12, null, null)) checkResult(sql, expected) } @Test def testJoinTemporalTableOnMultiKeyFieldsWithNullData(): Unit = { val sql = s"SELECT T.id, T.len, D.name FROM nullableT T JOIN userTableWithNull " + "for system_time as of T.proctime AS D ON T.content = D.name AND T.id = D.id" val expected = Seq( BatchTestBase.row(3,15,"Fabian")) checkResult(sql, expected) } @Test def testLeftJoinTemporalTableOnMultiKeyFieldsWithNullData(): Unit = { val sql = s"SELECT D.id, T.len, D.name FROM nullableT T LEFT JOIN userTableWithNull " + "for system_time as of T.proctime AS D ON T.content = D.name AND T.id = D.id" val expected = Seq( BatchTestBase.row(null,15,null), BatchTestBase.row(3,15,"Fabian"), BatchTestBase.row(null,11,null), BatchTestBase.row(null,12,null)) checkResult(sql, expected) } @Test def testJoinTemporalTableOnNullConstantKey(): Unit = { val sql = s"SELECT T.id, T.len, T.content FROM T JOIN userTable " + "for system_time as of T.proctime AS D ON D.id = null" val expected = Seq() checkResult(sql, expected) } @Test def testJoinTemporalTableOnMultiKeyFieldsWithNullConstantKey(): Unit = { val sql = s"SELECT T.id, T.len, D.name FROM T JOIN userTable " + "for system_time as of T.proctime AS D ON T.content = D.name AND null = D.id" val expected = Seq() checkResult(sql, expected) } } object LookupJoinITCase { @Parameterized.Parameters(name = "LegacyTableSource={0}, isAsyncMode = {1}") def parameters(): util.Collection[Array[java.lang.Object]] = { Seq[Array[AnyRef]]( Array(JBoolean.TRUE, JBoolean.TRUE), Array(JBoolean.TRUE, JBoolean.FALSE), Array(JBoolean.FALSE, JBoolean.TRUE), Array(JBoolean.FALSE, JBoolean.FALSE) ) } }
jinglining/flink
flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/planner/runtime/batch/sql/join/LookupJoinITCase.scala
Scala
apache-2.0
9,164
/* * Copyright (c) 2021, salesforce.com, inc. * All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause */ package com.krux.hyperion.common import java.util.UUID import scala.language.implicitConversions trait PipelineObjectId extends Ordered[PipelineObjectId] { def toOption: Option[String] = Option(this.toString) def compare(that: PipelineObjectId): Int = this.toString.compare(that.toString) def named(name: String) = PipelineObjectId.withName(name, this) def groupedBy(group: String) = PipelineObjectId.withGroup(group, this) } object PipelineObjectId { def apply[T](klass: Class[T]) = RandomizedObjectId(klass.getSimpleName.stripSuffix("$")) def apply(seed: String) = RandomizedObjectId(seed) def apply(name: String, group: String) = NameGroupObjectId(name, group) def fixed(seed: String) = FixedObjectId(seed) def withName(name: String, id: PipelineObjectId) = id match { case NameGroupObjectId(_, c, r) => NameGroupObjectId(name, c, r) case RandomizedObjectId(_, r) => NameGroupObjectId(name, "", r) case _ => NameGroupObjectId(name, "") } def withGroup(group: String, id: PipelineObjectId) = id match { case NameGroupObjectId(n, _, r) => NameGroupObjectId(n, group, r) case RandomizedObjectId(_, r) => NameGroupObjectId("", group, r) case _ => NameGroupObjectId("", group) } implicit def string2UniquePipelineId(prefix: String): PipelineObjectId = PipelineObjectId(prefix) } case class NameGroupObjectId(name: String, group: String, rand: String = UUID.randomUUID.toString) extends PipelineObjectId { val uniqueId = (name, group) match { case ("", "") => rand case ("", g) => (g :: rand :: Nil).mkString("_") case (n, "") => (n :: rand :: Nil).mkString("_") case (n, g) => (n :: g :: rand :: Nil).mkString("_") } override def toString = uniqueId } case class RandomizedObjectId(seed: String, rand: String = UUID.randomUUID.toString) extends PipelineObjectId { val uniqueId = (seed :: rand :: Nil).mkString("_") override def toString = uniqueId } case class FixedObjectId(seed: String) extends PipelineObjectId { override def toString = seed } object ScheduleObjectId extends PipelineObjectId { override def toString = "PipelineSchedule" } object TerminateObjectId extends PipelineObjectId { override def toString = "TerminateAction" } object DefaultObjectId extends PipelineObjectId { override def toString = "Default" }
realstraw/hyperion
core/src/main/scala/com/krux/hyperion/common/PipelineObjectId.scala
Scala
bsd-3-clause
2,560
package com.arcusys.valamis.web.configuration.ioc import com.arcusys.learn.liferay.LiferayClasses._ import com.arcusys.learn.liferay.util.LanguageHelper import com.arcusys.valamis.certificate.storage.CertificateRepository import com.arcusys.valamis.course._ import com.arcusys.valamis.course.service._ import com.arcusys.valamis.course.storage.{CourseCertificateRepository, CourseExtendedRepository, CourseInstructorRepository} import com.arcusys.valamis.file.service.{FileEntryService, FileEntryServiceImpl, FileService, FileServiceImpl} import com.arcusys.valamis.file.storage.FileStorage import com.arcusys.valamis.member.service.MemberService import com.arcusys.valamis.persistence.common.SlickDBInfo import com.arcusys.valamis.settings.SettingStorage import com.arcusys.valamis.settings.service.{LRSToActivitySettingService, LRSToActivitySettingServiceImpl, SettingService, SettingServiceImpl} import com.arcusys.valamis.settings.storage.StatementToActivityStorage import com.arcusys.valamis.slide.service.contentProvider.ContentProviderService import com.arcusys.valamis.slide.service.contentProvider.impl.ContentProviderServiceImpl import com.arcusys.valamis.slide.service.lti.LTIDataService import com.arcusys.valamis.slide.service.lti.impl.LTIDataServiceImpl import com.arcusys.valamis.slide.storage.{ContentProviderRepository, LTIDataRepository} import com.arcusys.valamis.tag.TagService import com.arcusys.valamis.uri.service.{TincanURIService, TincanURIServiceImpl} import com.arcusys.valamis.uri.storage.TincanURIStorage import com.arcusys.valamis.user.service.{UserCertificateRepository, UserService, UserServiceImpl} import com.arcusys.valamis.web.util.ForkJoinPoolWithLRCompany import com.escalatesoft.subcut.inject.{BindingModule, NewBindingModule} import scala.concurrent.ExecutionContext /** * Created by mminin on 26.02.16. */ class CommonConfiguration(db: => SlickDBInfo)(implicit configuration: BindingModule) extends NewBindingModule(module => { import configuration.inject import module.bind bind[TagService[LGroup]].toSingle(new TagService[LGroup]) bind[CourseNotificationService].toSingle(new CourseNotificationServiceImpl) bind[FileEntryService].toSingle(new FileEntryServiceImpl) bind[FileService] toSingle new FileServiceImpl { lazy val fileStorage = inject[FileStorage](None) } bind[LRSToActivitySettingService] toSingle new LRSToActivitySettingServiceImpl { lazy val lrsToActivitySettingStorage = inject[StatementToActivityStorage](None) } bind[TincanURIService] toSingle new TincanURIServiceImpl { lazy val uriStorage = inject[TincanURIStorage](None) } bind[UserService] toSingle new UserServiceImpl { def removedUserPrefix = LanguageHelper.get("deleted-user") lazy val userCertificateRepository = inject[UserCertificateRepository](None) } bind[SettingService] toSingle new SettingServiceImpl { lazy val settingStorage = inject[SettingStorage](None) } bind[api.CourseService] toSingle new api.CourseServiceImpl { lazy val courseTagService = inject[TagService[LGroup]](None) lazy val courseRepository = inject[CourseExtendedRepository](None) lazy val courseMemberService = inject[CourseMemberService](None) lazy val courseCertificateRepository = inject[CourseCertificateRepository](None) } bind[CourseService] toSingle new CourseServiceImpl { lazy val courseTagService = inject[TagService[LGroup]](None) lazy val courseRepository = inject[CourseExtendedRepository](None) lazy val courseMemberService = inject[CourseMemberService](None) lazy val courseCertificateRepository = inject[CourseCertificateRepository](None) } bind[InstructorService] toSingle new InstructorServiceImpl { lazy val instructorRepository = inject[CourseInstructorRepository](None) } bind[CourseMemberService] toSingle new CourseMemberServiceImpl { lazy val memberService = inject[MemberService](None) lazy val courseNotification = inject[CourseNotificationService](None) lazy val courseRepository = inject[CourseExtendedRepository](None) lazy val courseUserQueueService = inject[CourseUserQueueService](None) } bind[ContentProviderService] toSingle new ContentProviderServiceImpl { lazy val contentProviderRepository = inject[ContentProviderRepository](None) } bind[LTIDataService] toSingle new LTIDataServiceImpl(db) { val executionContext: ExecutionContext = ForkJoinPoolWithLRCompany.ExecutionContext lazy val ltiDataRepository = inject[LTIDataRepository](None) } bind[CourseUserQueueService] toSingle new CourseUserQueueServiceImpl(db.databaseDef, db.slickDriver) { } })
arcusys/Valamis
valamis-portlets/src/main/scala/com/arcusys/valamis/web/configuration/ioc/CommonConfiguration.scala
Scala
gpl-3.0
4,648