code (stringlengths 5–1M) | repo_name (stringlengths 5–109) | path (stringlengths 6–208) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 5–1M)
---|---|---|---|---|---|
package mesosphere.marathon.tasks
import java.io._
import javax.inject.Inject
import mesosphere.marathon.Protos._
import mesosphere.marathon.state.{ PathId, StateMetrics, Timestamp }
import mesosphere.marathon.{ Main, MarathonConf }
import com.codahale.metrics.MetricRegistry
import org.apache.log4j.Logger
import org.apache.mesos.Protos.TaskStatus
import org.apache.mesos.state.{ State, Variable }
import scala.collection.JavaConverters._
import scala.collection._
import scala.collection.concurrent.TrieMap
import scala.collection.immutable.Set
import scala.concurrent.Future
class TaskTracker @Inject() (
state: State,
config: MarathonConf,
val registry: MetricRegistry)
extends StateMetrics {
import mesosphere.marathon.tasks.TaskTracker._
import mesosphere.util.BackToTheFuture.futureToFuture
import mesosphere.util.ThreadPoolContext.context
implicit val timeout = config.zkFutureTimeout
private[this] val log = Logger.getLogger(getClass.getName)
val PREFIX = "task:"
val ID_DELIMITER = ":"
private[this] val apps = TrieMap[PathId, InternalApp]()
private[tasks] def fetchFromState(id: String): Variable = timedRead {
state.fetch(id).get(timeout.duration.length, timeout.duration.unit)
}
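// Added commentary (not in the original source): task keys in the state store are
// laid out as PREFIX + <app id made path-safe> + ":" + <task id>, so a key looks
// (hypothetically) something like "task:my_app:my_app.some-task-uuid".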
private[tasks] def getKey(appId: PathId, taskId: String): String = {
PREFIX + appId.safePath + ID_DELIMITER + taskId
}
def get(appId: PathId): Set[MarathonTask] =
getInternal(appId).values.toSet
def getVersion(appId: PathId, taskId: String): Option[Timestamp] =
get(appId).collectFirst {
case mt: MarathonTask if mt.getId == taskId =>
Timestamp(mt.getVersion)
}
private def getInternal(appId: PathId): TrieMap[String, MarathonTask] =
apps.getOrElseUpdate(appId, fetchApp(appId)).tasks
def list: Map[PathId, App] = apps.mapValues(_.toApp).toMap
def count(appId: PathId): Int = getInternal(appId).size
def contains(appId: PathId): Boolean = apps.contains(appId)
def take(appId: PathId, n: Int): Set[MarathonTask] = get(appId).take(n)
def created(appId: PathId, task: MarathonTask): Unit = {
// Keep this here so running() can pick it up
getInternal(appId) += (task.getId -> task)
}
def running(appId: PathId, status: TaskStatus): Future[MarathonTask] = {
val taskId = status.getTaskId.getValue
get(appId).find(_.getId == taskId) match {
case Some(oldTask) if !oldTask.hasStartedAt => // staged
val task = oldTask.toBuilder
.setStartedAt(System.currentTimeMillis)
.setStatus(status)
.build
getInternal(appId) += (task.getId -> task)
store(appId, task).map(_ => task)
case Some(oldTask) => // running
val msg = s"Task for ID $taskId already running, ignoring"
log.warn(msg)
Future.failed(new Exception(msg))
case _ =>
val msg = s"No staged task for ID $taskId, ignoring"
log.warn(msg)
Future.failed(new Exception(msg))
}
}
def terminated(appId: PathId, status: TaskStatus): Future[Option[MarathonTask]] = {
val appTasks = getInternal(appId)
val app = apps(appId)
val taskId = status.getTaskId.getValue
appTasks.get(taskId) match {
case Some(task) =>
app.tasks.remove(task.getId)
val variable = fetchFromState(getKey(appId, taskId))
timedWrite { state.expunge(variable) }
log.info(s"Task $taskId expunged and removed from TaskTracker")
if (app.shutdown && app.tasks.isEmpty) {
// Are we shutting down this app? If so, remove it
remove(appId)
}
Future.successful(Some(task))
case None =>
if (app.shutdown && app.tasks.isEmpty) {
// Are we shutting down this app? If so, remove it
remove(appId)
}
Future.successful(None)
}
}
def shutdown(appId: PathId): Unit = {
apps.getOrElseUpdate(appId, fetchApp(appId)).shutdown = true
if (apps(appId).tasks.isEmpty) remove(appId)
}
private[this] def remove(appId: PathId): Unit = {
apps.remove(appId)
log.warn(s"App $appId removed from TaskTracker")
}
def statusUpdate(appId: PathId, status: TaskStatus): Future[Option[MarathonTask]] = {
val taskId = status.getTaskId.getValue
getInternal(appId).get(taskId) match {
case Some(task) if statusDidChange(task.getStatus, status) =>
val updatedTask = task.toBuilder
.setStatus(status)
.build
getInternal(appId) += (task.getId -> updatedTask)
store(appId, updatedTask).map(_ => Some(updatedTask))
case Some(task) =>
log.debug(s"Ignoring status update for ${task.getId}. Status did not change.")
Future.successful(Some(task))
case _ =>
log.warn(s"No task for ID $taskId")
Future.successful(None)
}
}
def stagedTasks(): Iterable[MarathonTask] = apps.values.flatMap(_.tasks.values.filter(_.getStartedAt == 0))
def checkStagedTasks: Iterable[MarathonTask] = {
// stagedAt is set when the task is created by the scheduler
val now = System.currentTimeMillis
val expires = now - Main.conf.taskLaunchTimeout()
val toKill = stagedTasks.filter(_.getStagedAt < expires)
toKill.foreach(t => {
log.warn(s"Task '${t.getId}' was staged ${(now - t.getStagedAt) / 1000}s ago and has not yet started")
})
toKill
}
def expungeOrphanedTasks(): Unit = {
// Remove state-store entries for tasks that are no longer associated with any known app. Expensive!
log.info("Expunging orphaned tasks from store")
val stateTaskKeys = timedRead { state.names.get.asScala.filter(_.startsWith(PREFIX)) }
val appsTaskKeys = apps.values.flatMap { app =>
app.tasks.keys.map(taskId => getKey(app.appName, taskId))
}.toSet
for (stateTaskKey <- stateTaskKeys) {
if (!appsTaskKeys.contains(stateTaskKey)) {
log.info(s"Expunging orphaned task with key $stateTaskKey")
val variable = timedRead {
state.fetch(stateTaskKey).get(timeout.duration.length, timeout.duration.unit)
}
timedWrite { state.expunge(variable) }
}
}
}
private[tasks] def fetchApp(appId: PathId): InternalApp = {
log.debug(s"Fetching app from store $appId")
val names = timedRead { state.names().get.asScala.toSet }
val tasks = TrieMap[String, MarathonTask]()
val taskKeys = names.filter(name => name.startsWith(PREFIX + appId.safePath + ID_DELIMITER))
for {
taskKey <- taskKeys
task <- fetchTask(taskKey)
} tasks += (task.getId -> task)
new InternalApp(appId, tasks, false)
}
def fetchTask(appId: PathId, taskId: String): Option[MarathonTask] =
fetchTask(getKey(appId, taskId))
private[tasks] def fetchTask(taskKey: String): Option[MarathonTask] = {
val bytes = fetchFromState(taskKey).value
if (bytes.length > 0) {
val source = new ObjectInputStream(new ByteArrayInputStream(bytes))
deserialize(taskKey, source)
}
else None
}
def deserialize(taskKey: String, source: ObjectInputStream): Option[MarathonTask] = {
if (source.available > 0) {
try {
val size = source.readInt
val bytes = new Array[Byte](size)
source.readFully(bytes)
Some(MarathonTask.parseFrom(bytes))
}
catch {
case e: com.google.protobuf.InvalidProtocolBufferException =>
log.warn(s"Unable to deserialize task state for $taskKey", e)
None
}
}
else {
log.warn(s"Unable to deserialize task state for $taskKey")
None
}
}
def legacyDeserialize(appId: PathId, source: ObjectInputStream): TrieMap[String, MarathonTask] = {
var results = TrieMap[String, MarathonTask]()
if (source.available > 0) {
try {
val size = source.readInt
val bytes = new Array[Byte](size)
source.readFully(bytes)
val app = MarathonApp.parseFrom(bytes)
if (app.getName != appId.toString) {
log.warn(s"App name from task state for $appId is wrong! Got '${app.getName}' Continuing anyway...")
}
results ++= app.getTasksList.asScala.map(x => x.getId -> x)
}
catch {
case e: com.google.protobuf.InvalidProtocolBufferException =>
log.warn(s"Unable to deserialize task state for $appId", e)
}
}
else {
log.warn(s"Unable to deserialize task state for $appId")
}
results
}
def serialize(task: MarathonTask, sink: ObjectOutputStream): Unit = {
val size = task.getSerializedSize
sink.writeInt(size)
sink.write(task.toByteArray)
sink.flush()
}
def store(appId: PathId, task: MarathonTask): Future[Variable] = {
val oldVar = fetchFromState(getKey(appId, task.getId))
val bytes = new ByteArrayOutputStream()
val output = new ObjectOutputStream(bytes)
serialize(task, output)
val newVar = oldVar.mutate(bytes.toByteArray)
timedWrite { state.store(newVar) }
}
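// Added commentary: a status update counts as a real change when either the Mesos
// TaskState differs or a health flag appears/changes; only then does statusUpdate
// above re-store the task.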
private[tasks] def statusDidChange(statusA: TaskStatus, statusB: TaskStatus): Boolean = {
val healthy = statusB.hasHealthy &&
(!statusA.hasHealthy || statusA.getHealthy != statusB.getHealthy)
healthy || statusA.getState != statusB.getState
}
}
object TaskTracker {
private[marathon] class InternalApp(
val appName: PathId,
var tasks: TrieMap[String, MarathonTask],
var shutdown: Boolean) {
def toApp: App = App(appName, tasks.values.toSet, shutdown)
}
case class App(appName: PathId, tasks: Set[MarathonTask], shutdown: Boolean)
}
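// ---------------------------------------------------------------------------
// Hedged usage sketch (added; not part of the original file). How a scheduler
// might drive the tracker — `state`, `config`, `registry`, `appId`, `task` and
// `status` are assumed to be available from elsewhere:
//
//   val tracker = new TaskTracker(state, config, registry)
//   tracker.created(appId, task)          // remember the freshly staged task
//   tracker.running(appId, status)        // persist it once Mesos reports it running
//   tracker.statusUpdate(appId, status)   // store subsequent state/health changes
//   tracker.terminated(appId, status)     // expunge it when it finishes
// ---------------------------------------------------------------------------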
| 14Zen/marathon | src/main/scala/mesosphere/marathon/tasks/TaskTracker.scala | Scala | apache-2.0 | 9,521 |
package com.sutol.scalgen.proj2
// Created by sutol on 14/04/2016. Part of scalgen.
object proj2 {
def main(args: Array[String]) {
val pop = new Pop2()
var num1: Int = 0
var num2: Int = 0
while (pop.step()) {
num1 = pop.getBest.genes & 0xF // low 4 bits (the decimal literal 1111 here was almost certainly meant as a 4-bit mask)
num2 = pop.getBest.genes >> 4
println(num1.toString + " " + num2.toString + " : " + pop.getBest.getFitness.toString)
}
}
}
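// Added commentary: `genes` packs two 4-bit numbers — the low nibble is num1 and the
// bits above it are num2. For example (hypothetically) genes = 0x35 decodes to
// num1 = 5 and num2 = 3.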
| sutolll/scalgen | proj2/proj2.scala | Scala | mit | 450 |
package parser.json.detail
import parser.json.GenericJsonParser
import play.api.libs.json.JsValue
import models.Skimbo
import parser.json.providers.FacebookWallParser
object FacebookPostDetails extends GenericJsonParser {
override def asSkimbo(json: JsValue): Option[Skimbo] = FacebookWallParser.asSkimbo(json)
override def cut(json: JsValue) = List(json)
} | Froggies/Skimbo | app/parser/json/detail/FacebookPostDetails.scala | Scala | agpl-3.0 | 370 |
package contege.seqgen
import scala.collection.JavaConversions._
import scala.collection.mutable.Set
import scala.collection.mutable.Map
import java.util.ArrayList
import contege.ClassReader
import contege.Random
import contege.Atom
import contege.ConstructorAtom
import contege.MethodAtom
import contege.Stats
import contege.Config
import contege.GlobalState
/**
* Finds a variable of the given type.
* If necessary, appends calls to the sequence to create such a variable.
* Possibly uses some variable already in the given sequence.
*/
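// Added summary (commentary, not in the original file) of the search strategy below:
// 1. randomly reuse an existing variable of the requested type from the sequence,
// 2. or randomly use null when the type is a reference type and null is allowed,
// 3. or produce a fresh primitive/wrapper constant,
// 4. otherwise pick an atom (constructor or method) that yields the type — possibly
//    via a downcast — recursively resolving its receiver and arguments and appending
//    the calls to the sequence.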
class GetParamTask[CallSequence <: AbstractCallSequence[CallSequence]](seqBefore: CallSequence, typ: String,
nullAllowed: Boolean, global: GlobalState)
extends Task[CallSequence](global) {
var param: Option[Variable] = None
private val maxRecursion = 50 // getting one param may require getting another; to avoid infinite recursion/very long computation, stop at some point
private var currentRecursion = 0
override def run = {
global.stats.paramTasksStarted.add("GetParamTask for "+typ)
val ret = super.run
if (!ret.isDefined) global.stats.paramTasksFailed.add("GetParamTask for "+typ)
ret
}
def computeSequenceCandidate = {
val newSequence = seqBefore.copy
param = findVarOfType(typ, newSequence, nullAllowed)
if (param.isDefined) {
Some(newSequence)
} else {
None
}
}
private def findVarOfType(typ: String, sequence: CallSequence, nullAllowed: Boolean): Option[Variable] = {
if (currentRecursion > maxRecursion) {
return None
}
currentRecursion += 1
if (sequence.types.contains(typ) && global.random.nextBool) { // reuse existing var of this type
val vars = sequence.varsOfType(typ)
val selectedVar = vars(global.random.nextInt(vars.size))
return Some(selectedVar)
} else if (!global.typeProvider.primitiveProvider.isNonRefType(typ) && nullAllowed && global.random.nextBool) {
return Some(NullConstant) // occasionally, use null (reduce probability?)
} else {
if (global.typeProvider.primitiveProvider.isPrimitiveOrWrapper(typ)) {
return Some(new Constant(global.typeProvider.primitiveProvider.next(typ)))
} else { // append calls to the sequence to create a new var of this type
var atomOption = global.typeProvider.atomGivingType(typ)
var downcast = false
if (!atomOption.isDefined) {
if (nullAllowed && global.random.nextBool) {
global.stats.nullParams.add(typ)
return Some(NullConstant)
} else {
// try to call a method where we downcast the return value
val atomWithDowncastOption = global.typeProvider.atomGivingTypeWithDowncast(typ)
if (atomWithDowncastOption.isDefined) {
atomOption = atomWithDowncastOption
downcast = true
} else {
return None
}
}
}
val atom = atomOption.get
val receiver = if (atom.isStatic || atom.isConstructor) None
else {
// recursively try to find a variable we can use as receiver
findVarOfType(atom.declaringType, sequence, false) match {
case Some(r) => {
// if the receiver is the OUT, we should only use CUT methods (only important for subclass testing)
if (seqBefore.getCutVariable != null && seqBefore.getCutVariable == r && !global.typeProvider.cutMethods.contains(atom)) {
return None
}
Some(r)
}
case None => return None // cannot find any receiver, stop searching this path
}
}
val args = new ArrayList[Variable]()
atom.paramTypes.foreach(t => {
val arg = findVarOfType(t, sequence, true) match {
case Some(a) => {
args.add(a)
}
case None => return None // cannot find any argument, stop searching this path
}
})
assert(atom.returnType.isDefined)
val retVal = Some(new ObjectVariable)
var downcastType = if (downcast) Some(typ) else None
sequence.appendCall(atom, receiver, args, retVal, downcastType)
return retVal
}
}
}
} | michaelpradel/ConTeGe | src/contege/seqgen/GetParamTask.scala | Scala | gpl-2.0 | 4,110 |
/*
* Copyright 2007-2010 WorldWide Conferencing, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.liftweb {
package example {
package lib {
import _root_.net.liftweb._
import http._
import http.rest._
import common._
import json._
import util._
import _root_.net.liftweb.example.model._
object WebServices extends RestHelper {
// a JSON-able class that holds a User
case class UserInfo(firstName: String, lastName: String,
email: String) {
def toXml = <user firstname={firstName}
lastName={lastName} email={email}/>
def toJson = Extraction.decompose(this)
}
// a JSON-able class that holds all the users
case class AllUsers(users: List[UserInfo]) {
def toJson = Extraction.decompose(this)
def toXml = <users>{users.map(_.toXml)}</users>
}
// define a REST handler for an XML request
serve {
case "webservices" :: "all_users" :: _ XmlGet _ =>
AllUsers(User.findAll()).toXml
}
// define a REST handler for a JSON request
serve {
case "webservices" :: "all_users" :: _ JsonGet _ =>
AllUsers(User.findAll()).toJson
}
/*
* While many on the Web use GET requests in this way, a client shouldn't
* be given the expectation of resource state change or creation
* through a GET. GET should be idempotent and safe. This doesn't mean
* that a service couldn't create or modify state as as result
* (e.g. logging, counting the number of requests, creating business
* objects). It's just that any such state-related operations should
* not be visible through GET. In the above example, it is implied
* that a client could send a GET request in order to create a user.
*
* AKA -- don't do it this way in the real world, this is an example
* of using Scala's guards
*/
serveJx {
case Req("webservices" :: "add_user" :: _, _, rt) if rt.post_? || rt.get_? =>
addUser()
} { // How do we convert a UserInfo to either XML or JSON?
case (JsonSelect, u, _) => u.toJson
case (XmlSelect, u, _) => u.toXml
}
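// Illustrative requests (added commentary; URLs and values are hypothetical):
//   GET /webservices/all_users.xml  -> <users><user firstname="..." .../></users>
//   GET /webservices/all_users.json -> {"users":[{"firstName":"...", ...}]}
//   GET /webservices/add_user?firstname=Ada&lastname=Lovelace&email=ada@example.com
//     -> the created user rendered as XML or JSON, or a 400 if "firstname" is missing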
// a couple of helpful conversion rules
implicit def userToInfo(u: User): UserInfo =
UserInfo(u.firstName, u.lastName, u.email)
implicit def uLstToInfo(ul: List[User]): List[UserInfo] =
ul.map(userToInfo)
// extract the parameters, create a user
// return the appropriate response
def addUser(): Box[UserInfo] =
for {
firstname <- S.param("firstname") ?~ "firstname parameter missing" ~> 400
lastname <- S.param("lastname") ?~ "lastname parameter missing"
email <- S.param("email") ?~ "email parameter missing"
} yield {
val u = User.create.firstName(firstname).
lastName(lastname).email(email)
S.param("password") foreach u.password.set
u.saveMe
}
}
}
}
}
| wsaccaco/lift | examples/example/src/main/scala/net/liftweb/example/lib/WebServices.scala | Scala | apache-2.0 | 3,312 |
package lore.compiler.feedback
import lore.compiler.core.Position
import lore.compiler.semantics.NamePath
import lore.compiler.types.{TupleType, Type}
object CoreFeedback {
object Trait {
case class NotFound(name: NamePath) extends Feedback.Error(Position.unknown) {
override def message: String = s"The core trait $name is not defined. Please include Pyramid in your project" +
s" dependencies or write your own trait definition."
}
case class TraitExpected(name: NamePath) extends Feedback.Error(Position.unknown) {
override def message: String = s"The type $name is not a trait. Please include Pyramid in your project" +
s" dependencies or write your own proper trait definition."
}
}
object MultiFunction {
case class NotFound(name: NamePath, inputType: TupleType) extends Feedback.Error(Position.unknown) {
override def message: String = s"The core multi-function $name is not defined for the argument types $inputType." +
s" Please include Pyramid in your project dependencies or write your own function definition."
}
case class IllegalOutputType(name: NamePath, inputType: TupleType, outputType: Type) extends Feedback.Error(Position.unknown) {
override def message: String = s"The core multi-function $name for argument types $inputType has the wrong output" +
s" type. Please include Pyramid in your project dependencies or ensure that the function has the following" +
s" output type: $outputType."
}
}
}
| marcopennekamp/lore | compiler/src/lore/compiler/feedback/CoreFeedback.scala | Scala | mit | 1,525 |
package com.greencatsoft.d3.selection
import scala.scalajs.js
import scala.scalajs.js.UndefOr
import org.scalajs.dom.Node
trait DataDriven[A <: Node, B <: Selection[A, B]] extends js.Object {
import DataDriven._
def data[T](): js.Array[T] = js.native
def data[T](values: js.Array[T]): BoundSelection[A, B] = js.native
def data[T](values: js.Array[T], key: KeyFunction[T]): BoundSelection[A, B] = js.native
def data(provider: js.Function1[Any, Any]): BoundSelection[A, B] = js.native
def data[T](provider: js.Function1[Any, T], key: KeyFunction[T]): BoundSelection[A, B] = js.native
def datum[T](): UndefOr[T] = js.native
def datum(value: Any): B = js.native
def datum[T](value: ElementIterator[A, T]): B = js.native
def filter(selector: String): B = js.native
def filter[T](filter: ElementIterator[A, T]): B = js.native
def sort[T](comparator: js.Function2[T, T, Int]): B = js.native
def order(): B = js.native
}
object DataDriven {
type KeyFunction[A] = js.ThisFunction2[Any, A, Int, Any]
trait BoundSelection[A <: Node, B <: Selection[A, B]] extends js.Object {
this: B =>
def enter(): SelectionBuilder[A, B] = js.native
def exit(): B = js.native
}
trait SelectionBuilder[A <: Node, B <: Selection[A, B]] extends Container[A, B]
}
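// Hedged usage sketch (added; assumes a concrete Selection `sel` over DOM nodes
// obtained elsewhere from this facade):
//
//   val bound = sel.data(js.Array(1, 2, 3)) // join data to the selection
//   bound.enter()                           // builder for elements to create
//   bound.exit()                            // selection of elements to remove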
| sid-kap/scalajs-d3 | src/main/scala/com/greencatsoft/d3/selection/DataDriven.scala | Scala | apache-2.0 | 1,301 |
package com.twitter.inject.thrift.integration.http_server
import com.google.inject.{Provides, Singleton}
import com.twitter.finagle.thrift.ClientId
import com.twitter.inject.thrift.ThriftClientModule
import com.twitter.test.thriftscala.EchoService
import com.twitter.util.Future
import com.twitter.conversions.time._
object EchoThriftClientModule extends ThriftClientModule[EchoService[Future]] {
@Provides
@Singleton
def clientId: ClientId = ClientId("echo-http-service")
override val label = "echo-service"
override val dest = "flag!thrift-echo-service"
override val connectTimeout = 1L.seconds
override val requestTimeout = 1L.seconds
}
| tom-chan/finatra | inject/inject-thrift-client/src/test/scala/com/twitter/inject/thrift/integration/http_server/EchoThriftClientModule.scala | Scala | apache-2.0 | 658 |
package org.woodyalen202
/**
* Created by lichuansun on 14-6-24.
*/
trait TestTrait {
}
| woodyalen202/based-scala | src/main/java/org/woodyalen202/TestTrait.scala | Scala | mit | 92 |
/* Copyright 2009-2021 EPFL, Lausanne */
package stainless
package extraction
package object inlining {
object trees extends Trees with inox.ast.SimpleSymbols {
case class Symbols(
functions: Map[Identifier, FunDef],
sorts: Map[Identifier, ADTSort]
) extends SimpleSymbols with StainlessAbstractSymbols {
override val symbols: this.type = this
}
override def mkSymbols(functions: Map[Identifier, FunDef], sorts: Map[Identifier, ADTSort]): Symbols = {
Symbols(functions, sorts)
}
object printer extends Printer { val trees: inlining.trees.type = inlining.trees }
}
def extractor(using inox.Context) = {
utils.DebugPipeline("FunctionSpecialization", FunctionSpecialization(trees)) andThen
utils.DebugPipeline("UnfoldOpaque", UnfoldOpaque(trees)) andThen
utils.DebugPipeline("CallSiteInline", CallSiteInline(trees)) andThen
utils.DebugPipeline("ChooseInjector", ChooseInjector(trees)) andThen
utils.DebugPipeline("ChooseEncoder", ChooseEncoder(trees)) andThen
utils.DebugPipeline("FunctionInlining", FunctionInlining(trees, trace.trees))
}
def fullExtractor(using inox.Context) = extractor andThen nextExtractor
def nextExtractor(using inox.Context) = trace.fullExtractor
def phaseSemantics(using inox.Context): inox.SemanticsProvider { val trees: inlining.trees.type } = {
extraction.phaseSemantics(inlining.trees)(fullExtractor)
}
def nextPhaseSemantics(using inox.Context): inox.SemanticsProvider { val trees: trace.trees.type } = {
trace.phaseSemantics
}
}
| epfl-lara/stainless | core/src/main/scala/stainless/extraction/inlining/package.scala | Scala | apache-2.0 | 1,567 |
package de.htwg.zeta.server.model.modelValidator.validator.rules.metaModelIndependent
import de.htwg.zeta.common.models.project.instance.elements.NodeInstance
import de.htwg.zeta.server.model.modelValidator.validator.rules.SingleNodeRule
/**
* This file was created by Tobias Droth as part of his master thesis at HTWG Konstanz (03/2017 - 09/2017).
*/
class NodesAttributesNamesNotEmpty extends SingleNodeRule {
override val name: String = getClass.getSimpleName
override val description: String = "Attribute names of nodes attributes must not be empty."
override val possibleFix: String = "Add name to every attribute."
override def isValid(node: NodeInstance): Option[Boolean] = Some(!node.attributeValues.keys.toSeq.contains(""))
}
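// Added commentary: a node whose attributeValues map contains an empty-string key
// (e.g., hypothetically, Map("" -> someValue)) fails this rule; a node whose
// attribute keys are all non-empty passes.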
| Zeta-Project/zeta | api/server/app/de/htwg/zeta/server/model/modelValidator/validator/rules/metaModelIndependent/NodesAttributesNamesNotEmpty.scala | Scala | bsd-2-clause | 748 |
/*
* Copyright (c) 2014-2021 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.execution
import java.util.concurrent.atomic.AtomicLong
import minitest.TestSuite
import monix.execution.BufferCapacity.{Bounded, Unbounded}
import monix.execution.ChannelType.{MPMC, MPSC, SPMC, SPSC}
import monix.execution.internal.Platform
import monix.execution.schedulers.TestScheduler
import scala.collection.immutable.Queue
import scala.concurrent.Future
import scala.concurrent.duration._
object AsyncQueueFakeSuite extends BaseAsyncQueueSuite[TestScheduler] {
def setup() = TestScheduler()
def tearDown(env: TestScheduler): Unit =
assert(env.state.tasks.isEmpty, "should not have tasks left to execute")
def testFuture(name: String, times: Int)(f: Scheduler => Future[Unit]): Unit = {
def repeatTest(test: Future[Unit], n: Int)(implicit ec: Scheduler): Future[Unit] =
if (n > 0)
test.flatMap(_ => repeatTest(test, n - 1))
else
Future.successful(())
test(name) { implicit ec =>
repeatTest(f(ec), times)
ec.tick(1.day)
}
}
}
object AsyncQueueGlobalSuite extends BaseAsyncQueueSuite[Scheduler] {
def setup() = Scheduler.global
def tearDown(env: Scheduler): Unit = ()
def testFuture(name: String, times: Int)(f: Scheduler => Future[Unit]): Unit = {
def repeatTest(test: Future[Unit], n: Int)(implicit ec: Scheduler): Future[Unit] =
if (n > 0)
FutureUtils
.timeout(test, 60.seconds)
.flatMap(_ => repeatTest(test, n - 1))
else
Future.successful(())
testAsync(name) { implicit ec =>
repeatTest(f(ec), times)
}
}
}
abstract class BaseAsyncQueueSuite[S <: Scheduler] extends TestSuite[S] {
val repeatForFastTests = {
if (Platform.isJVM) 1000 else 100
}
val repeatForSlowTests = {
if (Platform.isJVM) 50 else 1
}
/** TO IMPLEMENT ... */
def testFuture(name: String, times: Int = 1)(f: Scheduler => Future[Unit]): Unit
testFuture("simple offer and poll", times = repeatForFastTests) { implicit s =>
val queue = AsyncQueue.bounded[Int](10)
for {
_ <- queue.offer(1)
_ <- queue.offer(2)
_ <- queue.offer(3)
r1 <- queue.poll()
r2 <- queue.poll()
r3 <- queue.poll()
} yield {
assertEquals(r1, 1)
assertEquals(r2, 2)
assertEquals(r3, 3)
}
}
testFuture("async poll", times = repeatForFastTests) { implicit s =>
val queue = AsyncQueue.bounded[Int](10)
for {
_ <- queue.offer(1)
r1 <- queue.poll()
_ <- Future(assertEquals(r1, 1))
f <- Future(queue.poll())
_ <- Future(assertEquals(f.value, None))
_ <- queue.offer(2)
r2 <- f
} yield {
assertEquals(r2, 2)
}
}
testFuture("offer/poll over capacity", times = repeatForFastTests) { implicit s =>
val queue = AsyncQueue.bounded[Long](10)
val count = 1000L
def producer(n: Long): Future[Unit] =
if (n > 0) queue.offer(count - n).flatMap(_ => producer(n - 1))
else Future.successful(())
def consumer(n: Long, acc: Queue[Long] = Queue.empty): Future[Long] =
if (n > 0)
queue.poll().flatMap { a =>
consumer(n - 1, acc.enqueue(a))
} else
Future.successful(acc.foldLeft(0L)(_ + _))
val p = producer(count)
val c = consumer(count)
for {
_ <- p
r <- c
} yield {
assertEquals(r, count * (count - 1) / 2)
}
}
testFuture("tryOffer / tryPoll", times = repeatForFastTests) { implicit ec =>
val queue = AsyncQueue.bounded[Long](16)
val count = 1000L
def producer(n: Long): Future[Unit] =
if (n > 0) Future(queue.tryOffer(count - n)).flatMap {
case true =>
producer(n - 1)
case false =>
FutureUtils.delayedResult(10.millis)(()).flatMap(_ => producer(n))
}
else {
Future.successful(())
}
def consumer(n: Long, acc: Queue[Long] = Queue.empty): Future[Long] =
if (n > 0)
Future(queue.tryPoll()).flatMap {
case Some(a) => consumer(n - 1, acc.enqueue(a))
case None =>
FutureUtils.delayedResult(10.millis)(()).flatMap(_ => consumer(n, acc))
}
else
Future.successful(acc.foldLeft(0L)(_ + _))
val c = consumer(count)
val p = producer(count)
for {
_ <- p
r <- c
} yield {
assertEquals(r, count * (count - 1) / 2)
}
}
testFuture("drain; MPMC; unbounded", times = repeatForFastTests) { implicit ec =>
testDrain(Unbounded(), MPMC)
}
testFuture("drain; MPSC; unbounded", times = repeatForFastTests) { implicit ec =>
testDrain(Unbounded(), MPSC)
}
testFuture("drain; SPMC; unbounded", times = repeatForFastTests) { implicit ec =>
testDrain(Unbounded(), SPMC)
}
testFuture("drain; SPMC; unbounded", times = repeatForFastTests) { implicit ec =>
testDrain(Unbounded(), SPSC)
}
testFuture("drain; MPMC; bounded", times = repeatForFastTests) { implicit ec =>
testDrain(Bounded(32), MPMC)
}
testFuture("drain; MPSC; bounded", times = repeatForFastTests) { implicit ec =>
testDrain(Bounded(32), MPSC)
}
testFuture("drain; SPMC; bounded", times = repeatForFastTests) { implicit ec =>
testDrain(Bounded(32), SPMC)
}
testFuture("drain; SPMC; bounded", times = repeatForFastTests) { implicit ec =>
testDrain(Bounded(32), SPSC)
}
def testDrain(bc: BufferCapacity, ct: ChannelType)(implicit ec: Scheduler): Future[Unit] = {
val count = 1000
val elems = for (i <- 0 until count) yield i
val queue = AsyncQueue.withConfig[Int](bc, ct)
val f1 = queue.drain(1000, 1000)
val f2 = queue.offerMany(elems)
for {
_ <- f2
r <- f1
} yield {
assertEquals(r.sum, count * (count - 1) / 2)
}
}
testFuture("clear") { implicit s =>
val queue = AsyncQueue.bounded[Int](10)
for {
_ <- queue.offer(1)
_ <- Future(queue.clear())
r <- Future(queue.tryPoll())
} yield {
assertEquals(r, None)
}
}
testFuture("clear after overflow") { implicit s =>
val queue = AsyncQueue.bounded[Int](512)
val fiber = queue.offerMany(0 until 1000)
for {
_ <- FutureUtils.timeoutTo(fiber, 3.millis, Future.successful(()))
_ <- Future(queue.clear())
_ <- fiber
} yield ()
}
testFuture("concurrent producer - consumer; MPMC; bounded") { implicit ec =>
val count = if (Platform.isJVM) 10000 else 1000
val queue = AsyncQueue.withConfig[Int](Bounded(128), MPMC)
testConcurrency(queue, count, 3)
}
testFuture("concurrent producer - consumer; MPMC; unbounded") { implicit ec =>
val count = if (Platform.isJVM) 10000 else 1000
val queue = AsyncQueue.withConfig[Int](Unbounded(), MPMC)
testConcurrency(queue, count, 3)
}
testFuture("concurrent producer - consumer; MPSC; bounded") { implicit ec =>
val count = if (Platform.isJVM) 10000 else 1000
val queue = AsyncQueue.withConfig[Int](Bounded(128), MPSC)
testConcurrency(queue, count, 1)
}
testFuture("concurrent producer - consumer; MPSC; unbounded") { implicit ec =>
val count = if (Platform.isJVM) 10000 else 1000
val queue = AsyncQueue.withConfig[Int](Unbounded(), MPSC)
testConcurrency(queue, count, 1)
}
testFuture("concurrent producer - consumer; SPMC; bounded") { implicit ec =>
val count = if (Platform.isJVM) 10000 else 1000
val queue = AsyncQueue.withConfig[Int](Bounded(128), SPMC)
testConcurrency(queue, count, 3)
}
testFuture("concurrent producer - consumer; SPMC; unbounded") { implicit ec =>
val count = if (Platform.isJVM) 10000 else 1000
val queue = AsyncQueue.withConfig[Int](Unbounded(), SPMC)
testConcurrency(queue, count, 3)
}
testFuture("concurrent producer - consumer; SPSC; bounded") { implicit ec =>
val count = if (Platform.isJVM) 10000 else 1000
val queue = AsyncQueue.withConfig[Int](Bounded(128), SPSC)
testConcurrency(queue, count, 1)
}
testFuture("concurrent producer - consumer; SPSC; unbounded") { implicit ec =>
val count = if (Platform.isJVM) 10000 else 1000
val queue = AsyncQueue.withConfig[Int](Unbounded(), SPSC)
testConcurrency(queue, count, 1)
}
def testConcurrency(queue: AsyncQueue[Int], n: Int, workers: Int)(implicit s: Scheduler): Future[Unit] = {
def producer(n: Int): Future[Unit] = {
def offerViaTry(n: Int): Future[Unit] =
Future(queue.tryOffer(n)).flatMap {
case true => Future.successful(())
case false =>
FutureUtils.delayedResult(10.millis)(()).flatMap(_ => offerViaTry(n))
}
if (n > 0) {
val offer = if (n % 2 == 0) queue.offer(n) else offerViaTry(n)
offer.flatMap(_ => producer(n - 1))
} else {
queue.offerMany(for (_ <- 0 until workers) yield 0)
}
}
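// Added commentary: the `workers` zeros enqueued above act as poison pills — each
// consumer below stops as soon as it polls a 0.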
val atomic = new AtomicLong(0)
def consumer(idx: Int = 0): Future[Unit] = {
def pollViaTry(): Future[Int] =
Future(queue.tryPoll()).flatMap {
case Some(v) => Future.successful(v)
case None =>
FutureUtils.delayedResult(10.millis)(()).flatMap(_ => pollViaTry())
}
val poll = if (idx % 2 == 0) queue.poll() else pollViaTry()
poll.flatMap { i =>
if (i > 0) {
atomic.addAndGet(i.toLong)
consumer(idx + 1)
} else {
Future.successful(())
}
}
}
val tasks = (producer(n) +: (0 until workers).map(_ => consumer())).toList
for (_ <- Future.sequence(tasks)) yield {
assertEquals(atomic.get(), n.toLong * (n + 1) / 2)
}
}
}
| monix/monix | monix-execution/shared/src/test/scala/monix/execution/AsyncQueueSuite.scala | Scala | apache-2.0 | 10,251 |
import leon.instrumentation._
import leon.collection._
import leon.lang._
import ListSpecs._
import leon.annotation._
import conctrees.ConcTrees._
object Conqueue {
def max(x: BigInt, y: BigInt): BigInt = if (x >= y) x else y
def abs(x: BigInt): BigInt = if (x < 0) -x else x
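// Added commentary: the operations below return (result, cost) pairs — the BigInt
// component is a step count bounded by the `ensuring` clauses, which is how this
// Leon/Stainless benchmark tracks amortized running time.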
sealed abstract class ConQ[T] {
val isLazy: Boolean = this match {
case PushLazy(_, _) => true
case _ => false
}
val isSpine: Boolean = this match {
case Spine(_, _) => true
case _ => false
}
val pushLazyInv: Boolean = this match {
case PushLazy(ys, xs) =>
!ys.isEmpty && (xs match {
case Spine(h, rear) =>
!h.isEmpty && rear.pushLazyInv //note: head cannot be empty for a lazy closure
//h.level == ys.level (omitting this for now)
case _ => false
})
case Spine(_, rear) => rear.pushLazyInv
case _ => true
}
val zeroPreceedsLazy: Boolean = {
this match {
case Spine(h, PushLazy(_, q)) =>
(h == Empty[T]()) && q.zeroPreceedsLazy // the position before pushlazy is Empty
case Spine(Empty(), rear) =>
rear.weakZeroPreceedsLazy // here we have seen a zero
case Spine(h, rear) =>
rear.zeroPreceedsLazy //here we have not seen a zero
case Tip(_) => true
case _ => false // this implies that a ConQ cannot start with a lazy closure
}
} ensuring (res => !res || weakZeroPreceedsLazy) //zeroPreceedsLazy is a stronger property
val weakZeroPreceedsLazy: Boolean = {
this match {
case Spine(h, PushLazy(_, q)) =>
q.zeroPreceedsLazy
case Spine(_, rear) =>
rear.weakZeroPreceedsLazy
case Tip(_) => true
case _ => false // this implies that a ConQ cannot start with a lazy closure
}
}
val valid = {
zeroPreceedsLazy && pushLazyInv
}
val weakValid = {
weakZeroPreceedsLazy && pushLazyInv
}
val isConcrete: Boolean = {
this match {
case Spine(_, rear) =>
rear.isConcrete
case Tip(_) =>
true
case _ => false
}
} ensuring (res => !res || valid)
val firstLazyClosure: ConQ[T] = {
require(this.pushLazyInv)
this match {
case Spine(_, pl: PushLazy[T]) => pl
case Spine(_, tail) =>
tail.firstLazyClosure
case _ =>
this
}
} ensuring (res => !res.isSpine && res.pushLazyInv)
def suffix(sch: ConQ[T]): Boolean = { //checks if sch is a suffix of 'this'
(this == sch) || {
this match {
case Spine(_, rear) =>
rear.suffix(sch)
case _ =>
false
}
}
} ensuring (res => sch match {
case Spine(_, rear) =>
!res || suffix(rear)
case _ => true
})
}
case class Tip[T](t: Conc[T]) extends ConQ[T]
case class Spine[T](head: Conc[T], rear: ConQ[T]) extends ConQ[T]
// a closure corresponding to 'push' operations
case class PushLazy[T](ys: Conc[T], xs: Spine[T]) extends ConQ[T]
def queueScheduleProperty[T](xs: ConQ[T], sch: PushLazy[T]) = {
sch match {
case PushLazy(_, _) =>
xs.valid && xs.firstLazyClosure == sch //sch is the first lazy closure of 's'
case _ => false
}
}
def weakScheduleProperty[T](xs: ConQ[T], sch: PushLazy[T]) = {
sch match {
case PushLazy(_, _) =>
xs.weakValid && xs.firstLazyClosure == sch //sch is the first lazy closure of 's'
case _ => false
}
}
def schedulesProperty[T](q: ConQ[T], schs: List[ConQ[T]]): Boolean = {
schs match {
case Cons(pl @ PushLazy(_, nestq), tail) =>
queueScheduleProperty(q, pl) &&
schedulesProperty(nestq, tail)
case Nil() =>
//q.valid // here, for now we do not enforce that q should not have any closures.
q.isConcrete
case _ =>
false // other cases are not valid
}
}
def weakSchedulesProperty[T](q: ConQ[T], schs: List[ConQ[T]]): Boolean = {
schs match {
case Cons(pl @ PushLazy(_, nestq), tail) =>
weakScheduleProperty(q, pl) &&
schedulesProperty(nestq, tail)
case Nil() =>
//q.valid
q.isConcrete
case _ =>
false
}
}
case class Wrapper[T](queue: ConQ[T], schedule: List[ConQ[T]]) {
val valid: Boolean = {
schedulesProperty(queue, schedule)
}
}
def pushLeft[T](ys: Single[T], xs: ConQ[T]): (ConQ[T], BigInt) = {
require(xs.valid)
xs match {
case Tip(CC(_, _)) =>
(Spine(ys, xs), 1)
case Tip(Empty()) =>
(Tip(ys), 1)
case Tip(t @ Single(_)) =>
(Tip(CC(ys, t)), 1)
case s @ Spine(_, _) =>
val (r, t) = pushLeftLazy(ys, s) //ensure precondition here
(r, t + 1)
}
} ensuring (res => !res._1.isLazy && res._2 <= 2)
def pushLeftLazy[T](ys: Conc[T], xs: Spine[T]): (Spine[T], BigInt) = {
require(!ys.isEmpty && xs.valid) // &&
//(xs.head.isEmpty || xs.head.level == ys.level))
xs match {
case Spine(Empty(), rear) => //note: 'rear' is not materialized here
(Spine(ys, rear), 1) // if rear was 'PushLazy', this would temporarily break the 'zeroPreceedsLazy' invariant
case Spine(head, rear) =>
val carry = CC(head, ys) //here, head and ys are of the same level
rear match { //here, rear cannot be 'PushLazy' by the 'zeroPreceedsLazy' invariant
case s @ Spine(Empty(), srear) =>
(Spine(Empty(), Spine(carry, srear)), 1)
case s @ Spine(_, _) =>
(Spine(Empty(), PushLazy(carry, s)), 1)
case t @ Tip(tree) if tree.level > carry.level => // can this happen ? this means tree is of level at least two greater than rear ?
(Spine(Empty(), Spine(carry, t)), 1)
case Tip(tree) =>
// here tree level and carry level are equal
(Spine(Empty(), Spine(Empty(), Tip(CC(tree, carry)))), 1)
}
}
} ensuring (res => res._1.isSpine && res._1.weakValid && res._2 <= 1)
/**
* Materialize will evaluate 'mat' and update the references to
* 'mat' in 'xs'. Ideally, the second argument should include every
* structure that may contain 'mat'.
*/
def materialize[T](mat: ConQ[T], xs: ConQ[T], schs: Cons[ConQ[T]]): (Spine[T], ConQ[T], BigInt) = {
require(weakSchedulesProperty(xs, schs) && schs.head == mat)
mat match {
case PushLazy(elem, q) =>
val (nr, t) = pushLeftLazy(elem, q)
(nr, updateReferences(xs, mat, schs), t + 1)
}
} ensuring (res => (res._1 match {
case Spine(_, pl @ PushLazy(_, _)) =>
schedulesProperty(res._2, Cons(pl, schs.tail))
case _ =>
schedulesProperty(res._2, schs.tail)
}) &&
res._3 <= 2)
/**
* This does not take any time, by the definition of laziness
*/
def updateReferences[T](xs: ConQ[T], mat: ConQ[T], schs: Cons[ConQ[T]]): ConQ[T] = {
require(weakSchedulesProperty(xs, schs) && schs.head == mat)
xs match {
case Spine(h, pl @ PushLazy(elem, q)) if (pl == mat) =>
//ADT property implies that we need not search in the sub-structure 'q'.
Spine(h, pushLeftLazy(elem, q)._1) //here, we can ignore the time, this only captures the semantics
case Spine(h, rear) => //here mat and xs cannot be equal, so look in the substructures
Spine(h, updateReferences(rear, mat, schs))
}
} ensuring (res => mat match {
case PushLazy(elem, q) =>
pushLeftLazy(elem, q)._1 match {
case Spine(_, pl @ PushLazy(_, _)) =>
schedulesProperty(res, Cons(pl, schs.tail))
case _ =>
schedulesProperty(res, schs.tail)
}
})
def pushLeftAndPay[T](ys: Single[T], w: Wrapper[T]): (Wrapper[T], BigInt) = {
require(w.valid)
val (nq, t1) = pushLeft(ys, w.queue) // the queue invariant could be temporarily broken
// update the schedule
val nschs = nq match {
case Spine(_, pl @ PushLazy(_, nest)) =>
w.queue match {
case Spine(head, rear) if !head.isEmpty =>
Cons[ConQ[T]](pl, w.schedule)
case _ =>
w.schedule
}
case Tip(_) =>
w.schedule
case Spine(_, rear) =>
w.schedule
}
val (fschs, fq, t2) = pay(nschs, nq)
(Wrapper(fq, fschs), t1 + t2 + 1)
} ensuring (res => res._1.valid && res._2 <= 6)
def pay[T](schs: List[ConQ[T]], xs: ConQ[T]): (List[ConQ[T]], ConQ[T], BigInt) = {
require(weakSchedulesProperty(xs, schs))
schs match {
case c @ Cons(pl @ PushLazy(_, nestq), rest) =>
val (matr, nxs, matt) = materialize(pl, xs, c)
matr match {
case Spine(_, pl @ PushLazy(_, _)) =>
(Cons(pl, rest), nxs, matt + 1)
case _ =>
(rest, nxs, matt + 1)
}
case Nil() =>
(Nil(), xs, 1) // here everything is concretized
}
} ensuring (res => schedulesProperty(res._2, res._1) &&
res._3 <= 3)
}
| epfl-lara/leon | testcases/lazy-datastructures/ManualnOutdated/Conqueue-Manual.scala | Scala | gpl-3.0 | 9,047 |
/*
* This file is part of Kiama.
*
* Copyright (C) 2014-2015 Anthony M Sloane, Macquarie University.
*
* Kiama is free software: you can redistribute it and/or modify it under
* the terms of the GNU Lesser General Public License as published by the
* Free Software Foundation, either version 3 of the License, or (at your
* option) any later version.
*
* Kiama is distributed in the hope that it will be useful, but WITHOUT ANY
* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
* more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with Kiama. (See files COPYING and COPYING.LESSER.) If not, see
* <http://www.gnu.org/licenses/>.
*/
package org.kiama
package relation
import scala.language.higherKinds
/**
* A template trait for Relation-like types. `T` and `U` are the domain
* and range types of the relation, respectively. `Repr` is the type
* constructor for the concrete representation of a particular relation
* type.
*/
trait RelationLike[T,U,Repr[_,_]] {
import org.kiama.util.Comparison.{contains, distinct, same}
/**
* A companion object that provides factory methods for this kind of
* relation.
*/
def companion : RelationFactory[Repr]
/**
* The graph of this relation.
*/
def graph : List[(T,U)]
/**
* Apply this relation (same as `image`).
*/
def apply (t : T) : List[U] =
image (t)
/**
* Build a new relation by collecting pairs produced by the partial
* function `f` wherever it is defined on pairs of this relation.
*/
def collect[V,W] (f : ((T,U)) ==> (V,W)) : Repr[V,W] =
companion.fromGraph (graph.collect (f))
/**
* Compose this relation with `st`.
*/
def compose[S] (st : RelationLike[S,T,Repr]) : Repr[S,U] =
companion.fromGraph (
for ((s, t1) <- st.graph; (t2, u) <- graph; if same (t1, t2))
yield (s, u)
)
/**
* Does the domain of this relation contain the value `t`?
*/
def containsInDomain (t : T) : Boolean =
contains (domain, t)
/**
* Does the range of this relation contain the value `u`?
*/
def containsInRange (u : U) : Boolean =
contains (range, u)
/**
* The domain of this relation.
*/
lazy val domain : List[T] =
distinct (graph.map (_._1))
/**
* The image of a value of the relation's domain is a set of the
* values in the range that are related to that domain value.
*/
def image (t : T) : List[U] =
graph.collect { case (t1, u) if same (t, t1) => u }
/**
* A relation that maps each element of the range to its position
* (starting counting at zero).
*/
lazy val index : Repr[U,Int] =
companion.fromGraph (graph.map (_._2).zipWithIndex)
/**
* Invert this relation. In other words, if `(t,u)` is in the relation,
* then `(u,t)` is in the inverted relation.
*/
lazy val inverse : Repr[U,T] =
companion.fromGraph (graph.map (_.swap))
/**
* Is this relation empty (i.e., contains no pairs)?
*/
lazy val isEmpty : Boolean =
graph.isEmpty
/**
* An auxiliary extractor for this relation that matches pairs. The
* match succeeds if and only if the matched value `t` has a unique
* image in the relation. Both `t` and its unique image value are
* returned for a successful match.
*/
object pair {
def unapply (t : T) : Option[(T,U)] =
image (t) match {
case List (u) => Some ((t, u))
case _ => None
}
}
/**
* The preImage of a value of the relation's range is a set of the
* values in the domain that are related to that range value.
*/
def preImage (u : U) : List[T] =
graph.collect { case (t, u1) if same (u, u1) => t }
/**
* A relation that maps each element of the domain to its position
* starting at zero.
*/
lazy val preIndex : Repr[T,Int] =
companion.fromGraph (graph.map (_._1).zipWithIndex)
/**
* Domain projection, i.e., form a relation that relates each
* value in the domain to all of the related values in the range.
*/
lazy val projDomain : Repr[T,List[U]] =
companion.fromGraph (domain.map (t => (t, image (t))))
/**
* Range projection, i.e., form a relation that relates each
* value in the range to all of the related values in the domain.
*/
lazy val projRange : Repr[U,List[T]] =
companion.fromGraph (range.map (u => (u, preImage (u))))
/**
* The range of this relation.
*/
lazy val range : List[U] =
distinct (graph.map (_._2))
/**
* A relation can be used as an extractor that matches if and only if
* the matched value `t` has a unique image in the relation. The unique
* image value is returned for a successful match.
*/
def unapply (t : T) : Option[U] =
image (t) match {
case List (u) => Some (u)
case _ => None
}
/**
* A relation can be used as an extractor that returns the image for a
* given domain value `t`. Fails if `t` is not in the domain.
*/
def unapplySeq (t : T) : Option[List[U]] = {
val ti = image (t)
if (ti.isEmpty)
None
else
Some (ti)
}
/**
* Union this relation with `r`.
*/
def union (r : RelationLike[T,U,Repr]) : Repr[T,U] =
companion.fromGraph (graph ++ r.graph)
/**
* Return the sub-relation of this relation that contains just those
* pairs that have `t` as their domain element.
*/
def withDomain (t : T) : Repr[T,U] =
companion.fromGraph (graph.filter { case (t1, _) => same (t, t1) })
/**
* Return the sub-relation of this relation that contains just those
* pairs that have `u` as their range element.
*/
def withRange (u : U) : Repr[T,U] =
companion.fromGraph (graph.filter { case (_, u1) => same (u, u1) })
}
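// Illustrative example (added commentary, hypothetical values). For a relation
// whose graph is List((1,"a"), (1,"b"), (2,"a")):
//   image(1)      == List("a", "b")
//   preImage("a") == List(1, 2)
//   domain        == List(1, 2);  range == List("a", "b")
//   inverse.graph == List(("a",1), ("b",1), ("a",2))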
| solomono/kiama | library/src/org/kiama/relation/RelationLike.scala | Scala | gpl-3.0 | 6,257 |
package chrome.utils
import chrome.app.runtime.Runtime
import chrome.app.runtime.bindings.{LaunchData, Request}
trait ChromeApp {
def main(args: Array[String]): Unit = {
Runtime.onLaunched.listen(onLaunched)
Runtime.onRestarted.listen((_) => onRestart)
Runtime.onEmbedRequested.listen(onEmbedRequested)
}
def onLaunched(launchData: LaunchData): Unit = {}
def onRestart(): Unit = {}
def onEmbedRequested(request: Request): Unit = {}
}
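// Hedged usage sketch (added; not part of the original source):
//
//   object MyApp extends ChromeApp {
//     override def onLaunched(launchData: LaunchData): Unit = {
//       // e.g. open the app's main window here
//     }
//   }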
| lucidd/scala-js-chrome | bindings/src/main/scala/chrome/utils/ChromeApp.scala | Scala | mit | 463 |
/* scala-stm - (c) 2009-2011, Stanford University, PPL */
package scala.concurrent.stm
package skel
import scala.collection.mutable
private[stm] object HashTrieTMap {
def empty[A, B]: TMap[A, B] = new HashTrieTMap(Ref(TxnHashTrie.emptyMapNode[A, B]).single)
def newBuilder[A, B]: mutable.Builder[(A, B), TMap[A, B]] = new mutable.Builder[(A, B), TMap[A, B]] {
var root: TxnHashTrie.BuildingNode[A, B] = TxnHashTrie.emptyMapBuildingNode[A, B]
def clear(): Unit = { root = TxnHashTrie.emptyMapBuildingNode[A, B] }
def += (kv: (A, B)): this.type = { root = TxnHashTrie.buildingPut(root, kv._1, kv._2) ; this }
def result(): TMap[A, B] = {
val r = root
root = null
new HashTrieTMap(Ref(r.endBuild).single)
}
}
}
private[skel] class HashTrieTMap[A, B] private (root0: Ref.View[TxnHashTrie.Node[A, B]]
) extends TxnHashTrie[A, B](root0) with TMapViaClone[A, B] {
//// construction
override def empty: TMap.View[A, B] = new HashTrieTMap(Ref(TxnHashTrie.emptyMapNode[A, B]).single)
override def clone: HashTrieTMap[A, B] = new HashTrieTMap(cloneRoot)
//// TMap.View aggregates
override def isEmpty: Boolean = singleIsEmpty
override def size: Int = singleSize
override def iterator: Iterator[(A, B)] = mapIterator
override def keysIterator: Iterator[A] = mapKeyIterator
override def valuesIterator: Iterator[B] = mapValueIterator
override def foreach[U](f: ((A, B)) => U): Unit = singleMapForeach(f)
override def clear(): Unit = { root() = TxnHashTrie.emptyMapNode[A, B] }
//// TMap.View per-element
override def contains(key: A): Boolean = singleContains(key)
override def apply(key: A): B = singleGetOrThrow(key)
def get(key: A): Option[B] = singleGet(key)
override def put(key: A, value: B): Option[B] = singlePut(key, value)
override def update(key: A, value: B): Unit = singlePut(key, value)
override def += (kv: (A, B)): this.type = { singlePut(kv._1, kv._2) ; this }
override def remove(key: A): Option[B] = singleRemove(key)
override def -= (key: A): this.type = { singleRemove(key) ; this }
//// optimized TMap versions
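// Added commentary: the overloads below take an implicit InTxn and run as part of the
// enclosing atomic block, whereas the TMap.View members above go through the Ref.View
// `single` interface (one small transaction per operation).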
def isEmpty(implicit txn: InTxn): Boolean = txnIsEmpty
def size(implicit txn: InTxn): Int = singleSize
def foreach[U](f: ((A, B)) => U)(implicit txn: InTxn): Unit = txnMapForeach(f)
def contains(key: A)(implicit txn: InTxn): Boolean = txnContains(key)
def apply(key: A)(implicit txn: InTxn): B = txnGetOrThrow(key)
def get(key: A)(implicit txn: InTxn): Option[B] = txnGet(key)
def put(key: A, value: B)(implicit txn: InTxn): Option[B] = txnPut(key, value)
def remove(key: A)(implicit txn: InTxn): Option[B] = txnRemove(key)
def transform(f: (A, B) => B)(implicit txn: InTxn): this.type = { single transform f ; this }
def retain(p: (A, B) => Boolean)(implicit txn: InTxn): this.type = { single retain p ; this }
}
| nbronson/scala-stm | src/main/scala/scala/concurrent/stm/skel/HashTrieTMap.scala | Scala | bsd-3-clause | 2,913 |
package im.mange.shoreditch
import im.mange.shoreditch.api._
import im.mange.shoreditch.handler.HttpMethodPartialFunctions._
import im.mange.shoreditch.handler.{Request, Route, ShoreditchHandler}
case class Shoreditch(base: String = "shoreditch",
version: String,
longName: String,
alias: String,
checksEnabled: Boolean = true,
actionsEnabled: Boolean = true,
debug: Boolean = false,
routes: Seq[Route[Service]]) {
private val handler = new ShoreditchHandler(this)
def handle(request: Request) = {
val theHandler = handler.handler(request)
// println(theHandler)
theHandler.map(_())
}
val actions = handler.actions
val checks = handler.checks
if (debug) println(
s"""\\nShoreditch: /$base => $longName ($alias) V$version, checksEnabled: $checksEnabled, actionsEnabled: $actionsEnabled
| (${checks.size}) checks:\\n${describe(checks.toSeq)}
| (${actions.size}) actions:\\n${describe(actions.toSeq)}
""".stripMargin
)
private def describe(x: Seq[(String, Service)]) = x.map(c => s" - ${c._1 + " -> " + c._2}").mkString("\n")
}
object Shoreditch {
implicit class CheckRouteBuildingString(val path: String) extends AnyVal {
def action(f: ⇒ Action): Route[Service] = POST0("action/" + path)(f)
def check(f: ⇒ Check): Route[Service] = GET0("check/" + path)(f)
def check(f: (String) ⇒ Check): Route[Service] = GET1("check/" + path)(f)
def check(f: (String,String) ⇒ Check): Route[Service] = GET2("check/" + path)(f)
}
}
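// Hedged usage sketch (added; the Check/Action implementations and exact URLs are
// hypothetical):
//
//   val shoreditch = Shoreditch(version = "1.0", longName = "My Service", alias = "svc",
//     routes = Seq(
//       "status" check new Check { /* ... */ },   // registered under "check/status"
//       "restart" action new Action { /* ... */ } // registered under "action/restart"
//     ))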
| alltonp/shoreditch | src/main/scala/im/mange/shoreditch/Shoreditch.scala | Scala | apache-2.0 | 1,688 |
package assets.mustache.overseas
import uk.gov.gds.ier.transaction.overseas.lastUkAddress.LastUkAddressLookupMustache
import uk.gov.gds.ier.test._
class LastUkAddressLookupTemplateTest
extends TemplateTestSuite
with LastUkAddressLookupMustache {
it should "properly render" in {
running(FakeApplication()) {
val data = new LookupModel(
question = Question(),
postcode = Field(
id = "postcodeId",
name = "postcodeName",
classes = "postcodeClasses",
value = "postcodeValue"
)
)
val html = Mustache.render("overseas/lastUkAddressLookup", data)
val doc = Jsoup.parse(html.toString)
val fieldset = doc.select("fieldset").first()
val label = fieldset.select("label").first()
label.attr("for") should be("postcodeId")
val divWrapper = fieldset.select("div").first()
divWrapper.attr("class") should include("postcodeClasses")
val input = divWrapper.select("input").first()
input.attr("id") should be("postcodeId")
input.attr("name") should be("postcodeName")
input.attr("value") should be("postcodeValue")
input.attr("class") should include("postcodeClasses")
}
}
}
| michaeldfallen/ier-frontend | test/assets/mustache/overseas/LastUkAddressLookupTemplateTest.scala | Scala | mit | 1,230 |
/**
* Copyright 2010-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package examples
object FunSpecExamples extends StyleTraitExamples {
val name: String = "FunSpec"
val description: String = """For teams coming from Ruby's RSpec tool, FunSpec will feel very familiar; More generally, for any team that prefers BDD, FunSpec's nesting and gentle guide to structuring text (with describe and it) provides an excellent general-purpose choice for writing specification-style tests."""
/*
val exampleUsage: String =
"""<span class="stImport">import org.scalatest.FunSpec</span>
|<span class="stReserved">class</span> <span class="stType">SetSpec</span> <span class="stReserved">extends</span> <span class="stType">FunSpec</span> {
| describe(<span class="stLiteral">"A Set"</span>) {
| describe(<span class="stLiteral">"when empty"</span>) {
| it(<span class="stLiteral">"should have size 0"</span>) { assert(<span class="stType">Set</span>.empty.size === <span class="stLiteral">0</span>) }
| it(<span class="stLiteral">"should produce NoSuchElementException when head is invoked"</span>) {
| intercept[<span class="stType">NoSuchElementException]</span> { <span class="stType">Set</span>.empty.head }
| }
| }
| }
|} """.stripMargin
*/
val exampleUsage: String =
"""<span class="stImport">import org.scalatest._</span>
|
|<span class="stReserved">class</span> <span class="stType">SetSpec</span> <span class="stReserved">extends</span> <span class="stType">FunSpec</span> {
| <span class="stReserved">override</span> <span class="stReserved">def</span> withFixture(test: <span class="stType">NoArgTest</span>) = { <span class="stExplain">// Define a shared fixture</span>
| <span class="stExplain">// Shared setup (run at beginning of each test)</span>
| <span class="stReserved">try</span> test()
| <span class="stReserved">finally</span> {
| <span class="stExplain">// Shared cleanup (run at end of each test)</span>
| }
| }
|
| <span class="stExplain">// Describe a <em>scope</em> for a <em>subject</em>, in this case: "A Set"</span>
| describe(<span class="stLiteral">"A Set"</span>) { <span class="stExplain">// All tests within these curly braces are about "A Set"</span>
|
| <span class="stExplain">// Can describe nested scopes that "narrow" its outer scopes</span>
| describe(<span class="stLiteral">"(when empty)"</span>) { <span class="stExplain">// All tests within these curly braces are about "A Set (when empty)"</span>
|
| it(<span class="stLiteral">"should have size 0"</span>) { <span class="stExplain">// Here, 'it' refers to "A Set (when empty)". The full name</span>
| assert(<span class="stType">Set</span>.empty.size == <span class="stLiteral">0</span>) <span class="stExplain">// of this test is: "A Set (when empty) should have size 0"</span>
| }
| it(<span class="stLiteral">"should produce NoSuchElementException when head is invoked"</span>) { // <span class="stExplain">Define another test</span>
| intercept[<span class="stType">NoSuchElementException</span>] {
| <span class="stType">Set</span>.empty.head
| }
| }
| ignore(<span class="stLiteral">"should be empty"</span>) { <span class="stExplain">// To ignore a test, change 'it' to 'ignore'...</span>
| assert(<span class="stType">Set</span>.empty.isEmpty)
| }
| }
|
| <span class="stExplain">// Describe a second nested scope that narrows "A Set" in a different way</span>
| describe(<span class="stLiteral">"(when non-empty)"</span>) { <span class="stExplain">// All tests within these curly braces are about "A Set (when non-empty)"</span>
|
| it(<span class="stLiteral">"should have the correct size"</span>) { <span class="stExplain">// Here, 'it' refers to "A Set (when non-empty)". This test's full</span>
| assert(<span class="stType">Set</span>(<span class="stLiteral">1</span>, <span class="stLiteral">2</span>, <span class="stLiteral">3</span>).size == <span class="stLiteral">3</span>) <span class="stExplain">// name is: "A Set (when non-empty) should have the correct size"</span>
| }
| <span class="stExplain">// Define a pending test by using (pending) for the body</span>
| it(<span class="stLiteral">"should return a contained value when head is invoked"</span>) (pending)
| <span class="stImport">import tagobjects.Slow</span>
| it(<span class="stLiteral">"should be non-empty"</span>, <span class="stType">Slow</span>) { <span class="stExplain">// Tag a test by placing a tag object after the test name</span>
| assert(<span class="stType">Set</span>(<span class="stLiteral">1</span>, <span class="stLiteral">2</span>, <span class="stLiteral">3</span>).nonEmpty)
| }
| }
| }
|}
|
|<span class="stExplain">// Can also pass fixtures into tests with fixture.FunSpec</span>
|<span class="stReserved">class</span> <span class="stType">StringSpec</span> <span class="stReserved">extends</span> <span class="stType">fixture.FunSpec</span> {
| <span class="stReserved">type</span> FixtureParam = <span class="stType">String</span> <span class="stExplain">// Define the type of the passed fixture object</span>
| <span class="stReserved">override</span> <span class="stReserved">def</span> withFixture(test: <span class="stType">OneArgTest</span>) = {
| <span class="stExplain">// Shared setup (run before each test), including...</span>
| <span class="stReserved">val</span> fixture = <span class="stLiteral">"a fixture object"</span> <span class="stExplain">// ...creating a fixture object</span>
| <span class="stReserved">try</span> test(fixture) <span class="stExplain">// Pass the fixture into the test</span>
| <span class="stReserved">finally</span> {
| <span class="stExplain">// Shared cleanup (run at end of each test)</span>
| }
| }
| describe(<span class="stLiteral">"The passed fixture"</span>) {
| it(<span class="stLiteral">"can be used in the test"</span>) { s => <span class="stExplain">// Fixture passed in as s</span>
| assert(s == <span class="stLiteral">"a fixture object"</span>)
| }
| }
|}
|
|@DoNotDiscover <span class="stExplain">// Disable discovery of a test class</span>
|<span class="stReserved">class</span> <span class="stType">InvisibleSpec</span> <span class="stReserved">extends</span> <span class="stType">FunSpec</span> { <span class="stBlockComment">/*code omitted*/</span> }
|
|@Ignore <span class="stExplain">// Ignore all tests in a test class</span>
|<span class="stReserved">class</span> <span class="stType">IgnoredSpec</span> <span class="stReserved">extends</span> <span class="stType">FunSpec</span> { <span class="stBlockComment">/*code omitted*/</span> }
|
|<span class="stImport">import tags.Slow</span>
|@Slow <span class="stExplain">// Mark all tests in a test class with a tag</span>
|<span class="stReserved">class</span> <span class="stType">SlowSpec</span> <span class="stReserved">extends</span> <span class="stType">FunSpec</span> { <span class="stBlockComment">/*code omitted*/</span> }
|""".stripMargin
val play2Example: String =
"""<span class="stImport">import org.scalatest._</span>
|<span class="stImport">import play.api.test._</span>
|<span class="stImport">import play.api.test.Helpers._</span>
|
|<span class="stReserved">class</span> <span class="stType">ExampleSpec</span> <span class="stReserved">extends</span> <span class="stType">FunSpec</span> <span class="stReserved">with</span> <span class="stType">Matchers</span> {
| describe(<span class="stLiteral">"Application should"</span>) {
| it(<span class="stLiteral">"send 404 on a bad request"</span>) {
| running(<span class="stType">FakeApplication</span>()) {
| route(<span class="stType">FakeRequest</span>(GET, <span class="stLiteral">"/boum"</span>)) shouldBe <span class="stType">None</span>
| }
| }
| it(<span class="stLiteral">"render the index page"</span>) {
| running(<span class="stType">FakeApplication</span>()) {
| <span class="stReserved">val</span> home = route(<span class="stType">FakeRequest</span>(GET, <span class="stLiteral">"/"</span>)).get
| status(home) shouldBe OK
| contentType(home) shouldBe <span class="stType">Some(<span class="stLiteral">"text/html"</span>)</span>
| contentAsString(home) should include (<span class="stLiteral">"ScalaTest"</span>)
| }
| }
| }
|}""".stripMargin
val doNotDiscover: String =
"""<span class="stImport">import org.scalatest._</span>
|@DoNotDiscover
|<span class="stReserved">class</span> <span class="stType">SetSpec</span> <span class="stReserved">extends</span> <span class="stType">FunSpec</span> { <span class="stBlockComment">/*code omitted*/</span> }
""".stripMargin
val ignoreTest: String =
"""<span class="stImport">import org.scalatest._</span>
|<span class="stReserved">class</span> <span class="stType">SetSpec</span> <span class="stReserved">extends</span> <span class="stType">FunSpec</span> {
| ignore(<span class="stLiteral">"should have size 0"</span>) { <span class="stBlockComment">/*code omitted*/</span> }
|}""".stripMargin
val pendingTest: String =
"""<span class="stImport">import org.scalatest._</span>
|<span class="stReserved">class</span> <span class="stType">SetSpec</span> <span class="stReserved">extends</span> <span class="stType">FunSpec</span> {
| it(<span class="stLiteral">"should have size 0"</span>) (pending)
|}""".stripMargin
val taggingTest: String =
"""<span class="stImport">import org.scalatest._</span>
|<span class="stReserved">object</span> <span class="stType">SlowTest</span> <span class="stReserved">extends</span> <span class="stType">Tag</span>(<span class="stLiteral">"com.mycompany.tags.SlowTest"</span>)
|<span class="stReserved">class</span> <span class="stType">SetSpec</span> <span class="stReserved">extends</span> <span class="stType">FunSpec</span> {
| it(<span class="stLiteral">"should have size 0"</span>, <span class="stType">SlowTest</span>) {
| <span class="stBlockComment">/*code omitted*/</span>
| }
|}""".stripMargin
val infoTest: String =
"""<span class="stImport">import org.scalatest._</span>
|<span class="stReserved">class</span> <span class="stType">SetSpec</span> <span class="stReserved">extends</span> <span class="stType">FunSpec</span> {
| it(<span class="stLiteral">"should have size 0"</span>) {
| info(<span class="stLiteral">"Some information."</span>)
| <span class="stBlockComment">/*code omitted*/</span>
| }
|}""".stripMargin
val fixtureNoArgTest: String =
"""<span class="stImport">import org.scalatest._</span>
|<span class="stReserved">class</span> <span class="stType">SetSpec</span> <span class="stReserved">extends</span> <span class="stType">FunSpec</span> {
| <span class="stReserved">def</span> setup() { <span class="stBlockComment">/*code omitted*/</span> }
| <span class="stReserved">def</span> cleanup() { <span class="stBlockComment">/*code omitted*/</span> }
| <span class="stReserved">override</span> <span class="stReserved">protected</span> <span class="stReserved">def</span> withFixture(test: <span class="stType">NoArgTest</span>) = {
| setup()
| <span class="stReserved">try</span> test() <span class="stReserved">finally</span> cleanup()
| }
|}""".stripMargin
val fixtureOneArgTest: String =
"""<span class="stImport">import org.scalatest._</span>
|<span class="stReserved">class</span> <span class="stType">SetSpec</span> <span class="stReserved">extends</span> <span class="stType">fixture.FunSpec</span> {
| <span class="stReserved">def</span> setup() { <span class="stBlockComment">/*code omitted*/</span> }
| <span class="stReserved">def</span> cleanup() { <span class="stBlockComment">/*code omitted*/</span> }
| <span class="stReserved">type</span> FixtureParam = <span class="stType">String</span>
| <span class="stReserved">override</span> <span class="stReserved">protected</span> <span class="stReserved">def</span> withFixture(test: <span class="stType">OneArgTest</span>) = {
| setup()
| <span class="stReserved">try</span> test(<span class="stLiteral">"this is a fixture param"</span>) <span class="stReserved">finally</span> cleanup()
| }
|}""".stripMargin
val seleniumExample: String =
"""<span class="stImport">import org.scalatest._
|import selenium._</span>
|<span class="stReserved">class</span> <span class="stType">BlogSpec</span> <span class="stReserved">extends</span> <span class="stType">FunSpec</span> <span class="stReserved">with</span> <span class="stType">WebBrowser</span> <span class="stReserved">with</span> <span class="stType">HtmlUnit</span> {
| <span class="stReserved">val</span> host = <span class="stLiteral">"http://localhost:9000/"</span>
| it(<span class="stLiteral">"should have the correct title"</span>) {
| go to (host + <span class="stLiteral">"index.html"</span>)
| pageTitle should be (<span class="stLiteral">"Awesome Blog"</span>)
| }
|}""".stripMargin
}
| jedesah/scalatest-website | app/examples/FunSpecExamples.scala | Scala | apache-2.0 | 14,454 |
package io.buoyant.namerd.iface
import com.twitter.conversions.DurationOps._
import com.twitter.finagle._
import com.twitter.finagle.naming.NameInterpreter
import com.twitter.logging.Level
import com.twitter.util._
import io.buoyant.namer.{ConfiguredDtabNamer, DelegateTree, Metadata, RichActivity}
import io.buoyant.namerd.NullDtabStore
import io.buoyant.test.{Awaits, FunSuite}
import org.scalatest.concurrent.{Eventually, IntegrationPatience}
import org.scalatest.time._
class HttpNamerEndToEndTest extends FunSuite with Eventually with IntegrationPatience with Awaits {
implicit override val patienceConfig = PatienceConfig(
timeout = scaled(Span(5, Seconds)),
interval = scaled(Span(100, Milliseconds))
)
def retryIn() = 1.second
val clientId = Path.empty
val ns = "testns"
test("service resurrection") {
val serverState = Var[Activity.State[NameTree[Name.Bound]]](Activity.Pending)
@volatile var clientState: Activity.State[NameTree[Name.Bound]] = Activity.Pending
val reqDtab = Dtab.read("/woop => /w00t")
val reqPath = Path.read("/woop/woop")
val id = Path.read("/io.l5d.w00t/woop")
val namer = new Namer {
def lookup(path: Path) = path match {
case Path.Utf8("woop") => Activity(serverState)
case _ => Activity.exception(new Exception)
}
}
def interpreter(ns: String) = new NameInterpreter {
def bind(dtab: Dtab, path: Path) =
if (dtab == reqDtab && path == reqPath) Activity(serverState)
else Activity.exception(new Exception)
}
val namers = Map(Path.read("/io.l5d.w00t") -> namer)
val service = new HttpControlService(NullDtabStore, interpreter, namers)
val client = new StreamingNamerClient(service, ns)
val act = client.bind(reqDtab, reqPath)
val obs = act.states.respond { s =>
clientState = s
}
assert(clientState == Activity.Pending)
val serverAddr0 = Var[Addr](Addr.Bound())
serverState() = Activity.Ok(NameTree.Leaf(Name.Bound(serverAddr0, id)))
eventually { assert(clientState == serverState.sample()) }
val Activity.Ok(NameTree.Leaf(bound0)) = clientState
assert(bound0.id == id)
@volatile var clientAddr0: Addr = Addr.Pending
bound0.addr.changes.respond(clientAddr0 = _)
assert(clientAddr0 == Addr.Bound())
serverAddr0() = Addr.Bound(
Set(Address("127.1", 4321)),
Addr.Metadata(Metadata.authority -> "acme.co")
)
eventually {
assert(clientAddr0 == Addr.Bound(Set(Address("127.1", 4321)), Addr.Metadata(Metadata.authority -> "acme.co")))
}
serverAddr0() = Addr.Bound(
Set(Address("127.1", 5432)),
Addr.Metadata(Metadata.authority -> "acme.co")
)
eventually {
assert(clientAddr0 == Addr.Bound(Set(Address("127.1", 5432)), Addr.Metadata(Metadata.authority -> "acme.co")))
}
serverState() = Activity.Ok(NameTree.Neg)
eventually { assert(clientState == serverState.sample()) }
eventually { assert(clientAddr0 == Addr.Neg) }
val serverAddr1 = Var[Addr](Addr.Bound())
serverState() = Activity.Ok(NameTree.Leaf(Name.Bound(serverAddr1, id)))
eventually { assert(clientState == serverState.sample()) }
val Activity.Ok(NameTree.Leaf(bound1)) = clientState
assert(bound1.id == id)
@volatile var clientAddr1: Addr = Addr.Pending
bound1.addr.changes.respond(clientAddr1 = _)
serverAddr1() = Addr.Bound(Address("127.1", 5432))
eventually { assert(clientAddr1 == serverAddr1.sample()) }
serverAddr1() = Addr.Bound(Address("127.1", 6543))
eventually { assert(clientAddr1 == serverAddr1.sample()) }
}
test("delegation") {
val id = Path.read("/io.l5d.w00t")
val namer = new Namer {
def lookup(path: Path) = {
path match {
case Path.Utf8("woop") => Activity.value(NameTree.Leaf(Name.Bound(
Var(
Addr.Bound(
Set(Address("localhost", 9000)),
Addr.Metadata(Metadata.authority -> "acme.co")
)
),
Path.read("/io.l5d.w00t/woop"),
Path.empty
)))
case _ => Activity.value(NameTree.Neg)
}
}
}
val namers = Seq(id -> namer)
def interpreter(ns: String) = new ConfiguredDtabNamer(
Activity.value(Dtab.read("/srv => /io.l5d.w00t; /host => /srv; /svc => /host")),
namers
)
val service = new HttpControlService(NullDtabStore, interpreter, namers.toMap)
val client = new StreamingNamerClient(service, ns)
val tree = await(client.delegate(
Dtab.read("/host/poop => /srv/woop"),
Path.read("/svc/poop")
))
assert(tree ==
DelegateTree.Delegate(
Path.read("/svc/poop"),
Dentry.nop,
DelegateTree.Alt(
Path.read("/host/poop"),
Dentry.read("/svc=>/host"),
List(
DelegateTree.Delegate(
Path.read("/srv/woop"),
Dentry.read("/host/poop=>/srv/woop"),
DelegateTree.Leaf(
Path.read("/io.l5d.w00t/woop"),
Dentry.read("/srv=>/io.l5d.w00t"),
Path.read("/io.l5d.w00t/woop")
)
),
DelegateTree.Delegate(
Path.read("/srv/poop"),
Dentry.read("/host=>/srv"),
DelegateTree.Neg(
Path.read("/io.l5d.w00t/poop"),
Dentry.read("/srv=>/io.l5d.w00t")
)
)
): _*
)
))
}
test("use last good bind data") {
val id = Path.read("/io.l5d.w00t")
val (act, witness) = Activity[NameTree[Name]]()
val namer = new Namer {
def lookup(path: Path) = act
}
val namers = Seq(id -> namer)
def interpreter(ns: String) = new ConfiguredDtabNamer(
Activity.value(Dtab.read("/svc => /io.l5d.w00t")),
namers
)
val service = new HttpControlService(NullDtabStore, interpreter, namers.toMap)
val client = new StreamingNamerClient(service, ns)
witness.notify(Return(NameTree.Leaf(Name.Bound(
Var(Addr.Bound(Address("localhost", 9000))),
Path.read("/io.l5d.w00t/foo"),
Path.empty
))))
val bindAct = client.bind(Dtab.empty, Path.read("/svc/foo"))
var bound: NameTree[Name.Bound] = null
// hold activity open so that it doesn't get restarted and lose state
val bindObs = bindAct.values.respond(_ => ())
try {
val NameTree.Leaf(bound0) = await(bindAct.toFuture)
// hold var open so that it doesn't get restarted and lose state
val bound0Obs = bound0.addr.changes.respond(_ => ())
try {
assert(bound0.id == Path.read("/io.l5d.w00t/foo"))
assert(bound0.addr.sample == Addr.Bound(Address("localhost", 9000)))
witness.notify(Throw(new Exception("bind failure")))
val NameTree.Leaf(bound1) = await(bindAct.toFuture)
assert(bound1.id == Path.read("/io.l5d.w00t/foo"))
assert(bound1.addr.sample == Addr.Bound(Address("localhost", 9000)))
} finally await(bound0Obs.close())
} finally await(bindObs.close())
}
}
| linkerd/linkerd | namerd/iface/control-http/src/test/scala/io/buoyant/namerd/iface/HttpNamerEndToEndTest.scala | Scala | apache-2.0 | 7,095 |
/*
* The MIT License
*
* Copyright (c) 2016 Fulcrum Genomics
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
*/
package com.fulcrumgenomics.vcf
import com.fulcrumgenomics.FgBioDef._
import com.fulcrumgenomics.commons.io.PathUtil
import com.fulcrumgenomics.testing.UnitSpec
import com.fulcrumgenomics.util.Io
import com.fulcrumgenomics.vcf.HapCutType.{HapCut1, HapCut2, HapCutType}
import htsjdk.variant.variantcontext.VariantContext
import htsjdk.variant.vcf.VCFFileReader
import org.scalatest.ParallelTestExecution
/**
* Tests for HapCutToVcf.
*/
class HapCutToVcfTest extends UnitSpec with ParallelTestExecution {
private val dir = PathUtil.pathTo("src/test/resources/com/fulcrumgenomics/vcf/testdata")
private val originalVcf = dir.resolve("NA12878.GIABPedigreev0.2.17.41100000.41300000.vcf")
private val hapCut1Out = dir.resolve("NA12878.GIABPedigreev0.2.17.41100000.41300000.hapcut")
private val hapCut1Vcf = dir.resolve("NA12878.GIABPedigreev0.2.17.41100000.41300000.hapcut.vcf")
private val hapCut1GatkVcf = dir.resolve("NA12878.GIABPedigreev0.2.17.41100000.41300000.hapcut.gatk.vcf")
private val hapCut2Out = dir.resolve("NA12878.GIABPedigreev0.2.17.41100000.41300000.hapcut2")
private val hapCut2Vcf = dir.resolve("NA12878.GIABPedigreev0.2.17.41100000.41300000.hapcut2.vcf")
private val hapCut2GatkVcf = dir.resolve("NA12878.GIABPedigreev0.2.17.41100000.41300000.hapcut2.gatk.vcf")
// For testing HapCut2 producing phased blocks overlapping other phased blocks.
private val outOfOrderIn = dir.resolve("blocks_out_of_order.vcf")
private val outOfOrderOut = dir.resolve("blocks_out_of_order.hapcut2")
private val outOfOrderOutVcf = dir.resolve("blocks_out_of_order.hapcut2.vcf")
// For testing HapCut2 with missing variants in the input VCF
private val missingVariantsIn = dir.resolve("missing_leading_variants.vcf")
private val missingVariantsOut = dir.resolve("missing_leading_variants.hapcut2")
// For testing HapCut2 with missing genotype info
  private val missingGenotypeInfoIn = dir.resolve("hapcut2_for_missing_genotype_info.vcf")
private val noSwitchErrorsIn = dir.resolve("no_switch_errors.hapcut2")
private val skipPruneIn = dir.resolve("skip_prune.hapcut2")
private val withSwitchErrors = dir.resolve("with_switch_errors.hapcut2")
// For testing HapCutToVcf with IUPAC codes
private val withIupacIn = dir.resolve("with_iupac.vcf")
private val withIupacOut = dir.resolve("with_iupac.hapcut")
private val withIupacOutVcf = dir.resolve("with_iupac.hapcut.vcf")
private def countVcfRecords(vcf: PathToVcf): Int = {
val vcfReader = new VCFFileReader(vcf.toFile, false)
yieldAndThen(vcfReader.iterator().length)(vcfReader.close())
}
private def compareVcfs(newVcf: PathToVcf, originalVcf: PathToVcf): Unit = {
val newVcfReader = new VCFFileReader(newVcf.toFile, false)
val originalVcfReader = new VCFFileReader(originalVcf.toFile, false)
for (newVariantCtx <- newVcfReader) {
originalVcfReader.exists { originalVariantCtx =>
originalVariantCtx.getContig == newVariantCtx.getContig &&
originalVariantCtx.getStart == newVariantCtx.getStart &&
originalVariantCtx.getEnd == newVariantCtx.getEnd
} shouldBe true
}
}
private def isPhased(ctx: VariantContext, gatkPhasingFormat: Boolean): Boolean = {
if (gatkPhasingFormat) ctx.isNotFiltered // are marked as passed filter
else ctx.getGenotypes.exists(_.isPhased) // are marked as phased
}
private def getNumPhasedFromVcf(path: PathToVcf, gatkPhasingFormat: Boolean): Int = {
val vcfReader = new VCFFileReader(path.toFile, false)
val numPhased = vcfReader.iterator().count { ctx => isPhased(ctx, gatkPhasingFormat) }
vcfReader.close()
numPhased
}
private def hasPhasingSetFormatTagButUnphased(path: PathToVcf, gatkPhasingFormat: Boolean): Boolean = {
val vcfReader = new VCFFileReader(path.toFile, false)
val hasPhasingSetTag = vcfReader
.iterator()
.filterNot { ctx => isPhased(ctx, gatkPhasingFormat) }
.exists { ctx => ctx.getGenotypes.exists(_.hasExtendedAttribute(HapCut1VcfHeaderLines.PhaseSetFormatTag)) }
vcfReader.close()
hasPhasingSetTag
}
private def checkHapCutReader(path: FilePath, hapCutType: HapCutType): Unit = {
val reader = HapCutReader(path)
reader.hapCutType shouldBe hapCutType
val allCalls = reader.toSeq
val calls = allCalls.flatMap(_.call)
allCalls.length shouldBe 342 // 342 total variants
calls.length shouldBe 237 // 237 phased variants
calls.map(_.phaseSet).distinct.length shouldBe 8 // eight phased blocks
    // Check the second block (two variants). The first variant is 1/0 while the second is 0/1.
{
val call = calls(3)
call.phaseSet shouldBe 41106449
call.hap1Allele shouldBe 1
call.hap2Allele shouldBe 0
val ctx = call.toVariantContext("Sample")
ctx.getGenotype(0).isPhased shouldBe true
ctx.getGenotype(0).getAlleles.map(_.getBaseString).toList should contain theSameElementsInOrderAs Seq("CT", "C")
}
{
val call = calls(4)
call.phaseSet shouldBe 41106449
call.hap1Allele shouldBe 0
call.hap2Allele shouldBe 1
val ctx = call.toVariantContext("Sample")
ctx.getGenotype(0).isPhased shouldBe true
ctx.getGenotype(0).getAlleles.map(_.getBaseString).toList should contain theSameElementsInOrderAs Seq("T", "G")
}
}
"HapCutReader" should "read in a HAPCUT1 file" in {
checkHapCutReader(hapCut1Out, HapCut1)
}
it should "read in a HAPCUT2 file" in {
checkHapCutReader(hapCut2Out, HapCut2)
}
it should "read in a HAPCUT1 file that has phased genotypes" in {
val input = dir.resolve("block_has_phased_genotypes.hapcut")
val reader = HapCutReader(input)
val allCalls = reader.toSeq
val calls = allCalls.flatMap(_.call)
allCalls.length shouldBe 8 // 8 total variants
calls.length shouldBe 3 // 3 phased variants
calls.map(_.phaseSet).distinct.length shouldBe 1 // a single phased block
    reader.close()
  }
"HapCutToVcf" should "convert a HAPCUT1 file to VCF in both GATK and VCF-spec phasing format" in {
Iterator(true, false).foreach { gatkPhasingFormat =>
val expectedOutput = if (gatkPhasingFormat) hapCut1GatkVcf else hapCut1Vcf
val out = makeTempFile("hap_cut_to_vcf.hapcut", ".vcf")
new HapCutToVcf(
vcf = originalVcf,
input = hapCut1Out,
output = out,
gatkPhasingFormat = gatkPhasingFormat
).execute()
// check that we have the same # of records in the output as the input
countVcfRecords(out) shouldBe countVcfRecords(originalVcf)
// check that all records in the output are found in the input
compareVcfs(out, originalVcf)
// get the # of phased variants from the output
val numPhasedFromOut = getNumPhasedFromVcf(out, gatkPhasingFormat)
// check that the # of variants phased in the output agrees with the # of phased calls produced by HapCut
val hapCutReader = HapCutReader(hapCut1Out)
val numPhasedFromHapCut = hapCutReader.flatMap(_.call).length
numPhasedFromOut shouldBe numPhasedFromHapCut
hapCutReader.close()
// check that the # of variants phased in the output agrees with the # of phased calls in the expected output
numPhasedFromOut shouldBe getNumPhasedFromVcf(expectedOutput, gatkPhasingFormat)
// check that if a variant is not phased it does not have a PS tag
hasPhasingSetFormatTagButUnphased(out, gatkPhasingFormat) shouldBe false
}
}
it should "convert a HAPCUT2 file to VCF in both GATK and VCF-spec phasing format" in {
Iterator(true, false).foreach { gatkPhasingFormat =>
val expectedOutput = if (gatkPhasingFormat) hapCut2GatkVcf else hapCut2Vcf
val out = makeTempFile("hap_cut_to_vcf.hapcut2", ".vcf")
new HapCutToVcf(
vcf = originalVcf,
input = hapCut2Out,
output = out,
gatkPhasingFormat = gatkPhasingFormat
).execute()
// check that we have the same # of records in the output as the input
countVcfRecords(out) shouldBe countVcfRecords(originalVcf)
// check that all records in the output are found in the input
compareVcfs(out, originalVcf)
// get the # of phased variants from the output
val numPhasedFromOut = getNumPhasedFromVcf(out, gatkPhasingFormat)
// check that the # of variants phased in the output agrees with the # of phased calls produced by HapCut
val hapCutReader = HapCutReader(hapCut2Out)
val numPhasedFromHapCut = hapCutReader.flatMap(_.call).length
numPhasedFromOut shouldBe numPhasedFromHapCut
hapCutReader.close()
// check that the # of variants phased in the output agrees with the # of phased calls in the expected output
numPhasedFromOut shouldBe getNumPhasedFromVcf(expectedOutput, gatkPhasingFormat)
// check that if a variant is not phased it does not have a PS tag
hasPhasingSetFormatTagButUnphased(out, gatkPhasingFormat) shouldBe false
}
}
it should "convert an HAPCUT2 file to VCF when there are overlapping phase blocks" in {
val out = makeTempFile("hap_cut_to_vcf.hapcut2", ".vcf")
new HapCutToVcf(
vcf = outOfOrderIn,
input = outOfOrderOut,
output = out,
gatkPhasingFormat = false
).execute()
// check that we have the same # of records in the output as the input
countVcfRecords(out) shouldBe countVcfRecords(outOfOrderIn)
// check that all records in the output are found in the input
compareVcfs(out, outOfOrderIn)
// get the # of phased variants from the output
val numPhasedFromOut = getNumPhasedFromVcf(out, false)
// check that the # of variants phased in the output agrees with the # of phased calls produced by HapCut
val hapCutReader = HapCutReader(outOfOrderOut)
val numPhasedFromHapCut = hapCutReader.flatMap(_.call).length
numPhasedFromOut shouldBe numPhasedFromHapCut
hapCutReader.close()
// check that the # of variants phased in the output agrees with the # of phased calls in the expected output
numPhasedFromOut shouldBe getNumPhasedFromVcf(outOfOrderOutVcf, false)
// check that if a variant is not phased it does not have a PS tag
hasPhasingSetFormatTagButUnphased(out, false) shouldBe false
}
it should "convert an empty HAPCUT1/HAPCUT2 file to VCF in both GATK and VCF-spec phasing format" in {
Iterator(true, false).foreach { gatkPhasingFormat =>
val out = makeTempFile("hap_cut_to_vcf.hapcut", ".vcf")
val hapCutOut = makeTempFile("hap_cut_to_vcf.hapcut", ".hapcut")
Io.writeLines(hapCutOut, Seq.empty)
new HapCutToVcf(
vcf = originalVcf,
input = hapCutOut,
output = out,
gatkPhasingFormat = gatkPhasingFormat
).execute()
// check that we have the same # of records in the output as the input
countVcfRecords(out) shouldBe countVcfRecords(originalVcf)
// check that all records in the output are found in the input
compareVcfs(out, originalVcf)
// get the # of phased variants from the output
getNumPhasedFromVcf(out, gatkPhasingFormat) shouldBe 0
// check that if a variant is not phased it does not have a PS tag
hasPhasingSetFormatTagButUnphased(out, gatkPhasingFormat) shouldBe false
}
}
it should "fail when there are missing variants in the input VCF" in {
val out = makeTempFile("hap_cut_to_vcf.hapcut2", ".vcf")
an[Exception] should be thrownBy new HapCutToVcf(
vcf = missingVariantsIn,
input = missingVariantsOut,
output = out,
gatkPhasingFormat = false
).execute()
}
it should "support missing genotype info in HapCut2" in {
Seq(noSwitchErrorsIn, skipPruneIn, withSwitchErrors).foreach { hapCut2In =>
val expectedOutput = PathUtil.replaceExtension(hapCut2In, ".vcf")
val out = makeTempFile("hap_cut_to_vcf.hapcut2", ".vcf")
new HapCutToVcf(
        vcf = missingGenotypeInfoIn,
input = hapCut2In,
output = out,
gatkPhasingFormat = false
).execute()
// check that we have the same # of records in the output as the input
      countVcfRecords(out) shouldBe countVcfRecords(missingGenotypeInfoIn)
// check that all records in the output are found in the input
      compareVcfs(out, missingGenotypeInfoIn)
// get the # of phased variants from the output
val numPhasedFromOut = getNumPhasedFromVcf(out, false)
// check that the # of variants phased in the output agrees with the # of phased calls produced by HapCut
val hapCutReader = HapCutReader(hapCut2In)
val numPhasedFromHapCut = hapCutReader.flatMap(_.call).length
numPhasedFromOut shouldBe numPhasedFromHapCut
hapCutReader.close()
// check that the # of variants phased in the output agrees with the # of phased calls in the expected output
//Files.copy(out, expectedOutput, StandardCopyOption.REPLACE_EXISTING)
numPhasedFromOut shouldBe getNumPhasedFromVcf(expectedOutput, false)
// check that if a variant is not phased it does not have a PS tag
hasPhasingSetFormatTagButUnphased(out, false) shouldBe false
}
}
it should "fail when IUPAC codes are found in the VCF" in {
val out = makeTempFile("hap_cut_to_vcf.hapcut", ".vcf")
an[Exception] should be thrownBy new HapCutToVcf(
vcf = withIupacIn,
input = withIupacOut,
output = out,
gatkPhasingFormat = false,
fixAmbiguousReferenceAlleles = false
).execute()
}
it should "succeed when IUPAC codes are found in the VCF and --fix-ambiguous-reference-alleles is specified" in {
val out = makeTempFile("hap_cut_to_vcf.hapcut", ".vcf")
new HapCutToVcf(
vcf = withIupacIn,
input = withIupacOut,
output = out,
gatkPhasingFormat = false,
fixAmbiguousReferenceAlleles = true
).execute()
// check the reference alleles are not IUPAC
    val referenceBases = Io.readLines(out).filterNot(_.startsWith("#")).flatMap(_.split('\t')(3)).mkString("")
referenceBases shouldBe "CCTGCCTG" // last G changed from K
}
"HapCutToVcf.HapCut2GenotypeInfo" should "ignore the values of pruned, SE, and NE when they are '.'" in {
// HapCut2 run with default options: switch error scores are blank
HapCut2GenotypeInfo(info="0\\t.\\t16.00", missingAlleles=false, thresholdPruning=false).asInstanceOf[HapCut2GenotypeInfo] shouldBe HapCut2GenotypeInfo(pruned=Some(false), None, Some(16.00))
// HapCut2 run with error analysis: switch error scores are present
HapCut2GenotypeInfo(info="0\\t1.00\\t16.00", missingAlleles=false, thresholdPruning=false).asInstanceOf[HapCut2GenotypeInfo] shouldBe HapCut2GenotypeInfo(pruned=Some(false), Some(1.00), Some(16.00))
// HapCut2 with skip prune: all info are blank
HapCut2GenotypeInfo(info=".\\t.\\t.", missingAlleles=false, thresholdPruning=false).asInstanceOf[HapCut2GenotypeInfo] shouldBe HapCut2GenotypeInfo(pruned=None, None, None)
}
}
| fulcrumgenomics/fgbio | src/test/scala/com/fulcrumgenomics/vcf/HapCutToVcfTest.scala | Scala | mit | 16,626 |
package com.adamsresearch.mbs.fanniemae.monthlyfiles
import java.text.SimpleDateFormat
import java.util.Date
/**
* Created by wma on 2/2/15.
*
* TODO: parse these COBOL PICTURE elements...
*/
class FixedRateQuartile
case class FixedQuartilesHeader(quartileRecordType: String,
poolNumber: String,
prefix: String,
reportingPeriod: Date, // yyyyMMdd
cusipNumber: String,
issueDate: Date) // yyyyMMdd
extends FixedRateQuartile
case class FixedQuartilesDetails(quartileRecordType: String)
extends FixedRateQuartile
object FixedRateQuartile {
val recordLength = 199
  // Parses a record, returning either a header or a details record depending on the
  // quartileRecordType field, or None if the record has the wrong length, an unknown
  // record type, or fields that fail to parse.
def parseFixedRateQuartile(record: String): Option[FixedRateQuartile] = {
try {
record.length match {
case `recordLength` =>
// look at first char, which tells us if this is a header or details record:
record.substring(0, 1) match {
case "1" =>
Some(new FixedQuartilesHeader(record.substring(0, 1),
record.substring(1, 7).trim,
record.substring(7, 10).trim,
new SimpleDateFormat("yyyyMMdd").parse(record.substring(10, 16)),
record.substring(16, 25).trim,
new SimpleDateFormat("yyyyMMdd").parse(record.substring(25, 34))
))
case "2" =>
Some(new FixedQuartilesDetails(record.substring(0, 1)))
case _ => None
}
case _ => None
}
} catch {
case ex: Exception => None
}
}
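  // Usage sketch (hypothetical records, not real Fannie Mae data; assumes the date fields parse):
  //   FixedRateQuartile.parseFixedRateQuartile(headerRecord)  // Some(FixedQuartilesHeader(...)) for a 199-char record starting with "1"
  //   FixedRateQuartile.parseFixedRateQuartile(detailRecord)  // Some(FixedQuartilesDetails("2")) for a 199-char record starting with "2"
  //   FixedRateQuartile.parseFixedRateQuartile("too short")   // None: wrong length, unknown record type, or unparseable fields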
}
| waynemadams/mbs-parser | src/main/scala/com/adamsresearch/mbs/fanniemae/monthlyfiles/FixedRateQuartile.scala | Scala | apache-2.0 | 1,973 |
package org.adridadou.ethereum.propeller.values
import org.adridadou.ethereum.propeller.keystore.AccountProvider
import org.ethereum.crypto.ECKey
import org.scalacheck.Arbitrary._
import org.scalacheck.Prop._
import org.scalatest.check.Checkers
import org.scalatest.{Matchers, _}
import scala.util.{Failure, Success, Try}
/**
* Created by davidroon on 26.03.17.
* This code is released under Apache 2 license
*/
class EthAccountTest extends FlatSpec with Matchers with Checkers {
"An ethereum account" should "generate the same key than the one in ethereumJ if given a specific seed" in {
check(forAll(arbitrary[BigInt])(checkSameAddressGenerated))
}
it should "be able to generate and then recover from a random account" in {
val account = AccountProvider.random()
val data = account.getDataPrivateKey
val account2 = AccountProvider.fromPrivateKey(data)
account.getAddress shouldEqual account2.getAddress
}
private def checkSameAddressGenerated(seed: BigInt) = {
    if (seed === 0) {
Try(ECKey.fromPrivate(seed.bigInteger)) match {
case Success(_) =>
throw new RuntimeException("it should not be possible to create a private key from int 0")
case Failure(ex) =>
ex.getMessage shouldEqual "Public key must not be a point at infinity, probably your private key is incorrect"
true
}
} else {
val ethjVersion = ECKey.fromPrivate(seed.bigInteger)
val propellerVersion = new EthAccount(seed.bigInteger)
val ethjAddress = EthAddress.of(ethjVersion.getAddress)
ethjVersion.getPubKeyPoint shouldEqual propellerVersion.getPublicKey
ethjAddress shouldEqual propellerVersion.getAddress
true
}
}
}
| adridadou/eth-propeller-core | src/test/scala/org/adridadou/ethereum/propeller/values/EthAccountTest.scala | Scala | apache-2.0 | 1,735 |
package is.hail.methods
import is.hail.HailContext
import is.hail.annotations._
import is.hail.expr.ir._
import is.hail.expr.ir.functions.MatrixToTableFunction
import is.hail.types.physical.{PCanonicalString, PCanonicalStruct, PFloat64, PInt64, PString, PStruct}
import is.hail.types.virtual.{TFloat64, TStruct}
import is.hail.types.{MatrixType, TableType}
import is.hail.rvd.RVDContext
import is.hail.sparkextras.ContextRDD
import is.hail.utils._
import is.hail.variant.{Call, Genotype, HardCallView}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.Row
import scala.language.higherKinds
object IBDInfo {
def apply(Z0: Double, Z1: Double, Z2: Double): IBDInfo = {
IBDInfo(Z0, Z1, Z2, Z1 / 2 + Z2)
}
val pType =
PCanonicalStruct(("Z0", PFloat64()), ("Z1", PFloat64()), ("Z2", PFloat64()), ("PI_HAT", PFloat64()))
def fromRegionValue(offset: Long): IBDInfo = {
val Z0 = Region.loadDouble(pType.loadField(offset, 0))
val Z1 = Region.loadDouble(pType.loadField(offset, 1))
val Z2 = Region.loadDouble(pType.loadField(offset, 2))
val PI_HAT = Region.loadDouble(pType.loadField(offset, 3))
IBDInfo(Z0, Z1, Z2, PI_HAT)
}
}
case class IBDInfo(Z0: Double, Z1: Double, Z2: Double, PI_HAT: Double) {
def pointwiseMinus(that: IBDInfo): IBDInfo =
IBDInfo(Z0 - that.Z0, Z1 - that.Z1, Z2 - that.Z2, PI_HAT - that.PI_HAT)
def hasNaNs: Boolean = Array(Z0, Z1, Z2, PI_HAT).exists(_.isNaN)
def toAnnotation: Annotation = Annotation(Z0, Z1, Z2, PI_HAT)
def toRegionValue(rvb: RegionValueBuilder) {
rvb.addDouble(Z0)
rvb.addDouble(Z1)
rvb.addDouble(Z2)
rvb.addDouble(PI_HAT)
}
}
object ExtendedIBDInfo {
val pType =
PCanonicalStruct(("ibd", IBDInfo.pType), ("ibs0", PInt64()), ("ibs1", PInt64()), ("ibs2", PInt64()))
def fromRegionValue(offset: Long): ExtendedIBDInfo = {
val ibd = IBDInfo.fromRegionValue(pType.loadField(offset, 0))
val ibs0 = Region.loadLong(pType.loadField(offset, 1))
val ibs1 = Region.loadLong(pType.loadField(offset, 2))
val ibs2 = Region.loadLong(pType.loadField(offset, 3))
ExtendedIBDInfo(ibd, ibs0, ibs1, ibs2)
}
}
case class ExtendedIBDInfo(ibd: IBDInfo, ibs0: Long, ibs1: Long, ibs2: Long) {
def pointwiseMinus(that: ExtendedIBDInfo): ExtendedIBDInfo =
ExtendedIBDInfo(ibd.pointwiseMinus(that.ibd), ibs0 - that.ibs0, ibs1 - that.ibs1, ibs2 - that.ibs2)
def hasNaNs: Boolean = ibd.hasNaNs
def makeRow(i: Any, j: Any): Row = Row(i, j, ibd.toAnnotation, ibs0, ibs1, ibs2)
def toRegionValue(rvb: RegionValueBuilder) {
rvb.startStruct()
ibd.toRegionValue(rvb)
rvb.endStruct()
rvb.addLong(ibs0)
rvb.addLong(ibs1)
rvb.addLong(ibs2)
}
}
case class IBSExpectations(
E00: Double, E10: Double, E20: Double,
E11: Double, E21: Double, E22: Double = 1, nonNaNCount: Int = 1) {
def hasNaNs: Boolean = Array(E00, E10, E20, E11, E21).exists(_.isNaN)
def normalized: IBSExpectations =
IBSExpectations(E00 / nonNaNCount, E10 / nonNaNCount, E20 / nonNaNCount, E11 / nonNaNCount, E21 / nonNaNCount, E22, this.nonNaNCount)
def scaled(N: Long): IBSExpectations =
IBSExpectations(E00 * N, E10 * N, E20 * N, E11 * N, E21 * N, E22 * N, this.nonNaNCount)
def join(that: IBSExpectations): IBSExpectations =
if (this.hasNaNs)
that
else if (that.hasNaNs)
this
else
IBSExpectations(E00 + that.E00,
E10 + that.E10,
E20 + that.E20,
E11 + that.E11,
E21 + that.E21,
nonNaNCount = nonNaNCount + that.nonNaNCount)
}
object IBSExpectations {
def empty: IBSExpectations = IBSExpectations(0, 0, 0, 0, 0, nonNaNCount = 0)
}
object IBD {
def indicator(b: Boolean): Int = if (b) 1 else 0
def countRefs(gtIdx: Int): Int = {
val gt = Genotype.allelePair(gtIdx)
indicator(gt.j == 0) + indicator(gt.k == 0)
}
def ibsForGenotypes(gs: HardCallView, maybeMaf: Option[Double]): IBSExpectations = {
def calculateCountsFromMAF(maf: Double) = {
var count = 0
var i = 0
while (i < gs.getLength) {
gs.setGenotype(i)
if (gs.hasGT) count += 1
i += 1
}
val Na = count * 2.0
val p = 1 - maf
val q = maf
val x = Na * p
val y = Na * q
(Na, x, y, p, q)
}
def estimateFrequenciesFromSample = {
var na = 0
var x = 0.0
var i = 0
while (i < gs.getLength) {
gs.setGenotype(i)
if (gs.hasGT) {
na += 2
x += countRefs(Call.unphasedDiploidGtIndex(gs.getGT))
}
i += 1
}
val Na = na.toDouble
val y = Na - x
val p = x / Na
val q = y / Na
(Na, x, y, p, q)
}
val (na, x, y, p, q) =
maybeMaf.map(calculateCountsFromMAF).getOrElse(estimateFrequenciesFromSample)
val Na = na
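    // Expected proportions of sites in each IBS state given the IBD state, computed with a
    // PLINK-style method of moments under Hardy-Weinberg equilibrium: aXY is (approximately)
    // E[IBS = X | IBD = Y], with finite-sample corrections written in terms of the reference and
    // alternate allele counts x and y and the total allele count Na.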
val a00 = 2 * p * p * q * q * ((x - 1) / x * (y - 1) / y * (Na / (Na - 1)) * (Na / (Na - 2)) * (Na / (Na - 3)))
    val a10 = 4 * p * p * p * q * ((x - 1) / x * (x - 2) / x * (Na / (Na - 1)) * (Na / (Na - 2)) * (Na / (Na - 3))) +
      4 * p * q * q * q * ((y - 1) / y * (y - 2) / y * (Na / (Na - 1)) * (Na / (Na - 2)) * (Na / (Na - 3)))
    val a20 = q * q * q * q * ((y - 1) / y * (y - 2) / y * (y - 3) / y * (Na / (Na - 1)) * (Na / (Na - 2)) * (Na / (Na - 3))) +
      p * p * p * p * ((x - 1) / x * (x - 2) / x * (x - 3) / x * (Na / (Na - 1)) * (Na / (Na - 2)) * (Na / (Na - 3))) +
      4 * p * p * q * q * ((x - 1) / x * (y - 1) / y * (Na / (Na - 1)) * (Na / (Na - 2)) * (Na / (Na - 3)))
    val a11 = 2 * p * p * q * ((x - 1) / x * Na / (Na - 1) * Na / (Na - 2)) +
      2 * p * q * q * ((y - 1) / y * Na / (Na - 1) * Na / (Na - 2))
    val a21 = p * p * p * ((x - 1) / x * (x - 2) / x * Na / (Na - 1) * Na / (Na - 2)) +
      q * q * q * ((y - 1) / y * (y - 2) / y * Na / (Na - 1) * Na / (Na - 2)) +
      p * p * q * ((x - 1) / x * Na / (Na - 1) * Na / (Na - 2)) +
      p * q * q * ((y - 1) / y * Na / (Na - 1) * Na / (Na - 2))
IBSExpectations(a00, a10, a20, a11, a21)
}
def calculateIBDInfo(N0: Long, N1: Long, N2: Long, ibse: IBSExpectations, bounded: Boolean): ExtendedIBDInfo = {
val ibseN = ibse.scaled(N0 + N1 + N2)
val Z0 = N0 / ibseN.E00
val Z1 = (N1 - Z0 * ibseN.E10) / ibseN.E11
val Z2 = (N2 - Z0 * ibseN.E20 - Z1 * ibseN.E21) / ibseN.E22
val ibd = if (bounded) {
if (Z0 > 1) {
IBDInfo(1, 0, 0)
} else if (Z1 > 1) {
IBDInfo(0, 1, 0)
} else if (Z2 > 1) {
IBDInfo(0, 0, 1)
} else if (Z0 < 0) {
val S = Z1 + Z2
IBDInfo(0, Z1 / S, Z2 / S)
} else if (Z1 < 0) {
val S = Z0 + Z2
IBDInfo(Z0 / S, 0, Z2 / S)
} else if (Z2 < 0) {
val S = Z0 + Z1
IBDInfo(Z0 / S, Z1 / S, 0)
} else {
IBDInfo(Z0, Z1, Z2)
}
} else {
IBDInfo(Z0, Z1, Z2)
}
ExtendedIBDInfo(ibd, N0, N1, N2)
}
final val chunkSize = 1024
def computeIBDMatrix(input: MatrixValue,
computeMaf: Option[(RegionValue) => Double],
min: Option[Double],
max: Option[Double],
sampleIds: IndexedSeq[String],
bounded: Boolean): ContextRDD[Long] = {
val nSamples = input.nCols
val rowPType = input.rvRowPType
val unnormalizedIbse = input.rvd.mapPartitions { (ctx, it) =>
val rv = RegionValue(ctx.r)
val view = HardCallView(rowPType)
it.map { ptr =>
rv.setOffset(ptr)
view.set(ptr)
ibsForGenotypes(view, computeMaf.map(f => f(rv)))
}
}.fold(IBSExpectations.empty)(_ join _)
val ibse = unnormalizedIbse.normalized
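    // Genotypes are packed into chunkSize x chunkSize blocks (variants x samples) so that IBS
    // counts can be accumulated block-by-block; only blocks on or above the diagonal of the
    // sample-pair grid are joined below, since IBD is symmetric in the two samples.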
val chunkedGenotypeMatrix = input.rvd.mapPartitions { (_, it) =>
val view = HardCallView(rowPType)
it.map { ptr =>
view.set(ptr)
Array.tabulate[Byte](view.getLength) { i =>
view.setGenotype(i)
if (view.hasGT)
IBSFFI.gtToCRep(Call.unphasedDiploidGtIndex(view.getGT))
else
IBSFFI.missingGTCRep
}
}
}
.zipWithIndex()
.flatMap { case (gts, variantId) =>
val vid = (variantId % chunkSize).toInt
gts.grouped(chunkSize)
.zipWithIndex
.map { case (gtGroup, i) => ((i, variantId / chunkSize), (vid, gtGroup)) }
}
.aggregateByKey(Array.fill(chunkSize * chunkSize)(IBSFFI.missingGTCRep))({ case (x, (vid, gs)) =>
for (i <- gs.indices) x(vid * chunkSize + i) = gs(i)
x
}, { case (x, y) =>
for (i <- y.indices)
if (x(i) == IBSFFI.missingGTCRep)
x(i) = y(i)
x
})
.map { case ((s, v), gs) => (v, (s, IBSFFI.pack(chunkSize, chunkSize, gs))) }
val joined = ContextRDD.weaken(chunkedGenotypeMatrix.join(chunkedGenotypeMatrix)
// optimization: Ignore chunks below the diagonal
.filter { case (_, ((i, _), (j, _))) => j >= i }
.map { case (_, ((s1, gs1), (s2, gs2))) =>
((s1, s2), IBSFFI.ibs(chunkSize, chunkSize, gs1, gs2))
}
.reduceByKey { (a, b) =>
var i = 0
while (i != a.length) {
a(i) += b(i)
i += 1
}
a
})
joined
.cmapPartitions { (ctx, it) =>
val rvb = new RegionValueBuilder(ctx.region)
for {
((iChunk, jChunk), ibses) <- it
si <- (0 until chunkSize).iterator
sj <- (0 until chunkSize).iterator
i = iChunk * chunkSize + si
j = jChunk * chunkSize + sj
if j > i && j < nSamples && i < nSamples
idx = si * chunkSize + sj
eibd = calculateIBDInfo(ibses(idx * 3), ibses(idx * 3 + 1), ibses(idx * 3 + 2), ibse, bounded)
if min.forall(eibd.ibd.PI_HAT >= _) && max.forall(eibd.ibd.PI_HAT <= _)
} yield {
rvb.start(ibdPType)
rvb.startStruct()
rvb.addString(sampleIds(i))
rvb.addString(sampleIds(j))
eibd.toRegionValue(rvb)
rvb.endStruct()
rvb.end()
}
}
}
private val ibdPType =
PCanonicalStruct(required = true, Array(("i", PCanonicalString()), ("j", PCanonicalString())) ++ ExtendedIBDInfo.pType.fields.map(f => (f.name, f.typ)): _*)
private val ibdKey = FastIndexedSeq("i", "j")
private[methods] def generateComputeMaf(input: MatrixValue, fieldName: String): (RegionValue) => Double = {
val rvRowType = input.rvRowType
val rvRowPType = input.rvRowPType
val field = rvRowType.field(fieldName)
assert(field.typ == TFloat64)
val rowKeysF = input.typ.extractRowKey
val entriesIdx = input.entriesIdx
val idx = rvRowType.fieldIdx(fieldName)
(rv: RegionValue) => {
val isDefined = rvRowPType.isFieldDefined(rv.offset, idx)
val maf = Region.loadDouble(rvRowPType.loadField(rv.offset, idx))
if (!isDefined) {
val row = new UnsafeRow(rvRowPType, rv).deleteField(entriesIdx)
fatal(s"The minor allele frequency expression evaluated to NA at ${ rowKeysF(row) }.")
}
if (maf < 0.0 || maf > 1.0) {
val row = new UnsafeRow(rvRowPType, rv).deleteField(entriesIdx)
fatal(s"The minor allele frequency expression for ${ rowKeysF(row) } evaluated to $maf which is not in [0,1].")
}
maf
}
}
}
case class IBD(
mafFieldName: Option[String] = None,
bounded: Boolean = true,
min: Option[Double] = None,
max: Option[Double] = None) extends MatrixToTableFunction {
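  // Typically reached through Hail's Python API, e.g. hl.identity_by_descent(dataset, maf=...,
  // bounded=..., min=..., max=...) (the wrapper name is an assumption about the public API);
  // that call constructs this MatrixToTableFunction and executes it against the matrix table.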
min.foreach(min => optionCheckInRangeInclusive(0.0, 1.0)("minimum", min))
max.foreach(max => optionCheckInRangeInclusive(0.0, 1.0)("maximum", max))
min.liftedZip(max).foreach { case (min, max) =>
if (min > max) {
fatal(s"minimum must be less than or equal to maximum: ${ min }, ${ max }")
}
}
def preservesPartitionCounts: Boolean = false
def typ(childType: MatrixType): TableType =
TableType(IBD.ibdPType.virtualType, IBD.ibdKey, TStruct.empty)
def execute(ctx: ExecuteContext, input: MatrixValue): TableValue = {
input.requireUniqueSamples("ibd")
val computeMaf = mafFieldName.map(IBD.generateComputeMaf(input, _))
val crdd = IBD.computeIBDMatrix(input, computeMaf, min, max, input.stringSampleIds, bounded)
TableValue(ctx, IBD.ibdPType, IBD.ibdKey, crdd)
}
}
| danking/hail | hail/src/main/scala/is/hail/methods/IBD.scala | Scala | mit | 12,135 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.dllib.nn
import com.intel.analytics.bigdl.dllib.nn.abstractnn.{AbstractModule, Activity}
import com.intel.analytics.bigdl.dllib.tensor.Tensor
import com.intel.analytics.bigdl.dllib.tensor.TensorNumericMath.{NumericWildcard, TensorNumeric}
import com.intel.analytics.bigdl.dllib.utils.{T, Table}
import scala.reflect.ClassTag
/**
* Stacks a list of n-dimensional tensors into one (n+1)-dimensional tensor.
* @param dimension the dimension to stack along
 * @tparam T Numeric type. Only float/double are supported for now
*/
@SerialVersionUID(3457313421501931556L)
class Pack[T: ClassTag] (val dimension: Int)(implicit ev: TensorNumeric[T])
extends AbstractModule[Activity, Tensor[_], T] {
private def getPositiveDimension(input: Table): Int = {
var nDim = this.dimension
val firstInput: Tensor[_] = input(1)
if (nDim < 0) {
nDim = firstInput.dim() + nDim + 1
}
    require(nDim <= firstInput.dim() + 1, "dimension exceeds input dimensions: " +
      s"dimension $nDim, inputDimension ${firstInput.dim()}")
nDim
}
override def updateOutput(input: Activity): Tensor[_] = {
val tableInput = input match {
case t: Tensor[_] => T(t)
case t: Table => t
}
val dimension = getPositiveDimension(tableInput)
val firstInput: Tensor[_] = tableInput(1)
val nDim = firstInput.nDimension()
val size: Array[Int] = new Array[Int](nDim + 1)
var i = 1
while(i <= nDim + 1) {
if (i < dimension) {
size(i-1) = firstInput.size(i)
} else if (i == dimension) {
size(i-1) = tableInput.length()
} else {
size(i-1) = firstInput.size(i - 1)
}
i = i + 1
}
if (output.getType() != firstInput.getType()) {
output = firstInput.emptyInstance()
}
output.resize(size)
i = 1
while (i <= tableInput.length()) {
val currentOutput = tableInput[Tensor[NumericWildcard]](i)
output.narrow(dimension, i, 1).asInstanceOf[Tensor[NumericWildcard]]
.copy(currentOutput)
i += 1
}
output
}
override def updateGradInput(input: Activity, gradOutput: Tensor[_]): Activity = {
val tableInput = input match {
case t: Tensor[_] => T(t)
case t: Table => t
}
val dimension = getPositiveDimension(tableInput)
val firstInput = tableInput[Tensor[_]](1)
if (input.isTensor) {
if (gradInput == null ||
gradInput.asInstanceOf[Tensor[_]].getType() != firstInput.getType()) {
gradInput = firstInput.emptyInstance()
}
val gradInputTensor = gradInput.asInstanceOf[Tensor[NumericWildcard]]
gradInputTensor.resizeAs(firstInput)
gradInputTensor.copy(firstInput.asInstanceOf[Tensor[NumericWildcard]])
} else {
if (gradInput == null) gradInput = T()
val gradInputTable = gradInput.toTable
var i = 1
while (i <= tableInput.length()) {
if (!gradInputTable.contains(i)) {
gradInputTable(i) = gradOutput.emptyInstance()
}
gradInputTable[Tensor[_]](i).resizeAs(tableInput(i))
i += 1
}
i = 1
while (i <= tableInput.length()) {
val currentGradInput = gradOutput.select(dimension, i).asInstanceOf[Tensor[NumericWildcard]]
gradInputTable[Tensor[NumericWildcard]](i).copy(currentGradInput)
i += 1
}
}
gradInput
}
}
object Pack {
def apply[T: ClassTag](
dimension: Int)(implicit ev: TensorNumeric[T]): Pack[T] = {
new Pack[T](dimension)
}
}
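// Usage sketch (not part of the original file; assumes an implicit TensorNumeric[Float] is in
// scope, e.g. TensorNumericMath.TensorNumeric.NumericFloat):
//   val a = Tensor[Float](2, 3).rand()
//   val b = Tensor[Float](2, 3).rand()
//   val stacked = Pack[Float](dimension = 1).forward(T(a, b))
//   // stacked.size() is Array(2, 2, 3): the two inputs are stacked along the new first dimension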
| intel-analytics/BigDL | scala/dllib/src/main/scala/com/intel/analytics/bigdl/dllib/nn/Pack.scala | Scala | apache-2.0 | 4,126 |
package play.api.libs.ws
import org.specs2.mutable._
import org.specs2.mock.Mockito
import com.ning.http.client.{
Response => AHCResponse,
Cookie => AHCCookie
}
import java.util
object WSSpec extends Specification with Mockito {
"WS" should {
"support several query string values for a parameter" in {
val req = WS.url("http://playframework.com/")
.withQueryString("foo"->"foo1", "foo"->"foo2")
.prepare("GET").build
req.getQueryParams.get("foo").contains("foo1") must beTrue
req.getQueryParams.get("foo").contains("foo2") must beTrue
req.getQueryParams.get("foo").size must equalTo (2)
}
}
"WS Response" should {
"get cookies from an AHC response" in {
val ahcResponse : AHCResponse = mock[AHCResponse]
val (domain, name, value, path, maxAge, secure) = ("example.com", "someName", "someValue", "/", 1000, false)
val ahcCookie : AHCCookie = new AHCCookie(domain, name, value, path, maxAge, secure)
ahcResponse.getCookies returns util.Arrays.asList(ahcCookie)
val response = Response(ahcResponse)
val cookies : Seq[Cookie] = response.cookies
val cookie = cookies(0)
cookie.domain must ===("example.com")
cookie.name must beSome("someName")
cookie.value must beSome("someValue")
cookie.path must ===("/")
cookie.maxAge must ===(1000)
cookie.secure must beFalse
}
"get a single cookie from an AHC response" in {
val ahcResponse : AHCResponse = mock[AHCResponse]
val (domain, name, value, path, maxAge, secure) = ("example.com", "someName", "someValue", "/", 1000, false)
val ahcCookie : AHCCookie = new AHCCookie(domain, name, value, path, maxAge, secure)
ahcResponse.getCookies returns util.Arrays.asList(ahcCookie)
val response = Response(ahcResponse)
val optionCookie = response.cookie("someName")
optionCookie must beSome[Cookie].which { cookie =>
cookie.domain must ===("example.com")
cookie.name must beSome("someName")
cookie.value must beSome("someValue")
cookie.path must ===("/")
cookie.maxAge must ===(1000)
cookie.secure must beFalse
}
}
}
}
| michaelahlers/team-awesome-wedding | vendor/play-2.2.1/framework/src/play/src/test/scala/play/api/libs/ws/WSSpec.scala | Scala | mit | 2,223 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.streaming
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.encoders.ExpressionEncoder
import org.apache.spark.sql.catalyst.expressions.{Ascending, Attribute, AttributeReference, Expression, Literal, SortOrder, UnsafeRow}
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.plans.physical.{ClusteredDistribution, Distribution}
import org.apache.spark.sql.execution._
import org.apache.spark.sql.execution.streaming.GroupStateImpl.NO_TIMESTAMP
import org.apache.spark.sql.execution.streaming.state._
import org.apache.spark.sql.streaming.{GroupStateTimeout, OutputMode}
import org.apache.spark.sql.types.IntegerType
import org.apache.spark.util.CompletionIterator
/**
 * Physical operator for executing `FlatMapGroupsWithState`.
*
* @param func function called on each group
* @param keyDeserializer used to extract the key object for each group.
* @param valueDeserializer used to extract the items in the iterator from an input row.
* @param groupingAttributes used to group the data
* @param dataAttributes used to read the data
* @param outputObjAttr used to define the output object
* @param stateEncoder used to serialize/deserialize state before calling `func`
* @param outputMode the output mode of `func`
 * @param timeoutConf used to time out groups that have not received data in a while
* @param batchTimestampMs processing timestamp of the current batch.
*/
case class FlatMapGroupsWithStateExec(
func: (Any, Iterator[Any], LogicalGroupState[Any]) => Iterator[Any],
keyDeserializer: Expression,
valueDeserializer: Expression,
groupingAttributes: Seq[Attribute],
dataAttributes: Seq[Attribute],
outputObjAttr: Attribute,
stateInfo: Option[StatefulOperatorStateInfo],
stateEncoder: ExpressionEncoder[Any],
outputMode: OutputMode,
timeoutConf: GroupStateTimeout,
batchTimestampMs: Option[Long],
override val eventTimeWatermark: Option[Long],
child: SparkPlan
) extends UnaryExecNode with ObjectProducerExec with StateStoreWriter with WatermarkSupport {
import GroupStateImpl._
private val isTimeoutEnabled = timeoutConf != NoTimeout
private val timestampTimeoutAttribute =
AttributeReference("timeoutTimestamp", dataType = IntegerType, nullable = false)()
private val stateAttributes: Seq[Attribute] = {
val encSchemaAttribs = stateEncoder.schema.toAttributes
if (isTimeoutEnabled) encSchemaAttribs :+ timestampTimeoutAttribute else encSchemaAttribs
}
// Get the serializer for the state, taking into account whether we need to save timestamps
private val stateSerializer = {
val encoderSerializer = stateEncoder.namedExpressions
if (isTimeoutEnabled) {
encoderSerializer :+ Literal(GroupStateImpl.NO_TIMESTAMP)
} else {
encoderSerializer
}
}
// Get the deserializer for the state. Note that this must be done in the driver, as
// resolving and binding of deserializer expressions to the encoded type can be safely done
// only in the driver.
private val stateDeserializer = stateEncoder.resolveAndBind().deserializer
private val watermarkPresent = child.output.exists {
case a: Attribute if a.metadata.contains(EventTimeWatermark.delayKey) => true
case _ => false
}
/** Distribute by grouping attributes */
override def requiredChildDistribution: Seq[Distribution] =
ClusteredDistribution(groupingAttributes, stateInfo.map(_.numPartitions)) :: Nil
/** Ordering needed for using GroupingIterator */
override def requiredChildOrdering: Seq[Seq[SortOrder]] =
Seq(groupingAttributes.map(SortOrder(_, Ascending)))
override def keyExpressions: Seq[Attribute] = groupingAttributes
override protected def doExecute(): RDD[InternalRow] = {
metrics // force lazy init at driver
// Throw errors early if parameters are not as expected
timeoutConf match {
case ProcessingTimeTimeout =>
require(batchTimestampMs.nonEmpty)
case EventTimeTimeout =>
require(eventTimeWatermark.nonEmpty) // watermark value has been populated
require(watermarkExpression.nonEmpty) // input schema has watermark attribute
case _ =>
}
child.execute().mapPartitionsWithStateStore[InternalRow](
getStateInfo,
groupingAttributes.toStructType,
stateAttributes.toStructType,
indexOrdinal = None,
sqlContext.sessionState,
Some(sqlContext.streams.stateStoreCoordinator)) { case (store, iter) =>
val updater = new StateStoreUpdater(store)
// If timeout is based on event time, then filter late data based on watermark
val filteredIter = watermarkPredicateForData match {
case Some(predicate) if timeoutConf == EventTimeTimeout =>
iter.filter(row => !predicate.eval(row))
case _ =>
iter
}
      // Generate an iterator that returns the rows grouped by the grouping function
// Note that this code ensures that the filtering for timeout occurs only after
// all the data has been processed. This is to ensure that the timeout information of all
// the keys with data is updated before they are processed for timeouts.
val outputIterator =
updater.updateStateForKeysWithData(filteredIter) ++ updater.updateStateForTimedOutKeys()
// Return an iterator of all the rows generated by all the keys, such that when fully
// consumed, all the state updates will be committed by the state store
CompletionIterator[InternalRow, Iterator[InternalRow]](
outputIterator,
{
store.commit()
setStoreMetrics(store)
}
)
}
}
/** Helper class to update the state store */
class StateStoreUpdater(store: StateStore) {
// Converters for translating input keys, values, output data between rows and Java objects
private val getKeyObj =
ObjectOperator.deserializeRowToObject(keyDeserializer, groupingAttributes)
private val getValueObj =
ObjectOperator.deserializeRowToObject(valueDeserializer, dataAttributes)
private val getOutputRow = ObjectOperator.wrapObjectToRow(outputObjAttr.dataType)
// Converters for translating state between rows and Java objects
private val getStateObjFromRow = ObjectOperator.deserializeRowToObject(
stateDeserializer, stateAttributes)
private val getStateRowFromObj = ObjectOperator.serializeObjectToRow(stateSerializer)
// Index of the additional metadata fields in the state row
private val timeoutTimestampIndex = stateAttributes.indexOf(timestampTimeoutAttribute)
// Metrics
private val numUpdatedStateRows = longMetric("numUpdatedStateRows")
private val numOutputRows = longMetric("numOutputRows")
/**
* For every group, get the key, values and corresponding state and call the function,
* and return an iterator of rows
*/
def updateStateForKeysWithData(dataIter: Iterator[InternalRow]): Iterator[InternalRow] = {
val groupedIter = GroupedIterator(dataIter, groupingAttributes, child.output)
groupedIter.flatMap { case (keyRow, valueRowIter) =>
val keyUnsafeRow = keyRow.asInstanceOf[UnsafeRow]
callFunctionAndUpdateState(
keyUnsafeRow,
valueRowIter,
store.get(keyUnsafeRow),
hasTimedOut = false)
}
}
/** Find the groups that have timeout set and are timing out right now, and call the function */
def updateStateForTimedOutKeys(): Iterator[InternalRow] = {
if (isTimeoutEnabled) {
val timeoutThreshold = timeoutConf match {
case ProcessingTimeTimeout => batchTimestampMs.get
case EventTimeTimeout => eventTimeWatermark.get
case _ =>
throw new IllegalStateException(
s"Cannot filter timed out keys for $timeoutConf")
}
val timingOutKeys = store.getRange(None, None).filter { rowPair =>
val timeoutTimestamp = getTimeoutTimestamp(rowPair.value)
timeoutTimestamp != NO_TIMESTAMP && timeoutTimestamp < timeoutThreshold
}
timingOutKeys.flatMap { rowPair =>
callFunctionAndUpdateState(rowPair.key, Iterator.empty, rowPair.value, hasTimedOut = true)
}
} else Iterator.empty
}
/**
     * Call the user function on a key's data, update the state store, and return the iterator
     * of returned data. Note that the store update is lazy, that is, the store will be updated only
* after the returned iterator is fully consumed.
*
* @param keyRow Row representing the key, cannot be null
* @param valueRowIter Iterator of values as rows, cannot be null, but can be empty
* @param prevStateRow Row representing the previous state, can be null
* @param hasTimedOut Whether this function is being called for a key timeout
*/
private def callFunctionAndUpdateState(
keyRow: UnsafeRow,
valueRowIter: Iterator[InternalRow],
prevStateRow: UnsafeRow,
hasTimedOut: Boolean): Iterator[InternalRow] = {
val keyObj = getKeyObj(keyRow) // convert key to objects
val valueObjIter = valueRowIter.map(getValueObj.apply) // convert value rows to objects
val stateObj = getStateObj(prevStateRow)
val keyedState = GroupStateImpl.createForStreaming(
Option(stateObj),
batchTimestampMs.getOrElse(NO_TIMESTAMP),
eventTimeWatermark.getOrElse(NO_TIMESTAMP),
timeoutConf,
hasTimedOut,
watermarkPresent)
// Call function, get the returned objects and convert them to rows
val mappedIterator = func(keyObj, valueObjIter, keyedState).map { obj =>
numOutputRows += 1
getOutputRow(obj)
}
// When the iterator is consumed, then write changes to state
def onIteratorCompletion: Unit = {
val currentTimeoutTimestamp = keyedState.getTimeoutTimestamp
// If the state has not yet been set but timeout has been set, then
        // we have to generate a row to save the timeout. However, attempting to serialize
// null using case class encoder throws -
// java.lang.NullPointerException: Null value appeared in non-nullable field:
// If the schema is inferred from a Scala tuple / case class, or a Java bean, please
// try to use scala.Option[_] or other nullable types.
if (!keyedState.exists && currentTimeoutTimestamp != NO_TIMESTAMP) {
throw new IllegalStateException(
"Cannot set timeout when state is not defined, that is, state has not been" +
"initialized or has been removed")
}
if (keyedState.hasRemoved) {
store.remove(keyRow)
numUpdatedStateRows += 1
} else {
val previousTimeoutTimestamp = getTimeoutTimestamp(prevStateRow)
val stateRowToWrite = if (keyedState.hasUpdated) {
getStateRow(keyedState.get)
} else {
prevStateRow
}
val hasTimeoutChanged = currentTimeoutTimestamp != previousTimeoutTimestamp
val shouldWriteState = keyedState.hasUpdated || hasTimeoutChanged
if (shouldWriteState) {
if (stateRowToWrite == null) {
// This should never happen because checks in GroupStateImpl should avoid cases
// where empty state would need to be written
throw new IllegalStateException("Attempting to write empty state")
}
setTimeoutTimestamp(stateRowToWrite, currentTimeoutTimestamp)
store.put(keyRow, stateRowToWrite)
numUpdatedStateRows += 1
}
}
}
      // Return an iterator of rows such that, when fully consumed, the updated state value will be saved
CompletionIterator[InternalRow, Iterator[InternalRow]](mappedIterator, onIteratorCompletion)
}
/** Returns the state as Java object if defined */
def getStateObj(stateRow: UnsafeRow): Any = {
if (stateRow != null) getStateObjFromRow(stateRow) else null
}
/** Returns the row for an updated state */
def getStateRow(obj: Any): UnsafeRow = {
assert(obj != null)
getStateRowFromObj(obj)
}
    /** Returns the timeout timestamp of a state row, if set */
def getTimeoutTimestamp(stateRow: UnsafeRow): Long = {
if (isTimeoutEnabled && stateRow != null) {
stateRow.getLong(timeoutTimestampIndex)
} else NO_TIMESTAMP
}
/** Set the timestamp in a state row */
def setTimeoutTimestamp(stateRow: UnsafeRow, timeoutTimestamps: Long): Unit = {
if (isTimeoutEnabled) stateRow.setLong(timeoutTimestampIndex, timeoutTimestamps)
}
}
}
| brad-kaiser/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/FlatMapGroupsWithStateExec.scala | Scala | apache-2.0 | 13,642 |
package philosophy.finallytagless
import language.higherKinds
import scala.scalajs.js.JSApp
import org.scalajs.dom._
import cats.{~>, Id, Monad}
import cats.syntax.flatMap._
import cats.syntax.functor._
import philosophy.{Graph, wikiapi, RFuture}
import philosophy.RFuture._
import philosophy.IO._
import philosophy.crawlStates._
import philosophy.finallytagless.Interpreter.~
trait Wiki[F[_]] {
def randomPage : F[String]
def nextLinks(current: String ) : F[List[String]]
}
object Wiki {
def randomPage : Term[Wiki,String] = Term[Wiki] { _.randomPage }
def nextLinks(current: String ) : Term[Wiki,List[String]] = Term[Wiki] { _.nextLinks(current) }
}
object JsonpWiki extends Wiki[RFuture] {
def randomPage: RFuture[String] = (ec) => wikiapi.randomPage(ec)
def nextLinks(current: String): RFuture[List[String]] = (ec) => wikiapi.nextLinks(current)(ec)
}
object TestWiki extends Wiki[IO] {
def randomPage: IO[String] = Io{ wikiapi.testrandom }
def nextLinks(current: String): IO[List[String]] = Io{ wikiapi.testnextlink(current) }
}
trait Output[F[_]] {
def firstPage( name: String ) : F[Unit]
def pageStep( from:String, to:String, idx : Int ) : F[Unit]
def statusMsg( msg:String ) : F[Unit]
}
object ConsoleOutput extends Output[IO] {
def firstPage(name: String): IO[Unit] = Io{ println( s"1: $name") }
def pageStep(from: String, to: String, idx: Int): IO[Unit] = Io{ println(s"$idx: $to") }
def statusMsg(msg: String): IO[Unit] = Io{ println(msg) }
}
class HTMLOutput( outputElement: Element ) extends Output[IO] {
def firstPage( name:String ) : IO[Unit] = Io {
    outputElement.innerHTML = s"<p>1: <a href='http://en.wikipedia.org/wiki/$name'>$name</a></p>\n"
}
def pageStep( from:String, to:String, idx: Int ) : IO[Unit] = Io {
outputElement.innerHTML += s"<p>$idx: <a href='http://en.wikipedia.org/wiki/$to'>$to</a></p>\\n"
}
def statusMsg(msg:String ): IO[Unit] = Io {
outputElement.innerHTML += s"<p>$msg</p>\\n"
}
}
object GraphOutput extends Output[IO] {
def firstPage(name: String): IO[Unit] = Io { Graph.addNode( name ) }
def pageStep(from: String, to: String, idx: Int): IO[Unit] = Io {
Graph.addNode( to )
Graph.addLink( from, to )
}
def statusMsg(msg: String): IO[Unit] = Io {
println( msg )
}
}
trait Input[F[_]] {
def getPage : F[String]
}
object RandomPageInput extends ( Input ~~> Wiki ) {
def embed[M[_] : Monad]( wiki: Interpreter[Wiki, M]): Input[M] = new Input[M] {
def getPage: M[String] = wiki( _.randomPage )
}
}
class ConstInput( input:String ) extends Input[Id] {
def getPage : String = input
}
trait UI[F[_]] {
def getStartPage : F[Continue]
def stepPage( state: Continue ) : F[Unit]
def finished( state: CrawlStateFinished ) : F[Unit]
}
object UI {
def getStartPage : Term[UI,Continue] = Term[UI]{ _.getStartPage }
def showStep(state: Continue ) : Term[UI,Unit] = Term[UI]{ _.stepPage(state) }
def finished(state: CrawlStateFinished ) : Term[UI,Unit] = Term[UI]{ _.finished(state) }
}
object UIToInputOutput extends ( UI ~~> (Input~Output)#Pair ) {
def embed[M[_] : Monad]( inputOutput: Interpreter[(Input~Output)#Pair, M]): UI[M] = new UI[M] {
val (input,output) = Interpreter.pairOf( inputOutput )
def getStartPage: M[Continue] = for {
page <- input {_.getPage}
_ <- output {_.firstPage(page)}
} yield Continue(page, Nil)
def stepPage(state: Continue): M[Unit] = output {_.pageStep(state.visitedPages.head, state.currentPage, state.visitedPages.length + 1)}
def finished(state: CrawlStateFinished): M[Unit] =
output{_.statusMsg(state match {
case Loop(currentPage, to) => s"Page '$currentPage' loops back to $to"
case Error(currentPage, e) => s"Error '$e' at Page '$currentPage'"
case NoLinks(currentPage) => s"No suitable links from Page '$currentPage'"
case AtPhilosophy(steps) => s"Got to Philosophy in $steps steps!"
})}
}
}
object program {
type WikiAndUI[M[_]] = (Wiki~UI)#Pair[M]
type PRG[X] = Term[WikiAndUI,X]
def pure[X]( x:X ) : PRG[X] = Term.pure[WikiAndUI,X]( x )
def stepNext( state : Continue ) : PRG[CrawlState] =
for {
links <- Wiki.nextLinks( state.currentPage ).embed[WikiAndUI]
nextState <- links.headOption.fold( pure( NoLinks(state.currentPage) : CrawlState ) ) {
nextPage =>
val cont = Continue(nextPage, state.currentPage :: state.visitedPages)
UI.showStep( cont ).embed[WikiAndUI].map{ ignore =>
if (state.visitedPages.contains(nextPage))
Loop(state.currentPage, nextPage)
else if (nextPage.toLowerCase == "philosophy")
AtPhilosophy(state.visitedPages.length + 2)
else
cont
}
}
} yield nextState
def recurseStep( state: Continue ) : PRG[CrawlStateFinished] =
stepNext( state )
.flatMap {
case c : Continue => recurseStep( c )
case f : CrawlStateFinished => pure( f )
}
def run : PRG[CrawlStateFinished] =
UI.getStartPage.embed[WikiAndUI]
.flatMap( recurseStep )
.flatMap{ state => UI.finished(state).map( x => state ).embed[WikiAndUI] }
}
| vtoro/getting-to-philosophy | src/main/scala/philosophy/finallytagless/finallytagless.scala | Scala | mit | 5,194 |
package com.avsystem.scex
package compiler
import com.avsystem.scex.parsing.PositionMapping
/**
* Created: 14-11-2013
* Author: ghik
*/
case class ExpressionDef(
profile: ExpressionProfile,
template: Boolean,
setter: Boolean,
expression: String,
header: String,
contextType: String,
resultType: String,
variableTypes: Map[String, String])(
val originalExpression: String,
val positionMapping: PositionMapping,
val rootObjectClass: Class[_]) {
}
| AVSystem/scex | scex-core/src/main/scala/com/avsystem/scex/compiler/ExpressionDef.scala | Scala | mit | 476 |
package justin.db.replica.read
import java.util.UUID
import justin.db.Data
import justin.db.actors.protocol.{StorageNodeFailedRead, StorageNodeFoundRead, StorageNodeNotFoundRead}
import justin.db.consistenthashing.NodeId
import justin.db.storage.GetStorageProtocol
import justin.db.storage.PluggableStorageProtocol.{DataOriginality, StorageGetData}
import justin.db.vectorclocks.VectorClock
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{FlatSpec, Matchers}
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
import scala.concurrent.Future
class ReplicaLocalReaderTest extends FlatSpec with Matchers with ScalaFutures {
behavior of "Replica Local Reader"
override implicit def patienceConfig: PatienceConfig = PatienceConfig(10.seconds, 50.millis)
it should "found data for existing key" in {
// given
val id = UUID.randomUUID()
val data = Data(id, "value", VectorClock[NodeId]().increase(NodeId(1)))
val service = new ReplicaLocalReader(new GetStorageProtocol {
override def get(id: UUID)(resolveOriginality: (UUID) => DataOriginality): Future[StorageGetData] = {
Future.successful(StorageGetData.Single(data))
}
})
// when
val result = service.apply(id, null)
// then
whenReady(result) { _ shouldBe StorageNodeFoundRead(data) }
}
it should "not found data for non-existing key" in {
// given
val id = UUID.randomUUID()
val service = new ReplicaLocalReader(new GetStorageProtocol {
override def get(id: UUID)(resolveOriginality: (UUID) => DataOriginality): Future[StorageGetData] = {
Future.successful(StorageGetData.None)
}
})
// when
val result = service.apply(id, null)
// then
whenReady(result) { _ shouldBe StorageNodeNotFoundRead(id) }
}
it should "recover failure reading" in {
// given
val id = UUID.randomUUID()
val service = new ReplicaLocalReader(new GetStorageProtocol {
override def get(id: UUID)(resolveOriginality: (UUID) => DataOriginality): Future[StorageGetData] = Future.failed(new Exception)
})
// when
val result = service.apply(id, null)
// then
whenReady(result) { _ shouldBe StorageNodeFailedRead(id) }
}
}
| speedcom/JustinDB | justin-core/src/test/scala/justin/db/replica/read/ReplicaLocalReaderTest.scala | Scala | apache-2.0 | 2,273 |
/*
* Copyright 2014-2020 Rik van der Kleij
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package intellij.haskell.psi.stubs.index
import com.intellij.psi.stubs.{StringStubIndexExtension, StubIndexKey}
import intellij.haskell.psi.HaskellNamedElement
object HaskellAllNameIndex {
val Key: StubIndexKey[String, HaskellNamedElement] = StubIndexKey.createIndexKey("haskell.all.name")
val Version = 1
}
class HaskellAllNameIndex extends StringStubIndexExtension[HaskellNamedElement] {
override def getVersion: Int = {
super.getVersion + HaskellAllNameIndex.Version
}
def getKey: StubIndexKey[String, HaskellNamedElement] = {
HaskellAllNameIndex.Key
}
} | rikvdkleij/intellij-haskell | src/main/scala/intellij/haskell/psi/stubs/index/HaskellAllNameIndex.scala | Scala | apache-2.0 | 1,186 |
package com.xah.chat.ui.activities
import android.app.Activity
import android.os.Bundle
import android.support.v7.app.ActionBarActivity
import android.view.{View, Window}
import com.xah.chat.comms.{XService, XServiceConnection}
import android.content.{Context, Intent}
import com.xah.chat.framework.TraitActivityContext
import scala.language.implicitConversions
import com.xah.chat.utils.DeviceUtils
import com.xah.chat.datamodel.xah
class BaseActivity extends Activity with TraitActivityContext[Activity] {
val mConnection = new XServiceConnection
val mDeviceId = DeviceUtils.getDeviceId(this)
override def onCreate(savedInstanceState: Bundle): Unit = {
super.onCreate(savedInstanceState)
}
protected def runOnUi(f: () => Unit) = this.runOnUiThread(new Runnable {
override def run(): Unit = f()
})
override def onStart() = {
super.onStart()
if (xah.Handle(this) != "") {
bindService(new Intent(this, classOf[XService]), mConnection, Context.BIND_AUTO_CREATE)
}
}
override def onDestroy() = {
super.onDestroy()
mConnection.mBound match {
case true => unbindService(mConnection)
case _ => ()
}
}
} | lemonxah/xaHChat | src/main/scala/com/xah/chat/ui/activities/BaseActivity.scala | Scala | mit | 1,172 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.csv
import org.apache.spark.SparkFunSuite
import org.apache.spark.sql.types._
class CSVInferSchemaSuite extends SparkFunSuite {
test("String fields types are inferred correctly from null types") {
val options = new CSVOptions(Map.empty[String, String], false, "GMT")
assert(CSVInferSchema.inferField(NullType, "", options) == NullType)
assert(CSVInferSchema.inferField(NullType, null, options) == NullType)
assert(CSVInferSchema.inferField(NullType, "100000000000", options) == LongType)
assert(CSVInferSchema.inferField(NullType, "60", options) == IntegerType)
assert(CSVInferSchema.inferField(NullType, "3.5", options) == DoubleType)
assert(CSVInferSchema.inferField(NullType, "test", options) == StringType)
assert(CSVInferSchema.inferField(NullType, "2015-08-20 15:57:00", options) == TimestampType)
assert(CSVInferSchema.inferField(NullType, "True", options) == BooleanType)
assert(CSVInferSchema.inferField(NullType, "FAlSE", options) == BooleanType)
val textValueOne = Long.MaxValue.toString + "0"
val decimalValueOne = new java.math.BigDecimal(textValueOne)
val expectedTypeOne = DecimalType(decimalValueOne.precision, decimalValueOne.scale)
assert(CSVInferSchema.inferField(NullType, textValueOne, options) == expectedTypeOne)
}
test("String fields types are inferred correctly from other types") {
val options = new CSVOptions(Map.empty[String, String], false, "GMT")
assert(CSVInferSchema.inferField(LongType, "1.0", options) == DoubleType)
assert(CSVInferSchema.inferField(LongType, "test", options) == StringType)
assert(CSVInferSchema.inferField(IntegerType, "1.0", options) == DoubleType)
assert(CSVInferSchema.inferField(DoubleType, null, options) == DoubleType)
assert(CSVInferSchema.inferField(DoubleType, "test", options) == StringType)
assert(CSVInferSchema.inferField(LongType, "2015-08-20 14:57:00", options) == TimestampType)
assert(CSVInferSchema.inferField(DoubleType, "2015-08-20 15:57:00", options) == TimestampType)
assert(CSVInferSchema.inferField(LongType, "True", options) == BooleanType)
assert(CSVInferSchema.inferField(IntegerType, "FALSE", options) == BooleanType)
assert(CSVInferSchema.inferField(TimestampType, "FALSE", options) == BooleanType)
val textValueOne = Long.MaxValue.toString + "0"
val decimalValueOne = new java.math.BigDecimal(textValueOne)
val expectedTypeOne = DecimalType(decimalValueOne.precision, decimalValueOne.scale)
assert(CSVInferSchema.inferField(IntegerType, textValueOne, options) == expectedTypeOne)
}
test("Timestamp field types are inferred correctly via custom data format") {
var options = new CSVOptions(Map("timestampFormat" -> "yyyy-mm"), false, "GMT")
assert(CSVInferSchema.inferField(TimestampType, "2015-08", options) == TimestampType)
options = new CSVOptions(Map("timestampFormat" -> "yyyy"), false, "GMT")
assert(CSVInferSchema.inferField(TimestampType, "2015", options) == TimestampType)
}
test("Timestamp field types are inferred correctly from other types") {
val options = new CSVOptions(Map.empty[String, String], false, "GMT")
assert(CSVInferSchema.inferField(IntegerType, "2015-08-20 14", options) == StringType)
assert(CSVInferSchema.inferField(DoubleType, "2015-08-20 14:10", options) == StringType)
assert(CSVInferSchema.inferField(LongType, "2015-08 14:49:00", options) == StringType)
}
test("Boolean fields types are inferred correctly from other types") {
val options = new CSVOptions(Map.empty[String, String], false, "GMT")
assert(CSVInferSchema.inferField(LongType, "Fale", options) == StringType)
assert(CSVInferSchema.inferField(DoubleType, "TRUEe", options) == StringType)
}
test("Type arrays are merged to highest common type") {
assert(
CSVInferSchema.mergeRowTypes(Array(StringType),
Array(DoubleType)).deep == Array(StringType).deep)
assert(
CSVInferSchema.mergeRowTypes(Array(IntegerType),
Array(LongType)).deep == Array(LongType).deep)
assert(
CSVInferSchema.mergeRowTypes(Array(DoubleType),
Array(LongType)).deep == Array(DoubleType).deep)
}
test("Null fields are handled properly when a nullValue is specified") {
var options = new CSVOptions(Map("nullValue" -> "null"), false, "GMT")
assert(CSVInferSchema.inferField(NullType, "null", options) == NullType)
assert(CSVInferSchema.inferField(StringType, "null", options) == StringType)
assert(CSVInferSchema.inferField(LongType, "null", options) == LongType)
options = new CSVOptions(Map("nullValue" -> "\\\\N"), false, "GMT")
assert(CSVInferSchema.inferField(IntegerType, "\\\\N", options) == IntegerType)
assert(CSVInferSchema.inferField(DoubleType, "\\\\N", options) == DoubleType)
assert(CSVInferSchema.inferField(TimestampType, "\\\\N", options) == TimestampType)
assert(CSVInferSchema.inferField(BooleanType, "\\\\N", options) == BooleanType)
assert(CSVInferSchema.inferField(DecimalType(1, 1), "\\\\N", options) == DecimalType(1, 1))
}
test("Merging Nulltypes should yield Nulltype.") {
val mergedNullTypes = CSVInferSchema.mergeRowTypes(Array(NullType), Array(NullType))
assert(mergedNullTypes.deep == Array(NullType).deep)
}
test("SPARK-18433: Improve DataSource option keys to be more case-insensitive") {
val options = new CSVOptions(Map("TiMeStampFormat" -> "yyyy-mm"), false, "GMT")
assert(CSVInferSchema.inferField(TimestampType, "2015-08", options) == TimestampType)
}
test("SPARK-18877: `inferField` on DecimalType should find a common type with `typeSoFar`") {
val options = new CSVOptions(Map.empty[String, String], false, "GMT")
// 9.03E+12 is Decimal(3, -10) and 1.19E+11 is Decimal(3, -9).
assert(CSVInferSchema.inferField(DecimalType(3, -10), "1.19E+11", options) ==
DecimalType(4, -9))
// BigDecimal("12345678901234567890.01234567890123456789") is precision 40 and scale 20.
val value = "12345678901234567890.01234567890123456789"
assert(CSVInferSchema.inferField(DecimalType(3, -10), value, options) == DoubleType)
// Seq(s"${Long.MaxValue}1", "2015-12-01 00:00:00") should be StringType
assert(CSVInferSchema.inferField(NullType, s"${Long.MaxValue}1", options) == DecimalType(20, 0))
assert(CSVInferSchema.inferField(DecimalType(20, 0), "2015-12-01 00:00:00", options)
== StringType)
}
test("DoubleType should be inferred when user defined nan/inf are provided") {
val options = new CSVOptions(Map("nanValue" -> "nan", "negativeInf" -> "-inf",
"positiveInf" -> "inf"), false, "GMT")
assert(CSVInferSchema.inferField(NullType, "nan", options) == DoubleType)
assert(CSVInferSchema.inferField(NullType, "inf", options) == DoubleType)
assert(CSVInferSchema.inferField(NullType, "-inf", options) == DoubleType)
}
}
| ahnqirage/spark | sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/csv/CSVInferSchemaSuite.scala | Scala | apache-2.0 | 7,719 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ignite.visor.commands.cache
import org.apache.ignite.cluster.{ClusterGroupEmptyException, ClusterNode}
import org.apache.ignite.visor.commands.common.VisorTextTable
import org.apache.ignite.visor.visor._
import org.apache.ignite.internal.visor.query._
import org.apache.ignite.internal.visor.util.VisorTaskUtils._
import scala.collection.JavaConversions._
/**
* ==Overview==
* Visor 'scan' command implementation.
*
* ====Specification====
* {{{
* cache {-id=<node-id>|-id8=<node-id8>} {-p=<page size>} -c=<cache name> -scan
* }}}
*
* ====Arguments====
* {{{
* <node-id>
* Full node ID.
* <node-id8>
* Node ID8.
* <page size>
* Number of object to fetch from cache at once.
* <cache-name>
* Name of the cache.
* }}}
*
* ====Examples====
* {{{
* cache -c=cache
* List entries from cache with name 'cache' from all nodes with this cache.
* cache -c=@c0 -scan -p=50
* List entries from cache with name taken from 'c0' memory variable with page of 50 items
* from all nodes with this cache.
* cache -c=cache -scan -id8=12345678
* List entries from cache with name 'cache' and node '12345678' ID8.
* }}}
*/
class VisorCacheScanCommand {
/**
* Prints error message and advise.
*
* @param errMsgs Error messages.
*/
private def scold(errMsgs: Any*) {
assert(errMsgs != null)
warn(errMsgs: _*)
warn("Type 'help cache' to see how to use this command.")
}
private def error(e: Throwable) {
var cause: Throwable = e
while (cause.getCause != null)
cause = cause.getCause
scold(cause.getMessage)
}
/**
* ===Command===
* List all entries in cache with specified name.
*
* ===Examples===
* <ex>cache -c=cache -scan</ex>
* List entries from cache with name 'cache' from all nodes with this cache.
* <br>
* <ex>cache -c=@c0 -scan -p=50</ex>
* List entries from cache with name taken from 'c0' memory variable with page of 50 items
* from all nodes with this cache.
* <br>
* <ex>cache -c=cache -scan -id8=12345678</ex>
* List entries from cache with name 'cache' and node '12345678' ID8.
*
* @param argLst Command arguments.
*/
def scan(argLst: ArgList, node: Option[ClusterNode]) {
val pageArg = argValue("p", argLst)
val cacheArg = argValue("c", argLst)
var pageSize = 25
if (pageArg.isDefined) {
val page = pageArg.get
try
pageSize = page.toInt
catch {
case nfe: NumberFormatException =>
scold("Invalid value for 'page size': " + page)
return
}
if (pageSize < 1 || pageSize > 100) {
scold("'Page size' should be in range [1..100] but found: " + page)
return
}
}
val cacheName = cacheArg match {
case None => null // default cache.
case Some(s) if s.startsWith("@") =>
warn("Can't find cache variable with specified name: " + s,
"Type 'cache' to see available cache variables."
)
return
case Some(name) => name
}
val firstPage =
try
executeRandom(groupForDataNode(node, cacheName),
classOf[VisorScanQueryTask], new VisorScanQueryTaskArg(cacheName, null, false, false, false, false, pageSize)) match {
case x if x.getError != null =>
error(x.getError)
return
case x => x.getResult
}
catch {
case e: ClusterGroupEmptyException =>
scold(messageNodeNotFound(node, cacheName))
return
case e: Throwable =>
error(e)
return
}
if (firstPage.getRows.isEmpty) {
println(s"Cache: ${escapeName(cacheName)} is empty")
return
}
var nextPage: VisorQueryResult = firstPage
def render() {
println("Entries in cache: " + escapeName(cacheName))
val t = VisorTextTable()
t #= ("Key Class", "Key", "Value Class", "Value")
nextPage.getRows.foreach(r => t += (r(0), r(1), r(2), r(3)))
t.render()
}
render()
while (nextPage.isHasMore) {
ask("\\nFetch more objects (y/n) [y]:", "y") match {
case "y" | "Y" =>
try {
nextPage = executeOne(firstPage.getResponseNodeId, classOf[VisorQueryNextPageTask],
new VisorQueryNextPageTaskArg(firstPage.getQueryId, pageSize))
render()
}
catch {
case e: Exception => error(e)
}
case _ => return
}
}
}
}
/**
* Companion object that does initialization of the command.
*/
object VisorCacheScanCommand {
/** Singleton command. */
private val cmd = new VisorCacheScanCommand
/**
* Singleton.
*/
def apply() = cmd
}
| vadopolski/ignite | modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/cache/VisorCacheScanCommand.scala | Scala | apache-2.0 | 6,231 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.log
import java.io.File
import java.util.concurrent.TimeUnit
import java.util.concurrent.locks.ReentrantLock
import kafka.common.{KafkaException, LogCleaningAbortedException}
import kafka.metrics.KafkaMetricsGroup
import kafka.server.LogDirFailureChannel
import kafka.server.checkpoints.OffsetCheckpointFile
import kafka.utils.CoreUtils._
import kafka.utils.{Logging, Pool}
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.utils.Time
import org.apache.kafka.common.errors.KafkaStorageException
import scala.collection.{Iterable, Seq, mutable}
private[log] sealed trait LogCleaningState
private[log] case object LogCleaningInProgress extends LogCleaningState
private[log] case object LogCleaningAborted extends LogCleaningState
private[log] case class LogCleaningPaused(pausedCount: Int) extends LogCleaningState
private[log] class LogCleaningException(val log: UnifiedLog,
private val message: String,
private val cause: Throwable) extends KafkaException(message, cause)
/**
* This class manages the state of each partition being cleaned.
* LogCleaningState defines the cleaning states that a TopicPartition can be in.
* 1. None : No cleaning state in a TopicPartition. In this state, it can become LogCleaningInProgress
 * or LogCleaningPaused(1). Valid previous states are LogCleaningInProgress and LogCleaningPaused(1)
* 2. LogCleaningInProgress : The cleaning is currently in progress. In this state, it can become None when log cleaning is finished
* or become LogCleaningAborted. Valid previous state is None.
* 3. LogCleaningAborted : The cleaning abort is requested. In this state, it can become LogCleaningPaused(1).
* Valid previous state is LogCleaningInProgress.
* 4-a. LogCleaningPaused(1) : The cleaning is paused once. No log cleaning can be done in this state.
* In this state, it can become None or LogCleaningPaused(2).
* Valid previous state is None, LogCleaningAborted or LogCleaningPaused(2).
* 4-b. LogCleaningPaused(i) : The cleaning is paused i times where i>= 2. No log cleaning can be done in this state.
* In this state, it can become LogCleaningPaused(i-1) or LogCleaningPaused(i+1).
* Valid previous state is LogCleaningPaused(i-1) or LogCleaningPaused(i+1).
*/
private[log] class LogCleanerManager(val logDirs: Seq[File],
val logs: Pool[TopicPartition, UnifiedLog],
val logDirFailureChannel: LogDirFailureChannel) extends Logging with KafkaMetricsGroup {
import LogCleanerManager._
protected override def loggerName = classOf[LogCleaner].getName
// package-private for testing
private[log] val offsetCheckpointFile = "cleaner-offset-checkpoint"
/* the offset checkpoints holding the last cleaned point for each log */
@volatile private var checkpoints = logDirs.map(dir =>
(dir, new OffsetCheckpointFile(new File(dir, offsetCheckpointFile), logDirFailureChannel))).toMap
/* the set of logs currently being cleaned */
private val inProgress = mutable.HashMap[TopicPartition, LogCleaningState]()
/* the set of uncleanable partitions (partitions that have raised an unexpected error during cleaning)
* for each log directory */
private val uncleanablePartitions = mutable.HashMap[String, mutable.Set[TopicPartition]]()
/* a global lock used to control all access to the in-progress set and the offset checkpoints */
private val lock = new ReentrantLock
/* for coordinating the pausing and the cleaning of a partition */
private val pausedCleaningCond = lock.newCondition()
/* gauges for tracking the number of partitions marked as uncleanable for each log directory */
for (dir <- logDirs) {
newGauge("uncleanable-partitions-count",
() => inLock(lock) { uncleanablePartitions.get(dir.getAbsolutePath).map(_.size).getOrElse(0) },
Map("logDirectory" -> dir.getAbsolutePath)
)
}
/* gauges for tracking the number of uncleanable bytes from uncleanable partitions for each log directory */
for (dir <- logDirs) {
newGauge("uncleanable-bytes",
() => inLock(lock) {
uncleanablePartitions.get(dir.getAbsolutePath) match {
case Some(partitions) =>
val lastClean = allCleanerCheckpoints
val now = Time.SYSTEM.milliseconds
partitions.iterator.map { tp =>
val log = logs.get(tp)
val lastCleanOffset = lastClean.get(tp)
val offsetsToClean = cleanableOffsets(log, lastCleanOffset, now)
val (_, uncleanableBytes) = calculateCleanableBytes(log, offsetsToClean.firstDirtyOffset, offsetsToClean.firstUncleanableDirtyOffset)
uncleanableBytes
}.sum
case None => 0
}
},
Map("logDirectory" -> dir.getAbsolutePath)
)
}
/* a gauge for tracking the cleanable ratio of the dirtiest log */
@volatile private var dirtiestLogCleanableRatio = 0.0
newGauge("max-dirty-percent", () => (100 * dirtiestLogCleanableRatio).toInt)
/* a gauge for tracking the time since the last log cleaner run, in milli seconds */
@volatile private var timeOfLastRun: Long = Time.SYSTEM.milliseconds
newGauge("time-since-last-run-ms", () => Time.SYSTEM.milliseconds - timeOfLastRun)
/**
* @return the position processed for all logs.
*/
def allCleanerCheckpoints: Map[TopicPartition, Long] = {
inLock(lock) {
checkpoints.values.flatMap(checkpoint => {
try {
checkpoint.read()
} catch {
case e: KafkaStorageException =>
error(s"Failed to access checkpoint file ${checkpoint.file.getName} in dir ${checkpoint.file.getParentFile.getAbsolutePath}", e)
Map.empty[TopicPartition, Long]
}
}).toMap
}
}
/**
* Package private for unit test. Get the cleaning state of the partition.
*/
private[log] def cleaningState(tp: TopicPartition): Option[LogCleaningState] = {
inLock(lock) {
inProgress.get(tp)
}
}
/**
* Package private for unit test. Set the cleaning state of the partition.
*/
private[log] def setCleaningState(tp: TopicPartition, state: LogCleaningState): Unit = {
inLock(lock) {
inProgress.put(tp, state)
}
}
/**
* Choose the log to clean next and add it to the in-progress set. We recompute this
* each time from the full set of logs to allow logs to be dynamically added to the pool of logs
* the log manager maintains.
*/
def grabFilthiestCompactedLog(time: Time, preCleanStats: PreCleanStats = new PreCleanStats()): Option[LogToClean] = {
inLock(lock) {
val now = time.milliseconds
this.timeOfLastRun = now
val lastClean = allCleanerCheckpoints
val dirtyLogs = logs.filter {
case (_, log) => log.config.compact // match logs that are marked as compacted
}.filterNot {
case (topicPartition, log) =>
// skip any logs already in-progress and uncleanable partitions
inProgress.contains(topicPartition) || isUncleanablePartition(log, topicPartition)
}.map {
case (topicPartition, log) => // create a LogToClean instance for each
try {
val lastCleanOffset = lastClean.get(topicPartition)
val offsetsToClean = cleanableOffsets(log, lastCleanOffset, now)
// update checkpoint for logs with invalid checkpointed offsets
if (offsetsToClean.forceUpdateCheckpoint)
updateCheckpoints(log.parentDirFile, partitionToUpdateOrAdd = Option(topicPartition, offsetsToClean.firstDirtyOffset))
val compactionDelayMs = maxCompactionDelay(log, offsetsToClean.firstDirtyOffset, now)
preCleanStats.updateMaxCompactionDelay(compactionDelayMs)
LogToClean(topicPartition, log, offsetsToClean.firstDirtyOffset, offsetsToClean.firstUncleanableDirtyOffset, compactionDelayMs > 0)
} catch {
case e: Throwable => throw new LogCleaningException(log,
s"Failed to calculate log cleaning stats for partition $topicPartition", e)
}
}.filter(ltc => ltc.totalBytes > 0) // skip any empty logs
this.dirtiestLogCleanableRatio = if (dirtyLogs.nonEmpty) dirtyLogs.max.cleanableRatio else 0
// and must meet the minimum threshold for dirty byte ratio or have some bytes required to be compacted
val cleanableLogs = dirtyLogs.filter { ltc =>
(ltc.needCompactionNow && ltc.cleanableBytes > 0) || ltc.cleanableRatio > ltc.log.config.minCleanableRatio
}
if(cleanableLogs.isEmpty) {
None
} else {
preCleanStats.recordCleanablePartitions(cleanableLogs.size)
val filthiest = cleanableLogs.max
inProgress.put(filthiest.topicPartition, LogCleaningInProgress)
Some(filthiest)
}
}
}
/**
* Pause logs cleaning for logs that do not have compaction enabled
* and do not have other deletion or compaction in progress.
* This is to handle potential race between retention and cleaner threads when users
* switch topic configuration between compacted and non-compacted topic.
* @return retention logs that have log cleaning successfully paused
*/
def pauseCleaningForNonCompactedPartitions(): Iterable[(TopicPartition, UnifiedLog)] = {
inLock(lock) {
val deletableLogs = logs.filter {
case (_, log) => !log.config.compact // pick non-compacted logs
}.filterNot {
case (topicPartition, _) => inProgress.contains(topicPartition) // skip any logs already in-progress
}
deletableLogs.foreach {
case (topicPartition, _) => inProgress.put(topicPartition, LogCleaningPaused(1))
}
deletableLogs
}
}
/**
* Find any logs that have compaction enabled. Mark them as being cleaned
* Include logs without delete enabled, as they may have segments
* that precede the start offset.
*/
def deletableLogs(): Iterable[(TopicPartition, UnifiedLog)] = {
inLock(lock) {
val toClean = logs.filter { case (topicPartition, log) =>
!inProgress.contains(topicPartition) && log.config.compact &&
!isUncleanablePartition(log, topicPartition)
}
toClean.foreach { case (tp, _) => inProgress.put(tp, LogCleaningInProgress) }
toClean
}
}
/**
* Abort the cleaning of a particular partition, if it's in progress. This call blocks until the cleaning of
* the partition is aborted.
* This is implemented by first abortAndPausing and then resuming the cleaning of the partition.
*/
def abortCleaning(topicPartition: TopicPartition): Unit = {
inLock(lock) {
abortAndPauseCleaning(topicPartition)
resumeCleaning(Seq(topicPartition))
}
}
/**
* Abort the cleaning of a particular partition if it's in progress, and pause any future cleaning of this partition.
* This call blocks until the cleaning of the partition is aborted and paused.
* 1. If the partition is not in progress, mark it as paused.
* 2. Otherwise, first mark the state of the partition as aborted.
* 3. The cleaner thread checks the state periodically and if it sees the state of the partition is aborted, it
* throws a LogCleaningAbortedException to stop the cleaning task.
* 4. When the cleaning task is stopped, doneCleaning() is called, which sets the state of the partition as paused.
* 5. abortAndPauseCleaning() waits until the state of the partition is changed to paused.
* 6. If the partition is already paused, a new call to this function
* will increase the paused count by one.
*/
def abortAndPauseCleaning(topicPartition: TopicPartition): Unit = {
inLock(lock) {
inProgress.get(topicPartition) match {
case None =>
inProgress.put(topicPartition, LogCleaningPaused(1))
case Some(LogCleaningInProgress) =>
inProgress.put(topicPartition, LogCleaningAborted)
case Some(LogCleaningPaused(count)) =>
inProgress.put(topicPartition, LogCleaningPaused(count + 1))
case Some(s) =>
throw new IllegalStateException(s"Compaction for partition $topicPartition cannot be aborted and paused since it is in $s state.")
}
while(!isCleaningInStatePaused(topicPartition))
pausedCleaningCond.await(100, TimeUnit.MILLISECONDS)
}
}
/**
* Resume the cleaning of paused partitions.
* Each call of this function will undo one pause.
*/
def resumeCleaning(topicPartitions: Iterable[TopicPartition]): Unit = {
inLock(lock) {
topicPartitions.foreach {
topicPartition =>
inProgress.get(topicPartition) match {
case None =>
throw new IllegalStateException(s"Compaction for partition $topicPartition cannot be resumed since it is not paused.")
case Some(state) =>
state match {
case LogCleaningPaused(count) if count == 1 =>
inProgress.remove(topicPartition)
case LogCleaningPaused(count) if count > 1 =>
inProgress.put(topicPartition, LogCleaningPaused(count - 1))
case s =>
throw new IllegalStateException(s"Compaction for partition $topicPartition cannot be resumed since it is in $s state.")
}
}
}
}
}
/**
* Check if the cleaning for a partition is in a particular state. The caller is expected to hold lock while making the call.
*/
private def isCleaningInState(topicPartition: TopicPartition, expectedState: LogCleaningState): Boolean = {
inProgress.get(topicPartition) match {
case None => false
case Some(state) =>
if (state == expectedState)
true
else
false
}
}
/**
* Check if the cleaning for a partition is paused. The caller is expected to hold lock while making the call.
*/
private def isCleaningInStatePaused(topicPartition: TopicPartition): Boolean = {
inProgress.get(topicPartition) match {
case None => false
case Some(state) =>
state match {
case _: LogCleaningPaused =>
true
case _ =>
false
}
}
}
/**
* Check if the cleaning for a partition is aborted. If so, throw an exception.
*/
def checkCleaningAborted(topicPartition: TopicPartition): Unit = {
inLock(lock) {
if (isCleaningInState(topicPartition, LogCleaningAborted))
throw new LogCleaningAbortedException()
}
}
/**
* Update checkpoint file, adding or removing partitions if necessary.
*
* @param dataDir The File object to be updated
   * @param partitionToUpdateOrAdd The optional (TopicPartition, Long) entry to update or add in the checkpoint; pass None when only removing
   * @param partitionToRemove The TopicPartition whose entry should be removed from the checkpoint
*/
def updateCheckpoints(dataDir: File, partitionToUpdateOrAdd: Option[(TopicPartition, Long)] = None,
partitionToRemove: Option[TopicPartition] = None): Unit = {
inLock(lock) {
val checkpoint = checkpoints(dataDir)
if (checkpoint != null) {
try {
val currentCheckpoint = checkpoint.read().filter { case (tp, _) => logs.keys.contains(tp) }.toMap
// remove the partition offset if any
var updatedCheckpoint = partitionToRemove match {
            case Some(topicPartition) => currentCheckpoint - topicPartition
case None => currentCheckpoint
}
// update or add the partition offset if any
updatedCheckpoint = partitionToUpdateOrAdd match {
case Some(updatedOffset) => updatedCheckpoint + updatedOffset
case None => updatedCheckpoint
}
checkpoint.write(updatedCheckpoint)
} catch {
case e: KafkaStorageException =>
error(s"Failed to access checkpoint file ${checkpoint.file.getName} in dir ${checkpoint.file.getParentFile.getAbsolutePath}", e)
}
}
}
}
/**
* alter the checkpoint directory for the topicPartition, to remove the data in sourceLogDir, and add the data in destLogDir
*/
def alterCheckpointDir(topicPartition: TopicPartition, sourceLogDir: File, destLogDir: File): Unit = {
inLock(lock) {
try {
checkpoints.get(sourceLogDir).flatMap(_.read().get(topicPartition)) match {
case Some(offset) =>
debug(s"Removing the partition offset data in checkpoint file for '${topicPartition}' " +
s"from ${sourceLogDir.getAbsoluteFile} directory.")
updateCheckpoints(sourceLogDir, partitionToRemove = Option(topicPartition))
debug(s"Adding the partition offset data in checkpoint file for '${topicPartition}' " +
s"to ${destLogDir.getAbsoluteFile} directory.")
updateCheckpoints(destLogDir, partitionToUpdateOrAdd = Option(topicPartition, offset))
case None =>
}
} catch {
case e: KafkaStorageException =>
error(s"Failed to access checkpoint file in dir ${sourceLogDir.getAbsolutePath}", e)
}
val logUncleanablePartitions = uncleanablePartitions.getOrElse(sourceLogDir.toString, mutable.Set[TopicPartition]())
if (logUncleanablePartitions.contains(topicPartition)) {
logUncleanablePartitions.remove(topicPartition)
markPartitionUncleanable(destLogDir.toString, topicPartition)
}
}
}
/**
* Stop cleaning logs in the provided directory
*
* @param dir the absolute path of the log dir
*/
def handleLogDirFailure(dir: String): Unit = {
warn(s"Stopping cleaning logs in dir $dir")
inLock(lock) {
checkpoints = checkpoints.filter { case (k, _) => k.getAbsolutePath != dir }
}
}
/**
* Truncate the checkpointed offset for the given partition if its checkpointed offset is larger than the given offset
*/
def maybeTruncateCheckpoint(dataDir: File, topicPartition: TopicPartition, offset: Long): Unit = {
inLock(lock) {
if (logs.get(topicPartition).config.compact) {
val checkpoint = checkpoints(dataDir)
if (checkpoint != null) {
val existing = checkpoint.read()
if (existing.getOrElse(topicPartition, 0L) > offset)
checkpoint.write(mutable.Map() ++= existing += topicPartition -> offset)
}
}
}
}
/**
* Save out the endOffset and remove the given log from the in-progress set, if not aborted.
*/
def doneCleaning(topicPartition: TopicPartition, dataDir: File, endOffset: Long): Unit = {
inLock(lock) {
inProgress.get(topicPartition) match {
case Some(LogCleaningInProgress) =>
updateCheckpoints(dataDir, partitionToUpdateOrAdd = Option(topicPartition, endOffset))
inProgress.remove(topicPartition)
case Some(LogCleaningAborted) =>
inProgress.put(topicPartition, LogCleaningPaused(1))
pausedCleaningCond.signalAll()
case None =>
throw new IllegalStateException(s"State for partition $topicPartition should exist.")
case s =>
throw new IllegalStateException(s"In-progress partition $topicPartition cannot be in $s state.")
}
}
}
def doneDeleting(topicPartitions: Iterable[TopicPartition]): Unit = {
inLock(lock) {
topicPartitions.foreach {
topicPartition =>
inProgress.get(topicPartition) match {
case Some(LogCleaningInProgress) =>
inProgress.remove(topicPartition)
case Some(LogCleaningAborted) =>
inProgress.put(topicPartition, LogCleaningPaused(1))
pausedCleaningCond.signalAll()
case None =>
throw new IllegalStateException(s"State for partition $topicPartition should exist.")
case s =>
throw new IllegalStateException(s"In-progress partition $topicPartition cannot be in $s state.")
}
}
}
}
/**
* Returns an immutable set of the uncleanable partitions for a given log directory
* Only used for testing
*/
private[log] def uncleanablePartitions(logDir: String): Set[TopicPartition] = {
var partitions: Set[TopicPartition] = Set()
inLock(lock) { partitions ++= uncleanablePartitions.getOrElse(logDir, partitions) }
partitions
}
def markPartitionUncleanable(logDir: String, partition: TopicPartition): Unit = {
inLock(lock) {
uncleanablePartitions.get(logDir) match {
case Some(partitions) =>
partitions.add(partition)
case None =>
uncleanablePartitions.put(logDir, mutable.Set(partition))
}
}
}
private def isUncleanablePartition(log: UnifiedLog, topicPartition: TopicPartition): Boolean = {
inLock(lock) {
uncleanablePartitions.get(log.parentDir).exists(partitions => partitions.contains(topicPartition))
}
}
}
/**
* Helper class for the range of cleanable dirty offsets of a log and whether to update the checkpoint associated with
* the log
*
* @param firstDirtyOffset the lower (inclusive) offset to begin cleaning from
* @param firstUncleanableDirtyOffset the upper(exclusive) offset to clean to
* @param forceUpdateCheckpoint whether to update the checkpoint associated with this log. if true, checkpoint should be
* reset to firstDirtyOffset
*/
private case class OffsetsToClean(firstDirtyOffset: Long,
firstUncleanableDirtyOffset: Long,
forceUpdateCheckpoint: Boolean = false) {
}
private[log] object LogCleanerManager extends Logging {
def isCompactAndDelete(log: UnifiedLog): Boolean = {
log.config.compact && log.config.delete
}
/**
* get max delay between the time when log is required to be compacted as determined
* by maxCompactionLagMs and the current time.
*/
def maxCompactionDelay(log: UnifiedLog, firstDirtyOffset: Long, now: Long) : Long = {
val dirtyNonActiveSegments = log.nonActiveLogSegmentsFrom(firstDirtyOffset)
val firstBatchTimestamps = log.getFirstBatchTimestampForSegments(dirtyNonActiveSegments).filter(_ > 0)
val earliestDirtySegmentTimestamp = {
if (firstBatchTimestamps.nonEmpty)
firstBatchTimestamps.min
else Long.MaxValue
}
val maxCompactionLagMs = math.max(log.config.maxCompactionLagMs, 0L)
val cleanUntilTime = now - maxCompactionLagMs
if (earliestDirtySegmentTimestamp < cleanUntilTime)
cleanUntilTime - earliestDirtySegmentTimestamp
else
0L
}
/**
* Returns the range of dirty offsets that can be cleaned.
*
* @param log the log
* @param lastCleanOffset the last checkpointed offset
* @param now the current time in milliseconds of the cleaning operation
* @return OffsetsToClean containing offsets for cleanable portion of log and whether the log checkpoint needs updating
*/
def cleanableOffsets(log: UnifiedLog, lastCleanOffset: Option[Long], now: Long): OffsetsToClean = {
// If the log segments are abnormally truncated and hence the checkpointed offset is no longer valid;
// reset to the log starting offset and log the error
val (firstDirtyOffset, forceUpdateCheckpoint) = {
val logStartOffset = log.logStartOffset
val checkpointDirtyOffset = lastCleanOffset.getOrElse(logStartOffset)
if (checkpointDirtyOffset < logStartOffset) {
// Don't bother with the warning if compact and delete are enabled.
if (!isCompactAndDelete(log))
warn(s"Resetting first dirty offset of ${log.name} to log start offset $logStartOffset " +
s"since the checkpointed offset $checkpointDirtyOffset is invalid.")
(logStartOffset, true)
} else if (checkpointDirtyOffset > log.logEndOffset) {
// The dirty offset has gotten ahead of the log end offset. This could happen if there was data
// corruption at the end of the log. We conservatively assume that the full log needs cleaning.
warn(s"The last checkpoint dirty offset for partition ${log.name} is $checkpointDirtyOffset, " +
s"which is larger than the log end offset ${log.logEndOffset}. Resetting to the log start offset $logStartOffset.")
(logStartOffset, true)
} else {
(checkpointDirtyOffset, false)
}
}
val minCompactionLagMs = math.max(log.config.compactionLagMs, 0L)
// Find the first segment that cannot be cleaned. We cannot clean past:
// 1. The active segment
// 2. The last stable offset (including the high watermark)
// 3. Any segments closer to the head of the log than the minimum compaction lag time
val firstUncleanableDirtyOffset: Long = Seq(
// we do not clean beyond the last stable offset
Some(log.lastStableOffset),
// the active segment is always uncleanable
Option(log.activeSegment.baseOffset),
// the first segment whose largest message timestamp is within a minimum time lag from now
if (minCompactionLagMs > 0) {
// dirty log segments
val dirtyNonActiveSegments = log.nonActiveLogSegmentsFrom(firstDirtyOffset)
dirtyNonActiveSegments.find { s =>
val isUncleanable = s.largestTimestamp > now - minCompactionLagMs
debug(s"Checking if log segment may be cleaned: log='${log.name}' segment.baseOffset=${s.baseOffset} " +
s"segment.largestTimestamp=${s.largestTimestamp}; now - compactionLag=${now - minCompactionLagMs}; " +
s"is uncleanable=$isUncleanable")
isUncleanable
}.map(_.baseOffset)
} else None
).flatten.min
debug(s"Finding range of cleanable offsets for log=${log.name}. Last clean offset=$lastCleanOffset " +
s"now=$now => firstDirtyOffset=$firstDirtyOffset firstUncleanableOffset=$firstUncleanableDirtyOffset " +
s"activeSegment.baseOffset=${log.activeSegment.baseOffset}")
OffsetsToClean(firstDirtyOffset, math.max(firstDirtyOffset, firstUncleanableDirtyOffset), forceUpdateCheckpoint)
}
/**
* Given the first dirty offset and an uncleanable offset, calculates the total cleanable bytes for this log
* @return the biggest uncleanable offset and the total amount of cleanable bytes
*/
def calculateCleanableBytes(log: UnifiedLog, firstDirtyOffset: Long, uncleanableOffset: Long): (Long, Long) = {
val firstUncleanableSegment = log.nonActiveLogSegmentsFrom(uncleanableOffset).headOption.getOrElse(log.activeSegment)
val firstUncleanableOffset = firstUncleanableSegment.baseOffset
val cleanableBytes = log.logSegments(math.min(firstDirtyOffset, firstUncleanableOffset), firstUncleanableOffset).map(_.size.toLong).sum
(firstUncleanableOffset, cleanableBytes)
}
}
| lindong28/kafka | core/src/main/scala/kafka/log/LogCleanerManager.scala | Scala | apache-2.0 | 27,975 |
/*
* Copyright (c) 2014-2020 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.reactive.internal.operators
import monix.reactive.Observable
import scala.concurrent.duration._
import scala.util.Success
object DelayBySelectorSuite extends BaseOperatorSuite {
def createObservable(sourceCount: Int) = Some {
val source = Observable.range(0L, sourceCount.toLong)
val o = source.delayOnNextBySelector(x => Observable.now(x).delayExecution(1.second))
val c = sourceCount
Sample(o, c, (c * (c - 1) / 2).toLong, 1.second, 1.second)
}
def observableInError(sourceCount: Int, ex: Throwable) = Some {
val source = createObservableEndingInError(Observable.range(0L, sourceCount.toLong), ex)
val o = source.delayOnNextBySelector(x => Observable.now(x).delayExecution(1.second))
val c = sourceCount
Sample(o, c - 1, (c - 1) * (c - 2) / 2, 1.second, 1.second)
}
def brokenUserCodeObservable(sourceCount: Int, ex: Throwable) = Some {
val source = Observable.range(0, sourceCount.toLong + 1)
val o = source.delayOnNextBySelector { x =>
if (x < sourceCount)
Observable.now(x).delayExecution(1.second)
else
throw ex
}
val c = sourceCount
Sample(o, c, (c * (c - 1) / 2).toLong, 1.second, 1.second)
}
override def cancelableObservables() = {
val o = Observable
.now(1L)
.delayOnNextBySelector(x => Observable.now(x).delayExecution(1.second))
Seq(Sample(o, 0, 0, 0.seconds, 0.seconds))
}
test("should terminate immediately on empty observable") { implicit s =>
val f = Observable
.empty[Int]
.delayOnNextBySelector(n => Observable.empty)
.completedL
.runToFuture
s.tick(1.day)
assertEquals(f.value, Some(Success(())))
}
}
| alexandru/monifu | monix-reactive/shared/src/test/scala/monix/reactive/internal/operators/DelayBySelectorSuite.scala | Scala | apache-2.0 | 2,375 |
Dataset Card for "github-code-scala"
This dataset contains only the Scala files from github-code-clean. There are 817k samples, with a total download size of 1.52 GB.
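A minimal loading sketch is shown below. The full Hugging Face hub id is not stated on this card, so the repository name in the snippet is a placeholder, and the column names (`code`, `repo_name`, `path`, `language`, `license`, `size`) are assumed from the per-row metadata visible in the preview rows above.

```python
# Minimal sketch: stream the dataset and inspect a few rows.
# Assumption: "<namespace>/github-code-scala" stands in for the real hub id,
# which is not stated on this card.
from datasets import load_dataset

ds = load_dataset("<namespace>/github-code-scala", split="train", streaming=True)

for i, row in enumerate(ds):
    # Each row holds one Scala source file plus its metadata.
    print(row["repo_name"], row["path"], row["license"], row["size"])
    if i == 4:
        break
```

Streaming avoids downloading the full 1.52 GB archive when only a handful of rows are needed for inspection.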