| code (stringlengths, 5-1M) | repo_name (stringlengths, 5-109) | path (stringlengths, 6-208) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 5-1M) |
|---|---|---|---|---|---|
package test.testSpark
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.SQLContext
case class Emp(id: Int, country: String, gender: String, salary: Int)
object CountByEmployeeDF {
def main(args: Array[String]): Unit = {
// Windows-only workaround so Spark can locate the local Hadoop binaries.
System.setProperty("hadoop.home.dir", "C:/Users/Thambu/Desktop/arun ws/testSpark")
val conf = new SparkConf().setAppName("CountByEmployeeDF").setMaster("local[4]")
val sc = new SparkContext(conf)
val sqlContext = new SQLContext(sc)
import sqlContext.implicits._
// Parse the tab-separated employee file into a DataFrame of Emp rows.
val emp = sc.textFile("data/input/empdata.txt")
.map(_.split("\t"))
.map(e => Emp(e(0).toInt, e(1), e(2), e(3).toInt))
.toDF()
emp.registerTempTable("employee")
// Run the SQL filter and print each matching row.
val filteredEmployees = sqlContext.sql(
"SELECT id, country, gender, salary FROM employee " +
"WHERE country = 'india' AND salary <= 50 AND gender = 'm'")
filteredEmployees.collect().foreach(println)
}
} | arunchan05/sparkLearning | src/main/scala/test/testSpark/CountByEmployeeDF.scala | Scala | apache-2.0 | 1,032 |
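For comparison, the same query can be written with the DataFrame DSL instead of an embedded SQL string. The sketch below is illustrative only: it reuses the `emp` DataFrame and the `import sqlContext.implicits._` from the file above (the `$"..."` column syntax comes from that import).

```scala
val filteredViaDsl = emp
  .filter($"country" === "india" && $"salary" <= 50 && $"gender" === "m")
  .select("id", "country", "gender", "salary")
filteredViaDsl.collect().foreach(println)
```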
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.sparklinedata.druid.client
import com.github.nscala_time.time.Imports._
import org.apache.spark.sql.catalyst.dsl.expressions._
import org.sparklinedata.spark.dateTime.dsl.expressions._
import scala.language.postfixOps
class DruidRewritesTest extends BaseTest {
test("basicAgg") {
val df = sqlAndLog("basicAgg",
"select l_returnflag, l_linestatus, " +
"count(*), sum(l_extendedprice) as s, max(ps_supplycost) as m, avg(ps_availqty) as a," +
"count(distinct o_orderkey) " +
"from orderLineItemPartSupplier group by l_returnflag, l_linestatus")
logPlan("basicAgg", df)
// df.show()
}
test("basicAggWithProject") {
val df = sqlAndLog("basicAggWithProject",
"select f, s, " +
"count(*) " +
"from (select l_returnflag f, l_linestatus s from orderLineItemPartSupplier) t group by f, s")
logPlan("basicAggWithProject", df)
// df.show()
}
test("dateFilter") {
val shipDtPredicate = dateTime('l_shipdate) <= (dateTime("1997-12-01") - 90.day)
val df = sqlAndLog("dateFilter",
date"""
select f, s, count(*) as count_order
from
(
select l_returnflag as f, l_linestatus as s, l_shipdate, s_region
from orderLineItemPartSupplier
) t
where $shipDtPredicate
group by f,s
order by f,s
""")
logPlan("dateFilter", df)
// df.show()
}
test("intervalFilter") {
val shipDtPredicate = dateTime('l_shipdate) <= (dateTime("1997-12-01") - 90.day)
val df = sqlAndLog("intervalFilter",
date"""
select f, s, count(*) as count_order
from
(
select l_returnflag as f, l_linestatus as s, l_shipdate, s_region, s_nation, c_nation
from orderLineItemPartSupplier
) t
where $shipDtPredicate and ((s_nation = 'FRANCE' and c_nation = 'GERMANY') or
(c_nation = 'FRANCE' and s_nation = 'GERMANY')
)
group by f,s
order by f,s
""")
logPlan("intervalFilter", df)
// df.show()
}
test("intervalFilter2") {
val shipDtPredicate = dateTime('l_shipdate) <= (dateTime("1997-12-01") - 90.day)
val shipDtPredicate2 = dateTime('l_shipdate) > (dateTime("1995-12-01"))
val df = sqlAndLog("intervalFilter2",
date"""
select f, s, count(*) as count_order
from
(
select l_returnflag as f, l_linestatus as s, l_shipdate, s_region, s_nation, c_nation
from orderLineItemPartSupplier
) t
where $shipDtPredicate and $shipDtPredicate2
group by f,s
order by f,s
""")
logPlan("intervalFilter2", df)
// df.show()
}
test("intervalFilter3") {
val shipDtPredicate = dateTime('l_shipdate) <= (dateTime("1997-12-01") - 90.day)
val shipDtPredicate2 = dateTime('l_shipdate) < (dateTime("1995-12-01"))
val df = sqlAndLog("intervalFilter3",
date"""
select f, s, count(*) as count_order
from
(
select l_returnflag as f, l_linestatus as s, l_shipdate, s_region, s_nation, c_nation
from orderLineItemPartSupplier
) t
where $shipDtPredicate and $shipDtPredicate2
group by f,s
order by f,s
""")
logPlan("intervalFilter3", df)
// df.show()
}
test("intervalFilter4") {
val shipDtPredicate = dateTime('l_shipdate) <= (dateTime("1997-12-01") - 90.day)
val shipDtPredicate2 = dateTime('l_shipdate) > (dateTime("1997-12-02"))
val df = sqlAndLog("intervalFilter4",
date"""
select f, s, count(*) as count_order
from
(
select l_returnflag as f, l_linestatus as s, l_shipdate, s_region, s_nation, c_nation
from orderLineItemPartSupplier
) t
where $shipDtPredicate and $shipDtPredicate2
group by f,s
order by f,s
""")
logPlan("intervalFilter4", df)
// df.show()
}
test("dimFilter2") {
val shipDtPredicate = dateTime('l_shipdate) <= (dateTime("1997-12-01") - 90.day)
val df = sqlAndLog("dimFilter2",
date"""
select f, s, count(*) as count_order
from
(
select l_returnflag as f, l_linestatus as s, l_shipdate,
s_region, s_nation, c_nation, p_type
from orderLineItemPartSupplier
) t
where $shipDtPredicate and ((s_nation = 'FRANCE' and c_nation = 'GERMANY') or
(c_nation = 'FRANCE' and s_nation = 'GERMANY')
) and p_type = 'ECONOMY ANODIZED STEEL'
group by f,s
order by f,s
""")
logPlan("dimFilter2", df)
// df.show()
}
test("dimFilter3") {
val shipDtPredicate = dateTime('l_shipdate) <= (dateTime("1997-12-01") - 90.day)
val df = sqlAndLog("dimFilter3",
date"""
select s_nation, count(*) as count_order
from
(
select l_returnflag as f, l_linestatus as s, l_shipdate,
s_region, s_nation, c_nation, p_type
from orderLineItemPartSupplier
) t
where $shipDtPredicate and ((s_nation = 'FRANCE' and c_nation = 'GERMANY') or
(c_nation = 'FRANCE' and s_nation = 'GERMANY')
)
group by s_nation
order by s_nation
""")
logPlan("dimFilter3", df)
// df.show()
}
test("dimFilter4") {
val shipDtPredicate = dateTime('l_shipdate) <= (dateTime("1997-12-01") - 90.day)
val df = sqlAndLog("dimFilter4",
date"""
select s_nation, count(*) as count_order
from
(
select l_returnflag as f, l_linestatus as s, l_shipdate,
s_region, s_nation, c_nation, p_type
from orderLineItemPartSupplier
) t
where $shipDtPredicate and s_nation >= 'FRANCE'
group by s_nation
order by s_nation
""")
logPlan("dimFilter4", df)
// df.show()
}
test("projFilterAgg") {
val shipDtPredicate = dateTime('l_shipdate) <= (dateTime("1997-12-01") - 90.day)
val shipDtPredicate2 = dateTime('l_shipdate) > (dateTime("1995-12-01"))
val df = sqlAndLog("projFilterAgg",
date"""
select s_nation,
count(*) as count_order,
sum(l_extendedprice) as s,
max(ps_supplycost) as m,
avg(ps_availqty) as a,
count(distinct o_orderkey)
from
(
select l_returnflag as f, l_linestatus as s, l_shipdate,
s_region, s_nation, c_nation, p_type,
l_extendedprice, ps_supplycost, ps_availqty, o_orderkey
from orderLineItemPartSupplier
where p_type = 'ECONOMY ANODIZED STEEL'
) t
where $shipDtPredicate and
$shipDtPredicate2 and ((s_nation = 'FRANCE' and c_nation = 'GERMANY') or
(c_nation = 'FRANCE' and s_nation = 'GERMANY')
)
group by s_nation
order by s_nation
""")
logPlan("projFilterAgg", df)
// df.show()
}
test("ShipDateYearAgg") {
val shipDtYrGroup = dateTime('l_shipdate) year
val df = sqlAndLog("basicAgg",
date"""select l_returnflag, l_linestatus, $shipDtYrGroup, count(*),
sum(l_extendedprice) as s, max(ps_supplycost) as m, avg(ps_availqty) as a,
count(distinct o_orderkey)
from orderLineItemPartSupplier group by l_returnflag, l_linestatus, $shipDtYrGroup""")
logPlan("basicAgg", df)
// df.show()
}
test("OrderDateYearAgg") {
val orderDtYrGroup = dateTime('o_orderdate) year
val df = sqlAndLog("basicAgg",
date"""select l_returnflag, l_linestatus, $orderDtYrGroup, count(*),
sum(l_extendedprice) as s, max(ps_supplycost) as m, avg(ps_availqty) as a,
count(distinct o_orderkey)
from orderLineItemPartSupplier group by l_returnflag, l_linestatus, $orderDtYrGroup""")
logPlan("basicAgg", df)
// df.show()
}
}
| YanjieGao/spark-druid-olap | src/test/scala/org/sparklinedata/druid/client/DruidRewritesTest.scala | Scala | apache-2.0 | 8,632 |
package org.jetbrains.plugins.scala
package debugger.evaluation
import com.intellij.codeInsight.PsiEquivalenceUtil
import com.intellij.debugger.engine.evaluation.expression.ExpressionEvaluator
import com.intellij.debugger.impl.{DebuggerManagerAdapter, DebuggerSession}
import com.intellij.debugger.{DebuggerManagerEx, SourcePosition}
import com.intellij.openapi.components.AbstractProjectComponent
import com.intellij.openapi.project.Project
import com.intellij.psi.{PsiElement, PsiFile}
import scala.collection.mutable
/**
* Nikolay.Tropin
* 2014-06-03
*/
class ScalaEvaluatorCache(project: Project) extends AbstractProjectComponent(project) {
private val cachedEvaluators = mutable.HashMap[(PsiFile, Int), mutable.HashMap[PsiElement, ExpressionEvaluator]]()
private val cachedStamp = mutable.HashMap[PsiFile, Long]()
private val listener = new DebuggerManagerAdapter {
override def sessionDetached(session: DebuggerSession) = clear()
}
override def projectOpened() = {
DebuggerManagerEx.getInstanceEx(project).addDebuggerManagerListener(listener)
}
override def projectClosed(): Unit = {
clear()
DebuggerManagerEx.getInstanceEx(project).removeDebuggerManagerListener(listener)
}
def clear() {
cachedEvaluators.values.foreach(_.clear())
cachedEvaluators.clear()
cachedStamp.clear()
}
def get(position: SourcePosition, element: PsiElement): Option[ExpressionEvaluator] = {
if (position == null) return None
val file = position.getFile
val offset = position.getOffset
if (!cachedStamp.get(file).contains(file.getModificationStamp)) {
cachedStamp(file) = file.getModificationStamp
cachedEvaluators.filterKeys(_._1 == file).foreach {
case (pos, map) =>
map.clear()
cachedEvaluators.remove(pos)
}
None
} else {
cachedEvaluators.get((file, offset)) match {
case Some(map) => map.collectFirst {
case (elem, eval) if PsiEquivalenceUtil.areElementsEquivalent(element, elem) => eval
}
case None => None
}
}
}
def add(position: SourcePosition, element: PsiElement, evaluator: ExpressionEvaluator): ExpressionEvaluator = {
if (position != null) {
val file = position.getFile
val offset = position.getOffset
cachedEvaluators.get((file, offset)) match {
case Some(map) => map += (element -> evaluator)
case None =>
cachedEvaluators += ((file, offset) -> mutable.HashMap(element -> evaluator))
}
}
evaluator
}
}
object ScalaEvaluatorCache {
def getInstance(project: Project) = project.getComponent(classOf[ScalaEvaluatorCache])
}
| advancedxy/intellij-scala | src/org/jetbrains/plugins/scala/debugger/evaluation/ScalaEvaluatorCache.scala | Scala | apache-2.0 | 2,673 |
/*
* Copyright 2017 TEAM PER LA TRASFORMAZIONE DIGITALE
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import javax.inject._
import play.api.http.DefaultHttpErrorHandler
import play.api._
import play.api.mvc._
import play.api.mvc.Results._
import play.api.routing.Router
import scala.concurrent.Future
import de.zalando.play.controllers.PlayBodyParsing
/**
* The purpose of this ErrorHandler is to override default play's error reporting with application/json content type.
*/
class ErrorHandler @Inject() (
env: Environment,
config: Configuration,
sourceMapper: OptionalSourceMapper,
router: Provider[Router]
) extends DefaultHttpErrorHandler(env, config, sourceMapper, router) {
private def contentType(request: RequestHeader): String =
request.acceptedTypes.map(_.toString).filterNot(_ == "text/html").headOption.getOrElse("application/json")
override def onProdServerError(request: RequestHeader, exception: UsefulException) = {
implicit val writer = PlayBodyParsing.anyToWritable[Throwable](contentType(request))
Future.successful(InternalServerError(exception))
}
// called when a route is found, but it was not possible to bind the request parameters
override def onBadRequest(request: RequestHeader, error: String): Future[Result] = {
implicit val writer = PlayBodyParsing.anyToWritable[String](contentType(request))
Future.successful(BadRequest("Bad Request: " + error))
}
// 404 - page not found error
override def onNotFound(request: RequestHeader, message: String): Future[Result] = {
implicit val writer = PlayBodyParsing.anyToWritable[String](contentType(request))
Future.successful(NotFound(request.path))
}
} | seralf/daf-semantics | semantic_manager/app/ErrorHandler.scala | Scala | apache-2.0 | 2,213 |
/*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.core.norm
import com.netflix.atlas.core.util.Math
/**
* Normalizes values by truncating the timestamp to the previous step boundary. All values will
* be passed through to the `next` function.
*
* @param step
* Normalized distance between samples produced by this class.
* @param next
* Normalized values will be passed to the this function.
*/
class MaxValueFunction(step: Long, next: ValueFunction) extends ValueFunction {
private val impl = new RollingValueFunction(step, Math.maxNaN, next)
def apply(timestamp: Long, value: Double): Unit = {
impl.apply(timestamp, value)
}
override def close(): Unit = {
impl.close()
}
override def toString: String = {
s"${getClass.getSimpleName}(step=$step)"
}
}
| brharrington/atlas | atlas-core/src/main/scala/com/netflix/atlas/core/norm/MaxValueFunction.scala | Scala | apache-2.0 | 1,386 |
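The scaladoc above describes consolidating raw samples onto step boundaries while keeping the maximum value per step. As a standalone illustration of that idea in plain Scala (deliberately not using the Atlas `ValueFunction`/`RollingValueFunction` API):

```scala
// Truncate each timestamp to its previous step boundary and keep the max value per boundary.
def consolidateMax(step: Long, samples: Seq[(Long, Double)]): Map[Long, Double] =
  samples
    .groupBy { case (t, _) => t - (t % step) }
    .map { case (boundary, values) => boundary -> values.map(_._2).max }
```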
/*
* Licensed to Intel Corporation under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* Intel Corporation licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.nn
import com.intel.analytics.bigdl.nn.abstractnn.AbstractModule
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.Table
import scala.reflect.ClassTag
/**
* Takes a table of Tensors and outputs the max of all of them.
*/
@SerialVersionUID(8594258233874356842L)
class CMaxTable[T: ClassTag](implicit ev: TensorNumeric[T])
extends AbstractModule[Table, Tensor[T], T]{
@transient
private var maxIdx: Tensor[T] = null
@transient
private var mask: Tensor[T] = null
@transient
private var maskResult: Tensor[T] = null
override def updateOutput(input: Table): Tensor[T] = {
if (null == maxIdx) maxIdx = Tensor[T]()
if (null == mask) mask = Tensor[T]()
if (null == maskResult) maskResult = Tensor[T]()
val res1 = input[Tensor[T]](1)
output.resizeAs(res1).copy(res1)
maxIdx.resizeAs(res1).fill(ev.fromType(1))
var i = 2
while (i <= input.length()) {
mask.resize(res1.size())
mask.gt(input(i), output)
maxIdx.maskedFill(mask, ev.fromType(i))
if (ev.isGreater(mask.sum(), ev.fromType(0))) {
output.maskedCopy(mask, input[Tensor[T]](i).maskedSelect(mask, maskResult))
}
i += 1
}
output
}
override def updateGradInput(input: Table, gradOutput: Tensor[T]): Table = {
var i = 1
while (i <= input.length()) {
if (!gradInput.contains(i)) gradInput.insert(i, Tensor[T]())
gradInput[Tensor[T]](i).resizeAs(input(i)).zero()
mask.resize(maxIdx.size())
mask.eq(maxIdx, ev.fromType(i))
if (ev.isGreater(mask.sum(), ev.fromType(0))) {
gradInput[Tensor[T]](i).maskedCopy(mask, gradOutput.maskedSelect(mask, maskResult))
}
i += 1
}
gradInput
}
override def canEqual(other: Any): Boolean = other.isInstanceOf[CMaxTable[T]]
override def equals(other: Any): Boolean = other match {
case that: CMaxTable[T] =>
super.equals(that) &&
(that canEqual this)
case _ => false
}
override def hashCode(): Int = {
def getHashCode(a: Any): Int = if (a == null) 0 else a.hashCode()
val state = Seq(super.hashCode())
state.map(getHashCode).foldLeft(0)((a, b) => 31 * a + b)
}
override def toString() : String = {
"nn.CMaxTable"
}
}
object CMaxTable {
def apply[@specialized(Float, Double) T: ClassTag]()
(implicit ev: TensorNumeric[T]) : CMaxTable[T] = {
new CMaxTable[T]()
}
}
| SeaOfOcean/BigDL | dl/src/main/scala/com/intel/analytics/bigdl/nn/CMaxTable.scala | Scala | apache-2.0 | 3,327 |
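A minimal usage sketch for the module above, assuming a standard BigDL setup; the `NumericFloat` import and the tensor shapes follow BigDL's usual example conventions rather than anything in this file.

```scala
import com.intel.analytics.bigdl.nn.CMaxTable
import com.intel.analytics.bigdl.numeric.NumericFloat
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.utils.T

val layer = CMaxTable[Float]()
val input = T(Tensor[Float](2, 3).rand(), Tensor[Float](2, 3).rand())
// forward takes the element-wise maximum across the two tensors in the input table
val output = layer.forward(input)
```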
/*
* Copyright (c) 2015 Miles Sabin
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cats.derived
import cats.{ Apply, Semigroup, SemigroupK }
import shapeless._
trait MkSemigroupK[F[_]] extends SemigroupK[F]
object MkSemigroupK extends MkSemigroupK0 {
def apply[F[_]](implicit sgk: MkSemigroupK[F]): MkSemigroupK[F] = sgk
}
private[derived] abstract class MkSemigroupK0 extends MkSemigroupK0b {
implicit val mkSemigroupKHnil: MkSemigroupK[Const[HNil]#λ] =
new MkSemigroupK[Const[HNil]#λ] {
def empty[A] = HNil
def combineK[A](x: HNil, y: HNil) = HNil
}
implicit def mkSemigroupKHcons[F[_]](implicit ihc: IsHCons1[F, SemigroupK, MkSemigroupK])
: MkSemigroupK[F] = new MkSemigroupK[F] {
import ihc._
def combineK[A](x: F[A], y: F[A]) = {
val (hx, tx) = unpack(x)
val (hy, ty) = unpack(y)
pack(fh.combineK(hx, hy), ft.combineK(tx, ty))
}
}
}
private[derived] abstract class MkSemigroupK0b extends MkSemigroupK1 {
implicit def mkSemigroupKHconsFurther[F[_]](implicit ihc: IsHCons1[F, MkSemigroupK, MkSemigroupK])
: MkSemigroupK[F] = new MkSemigroupK[F] {
import ihc._
def combineK[A](x: F[A], y: F[A]) = {
val (hx, tx) = unpack(x)
val (hy, ty) = unpack(y)
pack(fh.combineK(hx, hy), ft.combineK(tx, ty))
}
}
}
private[derived] abstract class MkSemigroupK1 extends MkSemigroupK1b {
implicit def mkSemigroupKComposed[F[_]](implicit split: Split1[F, SemigroupK, Trivial1])
: MkSemigroupK[F] = new MkSemigroupK[F] {
import split._
def combineK[A](x: F[A], y: F[A]) =
pack(fo.combineK(unpack(x), unpack(y)))
}
}
private[derived] abstract class MkSemigroupK1b extends MkSemigroupK2 {
implicit def mkSemigroupKComposedFuther[F[_]](implicit split: Split1[F, MkSemigroupK, Trivial1])
: MkSemigroupK[F] = new MkSemigroupK[F] {
import split._
def combineK[A](x: F[A], y: F[A]) =
pack(fo.combineK(unpack(x), unpack(y)))
}
}
private[derived] abstract class MkSemigroupK2 extends MkSemigroupK2b {
implicit def mkSemigroupKApplied[F[_]](implicit split: Split1[F, Apply, SemigroupK])
: MkSemigroupK[F] = new MkSemigroupK[F] {
import split._
def combineK[A](x: F[A], y: F[A]) =
pack(fo.map2(unpack(x), unpack(y))(fi.combineK(_, _)))
}
}
private[derived] abstract class MkSemigroupK2b extends MkSemigroupK3 {
implicit def mkSemigroupKAppliedFuther[F[_]](implicit split: Split1[F, Apply, MkSemigroupK])
: MkSemigroupK[F] = new MkSemigroupK[F] {
import split._
def combineK[A](x: F[A], y: F[A]) =
pack(fo.map2(unpack(x), unpack(y))(fi.combineK(_, _)))
}
}
private[derived] abstract class MkSemigroupK3 extends MkSemigroupK4 {
implicit def mkSemigroupKGeneric[F[_]](implicit gen: Generic1[F, MkSemigroupK])
: MkSemigroupK[F] = new MkSemigroupK[F] {
import gen._
def combineK[A](x: F[A], y: F[A]) =
from(fr.combineK(to(x), to(y)))
}
}
trait MkSemigroupK4 {
implicit def mkSemigroupKConst[T](implicit sg: Semigroup[T])
: MkSemigroupK[Const[T]#λ] = new MkSemigroupK[Const[T]#λ] {
def combineK[A](x: T, y: T) = sg.combine(x, y)
}
}
| milessabin/kittens | core/src/main/scala/cats/derived/semigroupk.scala | Scala | apache-2.0 | 3,718 |
package com.eevolution.context.dictionary.domain.api.service
import com.eevolution.context.dictionary.api
import com.eevolution.context.dictionary.domain.model.InformationWindowTrl
/**
* Copyright (C) 2003-2017, e-Evolution Consultants S.A. , http://www.e-evolution.com
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
* Email: [email protected], http://www.e-evolution.com , http://github.com/EmerisScala
* Created by [email protected] , www.e-evolution.com on 10/11/17.
*/
/**
* Information Window Trl Service
*/
trait InformationWindowTrlService extends api.Service[InformationWindowTrl, Int] {
//Definition
}
| adempiere/ADReactiveSystem | dictionary-api/src/main/scala/com/eevolution/context/dictionary/domain/api/service/InformationWindowTrlService.scala | Scala | gpl-3.0 | 1,262 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.suiteprop
import org.scalatest._
import prop.TableDrivenPropertyChecks
class PathBeforeAndAfterExamples extends PathSuiteExamples {
case class Counts(
var before0: Int = 0,
var before00: Int = 0,
var before000: Int = 0,
var before01: Int = 0,
var before010: Int = 0,
var middle: Int = 0,
var after010: Int = 0,
var after01: Int = 0,
var after000: Int = 0,
var after00: Int = 0,
var after0: Int = 0
) {
val arr = new Array[Int](0)
}
trait Services {
val counts: Counts
var firstTestCounts: Counts = Counts()
var secondTestCounts: Counts = Counts()
val expectedFirstTestCounts: Counts
val expectedSecondTestCounts: Counts
val expectedCounts: Counts
}
type FixtureServices = Services
class EmptyPathFunSpecExample(val counts: Counts, initialInstance: Option[Services] = None) extends path.FunSpec with Services {
import counts._
middle += 1
override def newInstance = new EmptyPathFunSpecExample(counts, Some(this))
val expectedFirstTestCounts = Counts()
val expectedSecondTestCounts = Counts()
val expectedCounts = Counts(middle = 1)
}
class EmptyNestedPathFunSpecExample(val counts: Counts, initialInstance: Option[Services] = None) extends path.FunSpec with Services {
import counts._
before0 += 1
describe("A subject") {
middle += 1
}
after0 += 1
override def newInstance = new EmptyNestedPathFunSpecExample(counts, Some(this))
val expectedFirstTestCounts = Counts()
val expectedSecondTestCounts = Counts()
val expectedCounts = Counts(before0 = 1, middle = 1, after0 = 1)
}
class SiblingEmptyNestedPathFunSpecExample(val counts: Counts, initialInstance: Option[Services] = None) extends path.FunSpec with Services {
import counts._
before0 += 1
describe("A subject") {
before00 += 1
}
middle += 1
describe("Another subject") {
before01 += 1
}
after0 += 1
override def newInstance = new SiblingEmptyNestedPathFunSpecExample(counts, Some(this))
val expectedFirstTestCounts = Counts()
val expectedSecondTestCounts = Counts()
val expectedCounts = Counts(before0 = 2, before00 = 1, middle = 2, before01 = 1, after0 = 2)
}
class OneTestSiblingEmptyNestedPathFunSpecExample(val counts: Counts, initialInstance: Option[Services] = None) extends path.FunSpec with Services {
import counts._
before0 += 1
describe("A subject") {
before00 += 1
}
middle += 1
describe("Another subject") {
before01 += 1
it("first test") { initialInstance.get.firstTestCounts = counts.copy() }
after01 += 1
}
after0 += 1
override def newInstance = new OneTestSiblingEmptyNestedPathFunSpecExample(counts, Some(this))
val expectedFirstTestCounts = Counts(before0 = 2, before00 = 1, middle = 2, before01 = 1, after0 = 1)
val expectedSecondTestCounts = Counts()
val expectedCounts = Counts(before0 = 2, before00 = 1, middle = 2, before01 = 1, after01 = 1, after0 = 2)
}
class OneTestSiblingEmptyDeeplyNestedPathFunSpecExample(val counts: Counts, initialInstance: Option[Services] = None) extends path.FunSpec with Services {
import counts._
before0 += 1
describe("A subject") {
before00 += 1
}
middle += 1
describe("Another subject") {
before01 += 1
describe("when created") {
before010 += 1
it("first test") { initialInstance.get.firstTestCounts = counts.copy() }
after010 += 1
}
after01 += 1
}
after0 += 1
override def newInstance = new OneTestSiblingEmptyDeeplyNestedPathFunSpecExample(counts, Some(this))
val expectedFirstTestCounts = Counts(before0 = 2, before00 = 1, middle = 2, before01 = 1, before010 = 1, after0 = 1)
val expectedSecondTestCounts = Counts()
val expectedCounts = Counts(before0 = 2, before00 = 1, middle = 2, before01 = 1, before010 = 1, after010 = 1, after01 = 1, after0 = 2)
}
class PathFunSpecExample(val counts: Counts, initialInstance: Option[Services] = None) extends path.FunSpec with Services {
import counts._
before0 += 1
it("first test") { firstTestCounts = counts.copy() }
middle += 1
it("second test") { initialInstance.get.secondTestCounts = counts.copy() }
after0 += 1
override def newInstance = new PathFunSpecExample(counts, Some(this))
val expectedFirstTestCounts = Counts(before0 = 1)
val expectedSecondTestCounts = Counts(before0 = 2, middle = 2, after0 = 1)
val expectedCounts = Counts(before0 = 2, middle = 2, after0 = 2)
}
class NestedPathFunSpecExample(val counts: Counts, initialInstance: Option[Services] = None) extends path.FunSpec with Services {
import counts._
before0 += 1
describe("A subject") {
before00 += 1
it("should first test") { firstTestCounts = counts.copy() }
middle += 1
it("should second test") { initialInstance.get.secondTestCounts = counts.copy() }
after00 += 1
}
after0 += 1
override def newInstance = new NestedPathFunSpecExample(counts, Some(this))
val expectedFirstTestCounts = Counts(before0 = 1, before00 = 1)
val expectedSecondTestCounts = Counts(before0 = 2, before00 = 2, middle = 2, after00 = 1, after0 = 1)
val expectedCounts = Counts(before0 = 2, before00 = 2, middle = 2, after00 = 2, after0 = 2)
}
class SiblingNestedPathFunSpecExample(val counts: Counts, initialInstance: Option[Services] = None) extends path.FunSpec with Services {
import counts._
before0 += 1
describe("A subject") {
before00 += 1
it("should first test") { firstTestCounts = counts.copy() }
after00 += 1
}
middle += 1
describe("Another subject") {
before01 += 1
it("should second test") { initialInstance.get.secondTestCounts = counts.copy() }
after01 += 1
}
after0 += 1
override def newInstance = new SiblingNestedPathFunSpecExample(counts, Some(this))
val expectedFirstTestCounts = Counts(before0 = 1, before00 = 1)
val expectedSecondTestCounts = Counts(before0 = 2, before00 = 1, after00 = 1, middle = 2, before01 = 1, after0 = 1)
val expectedCounts = Counts(before0 = 2, before00 = 1, after01 = 1, middle = 2, before01 = 1, after00 = 1, after0 = 2)
}
class DeeplyNestedPathFunSpecExample(val counts: Counts, initialInstance: Option[Services] = None) extends path.FunSpec with Services {
import counts._
before0 += 1
describe("A subject") {
before00 += 1
describe("when created") {
before000 += 1
it("should first test") { firstTestCounts = counts.copy() }
middle += 1
it("should second test") { initialInstance.get.secondTestCounts = counts.copy() }
after000 += 1
}
after00 += 1
}
after0 += 1
override def newInstance = new DeeplyNestedPathFunSpecExample(counts, Some(this))
val expectedFirstTestCounts = Counts(before0 = 1, before00 = 1, before000 = 1)
val expectedSecondTestCounts = Counts(before0 = 2, before00 = 2, before000 = 2, middle = 2, after000 = 1, after00 = 1, after0 = 1)
val expectedCounts = Counts(before0 = 2, before00 = 2, before000 = 2, middle = 2, after000 = 2, after00 = 2, after0 = 2)
}
class SiblingDeeplyNestedPathFunSpecExample(val counts: Counts, initialInstance: Option[Services] = None) extends path.FunSpec with Services {
import counts._
before0 += 1
describe("A subject") {
before00 += 1
describe("when created") {
before000 += 1
it("should first test") { firstTestCounts = counts.copy() }
after000 += 1
}
after00 += 1
}
middle += 1
describe("Another subject") {
before01 += 1
describe("when created") {
before010 += 1
it("should second test") { initialInstance.get.secondTestCounts = counts.copy() }
after010 += 1
}
after01 += 1
}
after0 += 1
override def newInstance = new SiblingDeeplyNestedPathFunSpecExample(counts, Some(this))
val expectedFirstTestCounts = Counts(before0 = 1, before00 = 1, before000 = 1)
val expectedSecondTestCounts = Counts(before0 = 2, before00 = 1, before000 = 1, after000 = 1, after00 = 1, middle = 2, before01 = 1, before010 = 1, after0 = 1)
val expectedCounts = Counts(before0 = 2, before00 = 1, before000 = 1, after000 = 1, after00 = 1, middle = 2, before01 = 1, before010 = 1, after010 = 1, after01 = 1, after0 = 2)
}
class AsymetricalDeeplyNestedPathFunSpecExample(val counts: Counts, initialInstance: Option[Services] = None) extends path.FunSpec with Services {
import counts._
before0 += 1
describe("A subject") {
before00 += 1
describe("when created") {
before000 += 1
it("should first test") { firstTestCounts = counts.copy() }
after000 += 1
}
middle += 1
it("should second test") { initialInstance.get.secondTestCounts = counts.copy() }
after00 += 1
}
after0 += 1
override def newInstance = new AsymetricalDeeplyNestedPathFunSpecExample(counts, Some(this))
val expectedFirstTestCounts = Counts(before0 = 1, before00 = 1, before000 = 1)
val expectedSecondTestCounts = Counts(before0 = 2, before00 = 2, before000 = 1, after000 = 1, middle = 2, after00 = 1, after0 = 1)
val expectedCounts = Counts(before0 = 2, before00 = 2, before000 = 1, after000 = 1, middle = 2, after00 = 2, after0 = 2)
}
class EmptyPathFreeSpecExample(val counts: Counts, initialInstance: Option[Services] = None) extends path.FreeSpec with Services {
import counts._
middle += 1
override def newInstance = new EmptyPathFreeSpecExample(counts, Some(this))
val expectedFirstTestCounts = Counts()
val expectedSecondTestCounts = Counts()
val expectedCounts = Counts(middle = 1)
}
class EmptyNestedPathFreeSpecExample(val counts: Counts, initialInstance: Option[Services] = None) extends path.FreeSpec with Services {
import counts._
before0 += 1
"A subject" - {
middle += 1
}
after0 += 1
override def newInstance = new EmptyNestedPathFreeSpecExample(counts, Some(this))
val expectedFirstTestCounts = Counts()
val expectedSecondTestCounts = Counts()
val expectedCounts = Counts(before0 = 1, middle = 1, after0 = 1)
}
class SiblingEmptyNestedPathFreeSpecExample(val counts: Counts, initialInstance: Option[Services] = None) extends path.FreeSpec with Services {
import counts._
before0 += 1
"A subject" - {
before00 += 1
}
middle += 1
"Another subject" - {
before01 += 1
}
after0 += 1
override def newInstance = new SiblingEmptyNestedPathFreeSpecExample(counts, Some(this))
val expectedFirstTestCounts = Counts()
val expectedSecondTestCounts = Counts()
val expectedCounts = Counts(before0 = 2, before00 = 1, middle = 2, before01 = 1, after0 = 2)
}
class OneTestSiblingEmptyNestedPathFreeSpecExample(val counts: Counts, initialInstance: Option[Services] = None) extends path.FreeSpec with Services {
import counts._
before0 += 1
"A subject" - {
before00 += 1
}
middle += 1
"Another subject" - {
before01 += 1
"first test" in { initialInstance.get.firstTestCounts = counts.copy() }
after01 += 1
}
after0 += 1
override def newInstance = new OneTestSiblingEmptyNestedPathFreeSpecExample(counts, Some(this))
val expectedFirstTestCounts = Counts(before0 = 2, before00 = 1, middle = 2, before01 = 1, after0 = 1)
val expectedSecondTestCounts = Counts()
val expectedCounts = Counts(before0 = 2, before00 = 1, middle = 2, before01 = 1, after01 = 1, after0 = 2)
}
class OneTestSiblingEmptyDeeplyNestedPathFreeSpecExample(val counts: Counts, initialInstance: Option[Services] = None) extends path.FreeSpec with Services {
import counts._
before0 += 1
"A subject" - {
before00 += 1
}
middle += 1
"Another subject" - {
before01 += 1
"when created" - {
before010 += 1
"first test" in { initialInstance.get.firstTestCounts = counts.copy() }
after010 += 1
}
after01 += 1
}
after0 += 1
override def newInstance = new OneTestSiblingEmptyDeeplyNestedPathFreeSpecExample(counts, Some(this))
val expectedFirstTestCounts = Counts(before0 = 2, before00 = 1, middle = 2, before01 = 1, before010 = 1, after0 = 1)
val expectedSecondTestCounts = Counts()
val expectedCounts = Counts(before0 = 2, before00 = 1, middle = 2, before01 = 1, before010 = 1, after010 = 1, after01 = 1, after0 = 2)
}
class PathFreeSpecExample(val counts: Counts, initialInstance: Option[Services] = None) extends path.FreeSpec with Services {
import counts._
before0 += 1
"first test" in { firstTestCounts = counts.copy() }
middle += 1
"second test" in { initialInstance.get.secondTestCounts = counts.copy() }
after0 += 1
override def newInstance = new PathFreeSpecExample(counts, Some(this))
val expectedFirstTestCounts = Counts(before0 = 1)
val expectedSecondTestCounts = Counts(before0 = 2, middle = 2, after0 = 1)
val expectedCounts = Counts(before0 = 2, middle = 2, after0 = 2)
}
class NestedPathFreeSpecExample(val counts: Counts, initialInstance: Option[Services] = None) extends path.FreeSpec with Services {
import counts._
before0 += 1
"A subject" - {
before00 += 1
"should first test" in { firstTestCounts = counts.copy() }
middle += 1
"should second test" in { initialInstance.get.secondTestCounts = counts.copy() }
after00 += 1
}
after0 += 1
override def newInstance = new NestedPathFreeSpecExample(counts, Some(this))
val expectedFirstTestCounts = Counts(before0 = 1, before00 = 1)
val expectedSecondTestCounts = Counts(before0 = 2, before00 = 2, middle = 2, after00 = 1, after0 = 1)
val expectedCounts = Counts(before0 = 2, before00 = 2, middle = 2, after00 = 2, after0 = 2)
}
class SiblingNestedPathFreeSpecExample(val counts: Counts, initialInstance: Option[Services] = None) extends path.FreeSpec with Services {
import counts._
before0 += 1
"A subject" - {
before00 += 1
"should first test" in { firstTestCounts = counts.copy() }
after00 += 1
}
middle += 1
"Another subject" - {
before01 += 1
"should second test" in { initialInstance.get.secondTestCounts = counts.copy() }
after01 += 1
}
after0 += 1
override def newInstance = new SiblingNestedPathFreeSpecExample(counts, Some(this))
val expectedFirstTestCounts = Counts(before0 = 1, before00 = 1)
val expectedSecondTestCounts = Counts(before0 = 2, before00 = 1, after00 = 1, middle = 2, before01 = 1, after0 = 1)
val expectedCounts = Counts(before0 = 2, before00 = 1, after01 = 1, middle = 2, before01 = 1, after00 = 1, after0 = 2)
}
class DeeplyNestedPathFreeSpecExample(val counts: Counts, initialInstance: Option[Services] = None) extends path.FreeSpec with Services {
import counts._
before0 += 1
"A subject" - {
before00 += 1
"when created" - {
before000 += 1
"should first test" in { firstTestCounts = counts.copy() }
middle += 1
"should second test" in { initialInstance.get.secondTestCounts = counts.copy() }
after000 += 1
}
after00 += 1
}
after0 += 1
override def newInstance = new DeeplyNestedPathFreeSpecExample(counts, Some(this))
val expectedFirstTestCounts = Counts(before0 = 1, before00 = 1, before000 = 1)
val expectedSecondTestCounts = Counts(before0 = 2, before00 = 2, before000 = 2, middle = 2, after000 = 1, after00 = 1, after0 = 1)
val expectedCounts = Counts(before0 = 2, before00 = 2, before000 = 2, middle = 2, after000 = 2, after00 = 2, after0 = 2)
}
class SiblingDeeplyNestedPathFreeSpecExample(val counts: Counts, initialInstance: Option[Services] = None) extends path.FreeSpec with Services {
import counts._
before0 += 1
"A subject" - {
before00 += 1
"when created" - {
before000 += 1
"should first test" in { firstTestCounts = counts.copy() }
after000 += 1
}
after00 += 1
}
middle += 1
"Another subject" - {
before01 += 1
"when created" - {
before010 += 1
"should second test" in { initialInstance.get.secondTestCounts = counts.copy() }
after010 += 1
}
after01 += 1
}
after0 += 1
override def newInstance = new SiblingDeeplyNestedPathFreeSpecExample(counts, Some(this))
val expectedFirstTestCounts = Counts(before0 = 1, before00 = 1, before000 = 1)
val expectedSecondTestCounts = Counts(before0 = 2, before00 = 1, before000 = 1, after000 = 1, after00 = 1, middle = 2, before01 = 1, before010 = 1, after0 = 1)
val expectedCounts = Counts(before0 = 2, before00 = 1, before000 = 1, after000 = 1, after00 = 1, middle = 2, before01 = 1, before010 = 1, after010 = 1, after01 = 1, after0 = 2)
}
class AsymetricalDeeplyNestedPathFreeSpecExample(val counts: Counts, initialInstance: Option[Services] = None) extends path.FreeSpec with Services {
import counts._
before0 += 1
"A subject" - {
before00 += 1
"when created" - {
before000 += 1
"should first test" in { firstTestCounts = counts.copy() }
after000 += 1
}
middle += 1
"should second test" in { initialInstance.get.secondTestCounts = counts.copy() }
after00 += 1
}
after0 += 1
override def newInstance = new AsymetricalDeeplyNestedPathFreeSpecExample(counts, Some(this))
val expectedFirstTestCounts = Counts(before0 = 1, before00 = 1, before000 = 1)
val expectedSecondTestCounts = Counts(before0 = 2, before00 = 2, before000 = 1, after000 = 1, middle = 2, after00 = 1, after0 = 1)
val expectedCounts = Counts(before0 = 2, before00 = 2, before000 = 1, after000 = 1, middle = 2, after00 = 2, after0 = 2)
}
lazy val emptyPathFunSpec = new EmptyPathFunSpecExample(Counts())
lazy val emptyNestedPathFunSpec = new EmptyNestedPathFunSpecExample(Counts())
lazy val siblingEmptyNestedPathFunSpec = new SiblingEmptyNestedPathFunSpecExample(Counts())
lazy val oneTestSiblingEmptyNestedPathFunSpec = new OneTestSiblingEmptyNestedPathFunSpecExample(Counts())
lazy val oneTestSiblingEmptyDeeplyNestedPathFunSpec = new OneTestSiblingEmptyDeeplyNestedPathFunSpecExample(Counts())
lazy val pathFunSpec = new PathFunSpecExample(Counts())
lazy val nestedPathFunSpec = new NestedPathFunSpecExample(Counts())
lazy val siblingNestedPathFunSpec = new SiblingNestedPathFunSpecExample(Counts())
lazy val deeplyNestedPathFunSpec = new DeeplyNestedPathFunSpecExample(Counts())
lazy val siblingDeeplyNestedPathFunSpec = new SiblingDeeplyNestedPathFunSpecExample(Counts())
lazy val asymetricalDeeplyNestedPathFunSpec = new AsymetricalDeeplyNestedPathFunSpecExample(Counts())
lazy val emptyPathFreeSpec = new EmptyPathFreeSpecExample(Counts())
lazy val emptyNestedPathFreeSpec = new EmptyNestedPathFreeSpecExample(Counts())
lazy val siblingEmptyNestedPathFreeSpec = new SiblingEmptyNestedPathFreeSpecExample(Counts())
lazy val oneTestSiblingEmptyNestedPathFreeSpec = new OneTestSiblingEmptyNestedPathFreeSpecExample(Counts())
lazy val oneTestSiblingEmptyDeeplyNestedPathFreeSpec = new OneTestSiblingEmptyDeeplyNestedPathFreeSpecExample(Counts())
lazy val pathFreeSpec = new PathFreeSpecExample(Counts())
lazy val nestedPathFreeSpec = new NestedPathFreeSpecExample(Counts())
lazy val siblingNestedPathFreeSpec = new SiblingNestedPathFreeSpecExample(Counts())
lazy val deeplyNestedPathFreeSpec = new DeeplyNestedPathFreeSpecExample(Counts())
lazy val siblingDeeplyNestedPathFreeSpec = new SiblingDeeplyNestedPathFreeSpecExample(Counts())
lazy val asymetricalDeeplyNestedPathFreeSpec = new AsymetricalDeeplyNestedPathFreeSpecExample(Counts())
}
| travisbrown/scalatest | src/test/scala/org/scalatest/suiteprop/PathBeforeAndAfterExamples.scala | Scala | apache-2.0 | 20,607 |
package dispatch.oauth.spec
import org.asynchttpclient.oauth.{ConsumerKey, RequestToken}
import org.scalacheck.Gen.listOf
import org.scalacheck.Prop.forAll
import org.scalacheck.{Gen, Properties}
/**
* Tests for oauth / exchange.
*
* @author Erik-Berndt Scheper
* @since 25-01-2017
*
*/
object ExchangeSpecification extends Properties("String") {
private val safeChars = "[A-Za-z0-9%._~()'!*:@,;-]*"
private val urlPattern = s"(.*)[?]oauth_token=($safeChars)[&]oauth_signature=($safeChars)".r
private val validKeyString: Gen[String] = listOf(Gen.alphaNumChar).map(_.mkString)
property("signedAuthorize") = forAll(validKeyString, validKeyString) { (keyValue: String, tokenValue: String) =>
import dispatch._
import dispatch.oauth._
trait DropboxHttp extends SomeHttp {
def http: HttpExecutor = Http.default
}
trait DropboxConsumer extends SomeConsumer {
def consumer: ConsumerKey = new ConsumerKey(keyValue, tokenValue)
}
trait DropboxCallback extends SomeCallback {
def callback: String = "oob"
}
trait DropboxEndpoints extends SomeEndpoints {
def requestToken: String = "https://api.dropbox.com/1/oauth/request_token"
def accessToken: String = "https://www.dropbox.com/1/oauth/authorize"
def authorize: String = "https://api.dropbox.com/1/oauth/access_token"
}
object DropboxExchange extends Exchange
with DropboxHttp with DropboxConsumer with DropboxCallback with DropboxEndpoints
val token = new RequestToken(keyValue, tokenValue)
val url = DropboxExchange.signedAuthorize(token)
val urlMatcher = url match {
case urlPattern(path: String, authToken: String, signature: String) =>
(path, authToken, signature)
case _ =>
("", "", "") // no match
}
val urlPath = urlMatcher._1
val authToken = urlMatcher._2
val authSignature = urlMatcher._3
urlPath.equals(DropboxExchange.authorize) &&
authToken.length >= keyValue.length &&
authToken.matches(safeChars) &&
authSignature.length > 0 &&
authSignature.matches(safeChars)
}
}
| dispatch/reboot | core/src/test/scala/oauth/exchange.scala | Scala | lgpl-3.0 | 2,131 |
package commons.models
import commons.repositories.BaseId
trait IdMetaModel {
type ModelId <: BaseId[_]
val id: Property[Option[ModelId]] = Property("id")
}
| Dasiu/play-framework-test-project | app/commons/models/IdMetaModel.scala | Scala | mit | 165 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import org.apache.spark.sql.execution.benchmark.SqlBasedBenchmark
/**
* Synthetic benchmark for date and timestamp functions.
* To run this benchmark:
* {{{
* 1. without sbt:
* bin/spark-submit --class <this class> --jars <spark core test jar> <sql core test jar>
* 2. build/sbt "sql/test:runMain <this class>"
* 3. generate result:
* SPARK_GENERATE_BENCHMARK_FILES=1 build/sbt "sql/test:runMain <this class>"
* Results will be written to "benchmarks/DateTimeBenchmark-results.txt".
* }}}
*/
object DateTimeBenchmark extends SqlBasedBenchmark {
private def doBenchmark(cardinality: Int, exprs: String*): Unit = {
spark.range(cardinality).selectExpr(exprs: _*).write.format("noop").save()
}
private def run(cardinality: Int, name: String, exprs: String*): Unit = {
codegenBenchmark(name, cardinality) {
doBenchmark(cardinality, exprs: _*)
}
}
private def run(cardinality: Int, func: String): Unit = {
codegenBenchmark(s"$func of timestamp", cardinality) {
doBenchmark(cardinality, s"$func(cast(id as timestamp))")
}
}
override def runBenchmarkSuite(mainArgs: Array[String]): Unit = {
val N = 10000000
runBenchmark("Extract components") {
run(N, "cast to timestamp", "cast(id as timestamp)")
run(N, "year")
run(N, "quarter")
run(N, "month")
run(N, "weekofyear")
run(N, "day")
run(N, "dayofyear")
run(N, "dayofmonth")
run(N, "dayofweek")
run(N, "weekday")
run(N, "hour")
run(N, "minute")
run(N, "second")
}
runBenchmark("Current date and time") {
run(N, "current_date", "current_date")
run(N, "current_timestamp", "current_timestamp")
}
runBenchmark("Date arithmetic") {
val dateExpr = "cast(cast(id as timestamp) as date)"
run(N, "cast to date", dateExpr)
run(N, "last_day", s"last_day($dateExpr)")
run(N, "next_day", s"next_day($dateExpr, 'TU')")
run(N, "date_add", s"date_add($dateExpr, 10)")
run(N, "date_sub", s"date_sub($dateExpr, 10)")
run(N, "add_months", s"add_months($dateExpr, 10)")
}
runBenchmark("Formatting dates") {
val dateExpr = "cast(cast(id as timestamp) as date)"
run(N, "format date", s"date_format($dateExpr, 'MMM yyyy')")
}
runBenchmark("Formatting timestamps") {
run(N, "from_unixtime", "from_unixtime(id, 'yyyy-MM-dd HH:mm:ss.SSSSSS')")
}
runBenchmark("Convert timestamps") {
val timestampExpr = "cast(id as timestamp)"
run(N, "from_utc_timestamp", s"from_utc_timestamp($timestampExpr, 'CET')")
run(N, "to_utc_timestamp", s"to_utc_timestamp($timestampExpr, 'CET')")
}
runBenchmark("Intervals") {
val (start, end) = ("cast(id as timestamp)", "cast((id+8640000) as timestamp)")
run(N, "cast interval", start, end)
run(N, "datediff", s"datediff($start, $end)")
run(N, "months_between", s"months_between($start, $end)")
run(1000000, "window", s"window($start, 100, 10, 1)")
}
runBenchmark("Truncation") {
val timestampExpr = "cast(id as timestamp)"
Seq("YEAR", "YYYY", "YY", "MON", "MONTH", "MM", "DAY", "DD", "HOUR", "MINUTE",
"SECOND", "WEEK", "QUARTER").foreach { level =>
run(N, s"date_trunc $level", s"date_trunc('$level', $timestampExpr)")
}
val dateExpr = "cast(cast(id as timestamp) as date)"
Seq("year", "yyyy", "yy", "mon", "month", "mm").foreach { level =>
run(N, s"trunc $level", s"trunc('$level', $dateExpr)")
}
}
runBenchmark("Parsing") {
val n = 1000000
val timestampStrExpr = "concat('2019-01-27 11:02:01.', cast(mod(id, 1000) as string))"
val pattern = "'yyyy-MM-dd HH:mm:ss.SSS'"
run(n, "to timestamp str", timestampStrExpr)
run(n, "to_timestamp", s"to_timestamp($timestampStrExpr, $pattern)")
run(n, "to_unix_timestamp", s"to_unix_timestamp($timestampStrExpr, $pattern)")
val dateStrExpr = "concat('2019-01-', cast(mod(id, 25) as string))"
run(n, "to date str", dateStrExpr)
run(n, "to_date", s"to_date($dateStrExpr, 'yyyy-MM-dd')")
}
}
}
| WindCanDie/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/DateTimeBenchmark.scala | Scala | apache-2.0 | 4,982 |
package mq.consumer
import mq.{Message, BrokerActor}
/**
* Created by Bruce on 3/1/15.
*/
class ConsolePrint extends BrokerActor {
def process(m: Message) = {
println(m)
}
}
| ElasticQueue/ElasticQueue | src/main/scala/mq/consumer/ConsolePrint.scala | Scala | mit | 190 |
package dzufferey.smtlib
import org.scalatest.funsuite.AnyFunSuite
class DRealSuite extends AnyFunSuite {
val sampleModel1 = """delta-sat with delta = 0.1
beam.dx : [-inf, 1015.625] = [0, 0]
beam.dy : [-inf, 1015.625] = [0, 0]
beam.dz : [ ENTIRE ] = [-162.9375, -153.875]
beam.q_a : [-inf, 1] = [1, 1]
beam.q_i : [-inf, 1] = [0, 0]
beam.q_j : [-inf, 1] = [0, 0]
beam.q_k : [-inf, 1] = [0, 0]
leftmotor.angle : [-inf, 3.141592653589794] = [-3.141592653589792, 3.141592653589794]
leftmotor.angle_dt : [ ENTIRE ] = [1, 1]
leftmotor.q_a : [-inf, 1] = [-1, 1]
leftmotor.q_i : [-inf, 1] = [-1, 1]
leftmotor.q_j : [-inf, 1] = [-1, 1]
leftmotor.q_k : [-inf, 1] = [-1, 1]
leftwheel.dx : [-inf, 1015.625] = [-64, -56]
leftwheel.dx_dt : [ ENTIRE ] = [ -INFTY ]
leftwheel.dy : [-inf, 1015.625] = [-64, -56]
leftwheel.dy_dt : [ ENTIRE ] = [ -INFTY ]
leftwheel.dz : [ ENTIRE ] = [-160, -150.9375]
leftwheel.q_a : [-inf, 1] = [-1, 1]
leftwheel.q_a_dt : [ ENTIRE ] = [ -INFTY ]
leftwheel.q_i : [-inf, 1] = [-1, 1]
leftwheel.q_i_dt : [ ENTIRE ] = [ -INFTY ]
leftwheel.q_j : [-inf, 1] = [-1, 1]
leftwheel.q_j_dt : [ ENTIRE ] = [ -INFTY ]
leftwheel.q_k : [-inf, 1] = [-1, 1]
leftwheel.q_k_dt : [ ENTIRE ] = [ -INFTY ]
rightmotor.angle : [-inf, 3.141592653589794] = [-3.141592653589792, 3.141592653589794]
rightmotor.angle_dt : [ ENTIRE ] = [1, 1]
rightmotor.q_a : [-inf, 1] = [-1, 1]
rightmotor.q_i : [-inf, 1] = [-1, 1]
rightmotor.q_j : [-inf, 1] = [-1, 1]
rightmotor.q_k : [-inf, 1] = [-1, 1]
rightwheel.dx : [-inf, 1015.625] = [-64, -56]
rightwheel.dx_dt : [ ENTIRE ] = [ -INFTY ]
rightwheel.dy : [-inf, 1015.625] = [-64, -56]
rightwheel.dy_dt : [ ENTIRE ] = [ -INFTY ]
rightwheel.dz : [ ENTIRE ] = [-160, -154.21875]
rightwheel.q_a : [-inf, 1] = [-1, 1]
rightwheel.q_a_dt : [ ENTIRE ] = [ -INFTY ]
rightwheel.q_i : [-inf, 1] = [-1, 1]
rightwheel.q_i_dt : [ ENTIRE ] = [ -INFTY ]
rightwheel.q_j : [-inf, 1] = [-1, 1]
rightwheel.q_j_dt : [ ENTIRE ] = [ -INFTY ]
rightwheel.q_k : [-inf, 1] = [-1, 1]
rightwheel.q_k_dt : [ ENTIRE ] = [ -INFTY ]
siminput0_input : [ ENTIRE ] = [1, 1]
siminput1_input : [ ENTIRE ] = [1, 1]
tail.dz : [ ENTIRE ] = [-150, -140.9375]
tail.q_a : [-inf, 1] = [-1, 1]
tail.q_i : [-inf, 1] = [-1, 1]
tail.q_j : [-inf, 1] = [-1, 1]
tail.q_k : [-inf, 1] = [-1, 1]
"""
test("parsing model 1"){
val model = DRealParser.parse(sampleModel1)
//Console.println(model.get.mkString("\n"))
assert(model.isDefined)
}
test("checking dReal model parsing") {
val x = Variable("x").setType(Real)
val form1 = Eq(x, DRealDecl.cos(x))
val form2 = And(Eq(x, Literal(2.0)), Eq(x, Literal(1.0)))
val solver3 = new DRealHackI(QF_NRA, "dreal", Array("--in", "--model"), None, true, false, None, 1)
solver3.push
solver3.assert(form1)
solver3.checkSat match {
case Sat(model) =>
assert(model.isDefined)
val m = model.get
m(x) match {
case ValD(v) => assert((v - math.cos(v)).abs < 0.1)
case _ => assert(false)
}
case _ =>
assert(false)
}
solver3.pop
solver3.assert(form2)
assert(solver3.checkSat == UnSat)
}
ignore("checking real division") {
val x = Variable("x").setType(Real)
val form1 = Eq(Literal(1.0), Divides(x,x).setType(Real))
val solver = new DRealHack(QF_NRA, "dreal", Array("--in"), None, true, false, None, 1)
solver.assert(form1)
solver.checkSat() match {
case Sat(_) => ()
case _ => assert(false)
}
}
}
| dzufferey/scala-smtlib-interface | src/test/scala/dzufferey/smtlib/DRealSuite.scala | Scala | apache-2.0 | 3,485 |
package es.weso.shex
case class ValueClassDefinition(
defn: Either[(ValueClass,Actions), External]
)
object ValueClassDefinition {
def fromValueClass(vc: ValueClass): ValueClassDefinition = {
ValueClassDefinition(Left((vc,Actions.empty)))
}
def fromValueClassActions(vc: ValueClass, as: Actions): ValueClassDefinition = {
ValueClassDefinition(Left((vc,as)))
}
def external: ValueClassDefinition = {
ValueClassDefinition(Right(External()))
}
}
case class External()
| labra/ShExcala | src/main/scala/es/weso/shex/ValueClassDefinition.scala | Scala | mit | 508 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package models
import play.api.libs.json._
case class IP14PensionSavingsModel(eligibleIP14PensionSavings: Option[String])
object IP14PensionSavingsModel {
implicit val format = Json.format[IP14PensionSavingsModel]
}
| hmrc/pensions-lifetime-allowance-frontend | app/models/IP14PensionSavingsModel.scala | Scala | apache-2.0 | 826 |
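A short, REPL-style sketch of how the implicit `format` above is typically exercised (assumes play-json on the classpath and the `models` package in scope):

```scala
import play.api.libs.json.Json

val json = Json.toJson(IP14PensionSavingsModel(Some("yes")))
// json.toString == """{"eligibleIP14PensionSavings":"yes"}"""
val parsed = json.validate[IP14PensionSavingsModel]
// parsed.get == IP14PensionSavingsModel(Some("yes"))
```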
object X {
def f(e: Either[Int, X.type]) = e match {
case Left(i) => i
case Right(X) => 0
// scala/bug#5986 spurious exhaustivity warning here
}
}
| loskutov/intellij-scala | testdata/scalacTests/pos/t5968.scala | Scala | apache-2.0 | 164 |
package com.airbnb.common.ml.strategy.trainer
import org.junit.Test
import com.airbnb.common.ml.strategy.data.BinarySampleMockUtil
import com.airbnb.common.ml.strategy.eval.BinaryMetrics
import com.airbnb.common.ml.strategy.params.BaseParam
import com.airbnb.common.ml.util.ScalaLogging
class BinaryRegressionTrainerTest
extends ScalaLogging {
@Test
def evalExample(): Unit = {
val params = BaseParam.getDefault
val examples = BinarySampleMockUtil.getKnownSamples
examples.foreach(e => {
val score = params.score(e)
val result = BinaryRegressionTrainer.evalExample(e, params, 0.5)
logger.info(s"score $score value ${e.observedValue} result $result")
})
}
@Test
def getMetrics(): Unit = {
val params = BaseParam.getDefault
val examples = BinarySampleMockUtil.getKnownSamples
val m = BinaryRegressionTrainer.getMetrics(examples, params)
logger.debug(s"metrics ${BinaryMetrics.metricsHeader}")
logger.debug(s"metrics ${m.toTSVRow}")
}
}
| airbnb/aerosolve | airlearner/airlearner-strategy/src/test/scala/com/airbnb/common/ml/strategy/trainer/BinaryRegressionTrainerTest.scala | Scala | apache-2.0 | 1,010 |
package im.actor.server.values
import akka.actor.{ ActorSystem, ExtendedActorSystem, Extension, ExtensionId, ExtensionIdProvider }
import akka.util.Timeout
import im.actor.config.ActorConfig
final class ValuesExtension(val system: ActorSystem) extends Extension with SyncedSet {
val defaultTimeout = Timeout(ActorConfig.defaultTimeout)
}
object ValuesExtension extends ExtensionId[ValuesExtension] with ExtensionIdProvider {
override def createExtension(system: ExtendedActorSystem): ValuesExtension = new ValuesExtension(system)
override def lookup(): ExtensionId[_ <: Extension] = ValuesExtension
} | EaglesoftZJ/actor-platform | actor-server/actor-core/src/main/scala/im/actor/server/values/ValuesExtension.scala | Scala | agpl-3.0 | 610 |
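For context, an Akka `ExtensionId` such as the one above is normally obtained through its `apply`; a small, hedged sketch (the `ActorSystem` name is arbitrary):

```scala
import akka.actor.ActorSystem

val system = ActorSystem("example")
// apply registers the extension on first use and returns the per-system instance
val values: ValuesExtension = ValuesExtension(system)
```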
package org.jetbrains.sbt.runner
import com.intellij.execution.configurations.{RunProfile, RunProfileState, RunnerSettings}
import com.intellij.execution.executors.DefaultDebugExecutor
import com.intellij.execution.runners.{ExecutionEnvironment, GenericProgramRunner, ProgramRunner}
/**
* User: Dmitry.Naydanov
* Date: 14.08.18.
*/
class SbtProgramRunner extends GenericProgramRunner[RunnerSettings] with SbtProgramRunnerBase {
override def getRunnerId: String = "SbtProgramRunner"
override def execute(environment: ExecutionEnvironment, callback: ProgramRunner.Callback, state: RunProfileState): Unit = {
state match {
case sbtState: SbtCommandLineState =>
if (sbtState.configuration.useSbtShell) submitCommands(environment, sbtState) else super.execute(environment, callback, state)
case _ =>
}
}
override def canRun(executorId: String, profile: RunProfile): Boolean =
checkRunProfile(profile) && executorId != DefaultDebugExecutor.EXECUTOR_ID
}
| JetBrains/intellij-scala | scala/scala-impl/src/org/jetbrains/sbt/runner/SbtProgramRunner.scala | Scala | apache-2.0 | 999 |
/* __ *\\
** ________ ___ / / ___ __ ____ PhantomJS support for Scala.js **
** / __/ __// _ | / / / _ | __ / // __/ (c) 2013-2017, LAMP/EPFL **
** __\\ \\/ /__/ __ |/ /__/ __ |/_// /_\\ \\ https://www.scala-js.org/ **
** /____/\\___/_/ |_/____/_/ | |__/ /____/ **
** |/____/ **
\\* */
package org.scalajs.jsenv.phantomjs
private[phantomjs] trait WebsocketManager {
def start(): Unit
def stop(): Unit
def sendMessage(msg: String): Unit
def closeConnection(): Unit
def localPort: Int
def isConnected: Boolean
def isClosed: Boolean
}
| sjrd/scala-js-env-phantomjs | phantomjs-env/src/main/scala/org/scalajs/jsenv/phantomjs/WebsocketManager.scala | Scala | bsd-3-clause | 815 |
import org.apache.http.client.HttpClient
import org.apache.http.impl.client.DefaultHttpClient
import org.apache.http.client.methods.HttpGet
import oauth.signpost.commonshttp.CommonsHttpOAuthConsumer
import org.apache.commons.io.IOUtils
object TwitterPull {
val AccessToken = "access token for your app";
val AccessSecret = "access secret for your app";
val ConsumerKey = "consumer key for your app";
val ConsumerSecret = "consumer secret for your app";
def main(args: Array[String]) {
val consumer = new CommonsHttpOAuthConsumer(ConsumerKey,ConsumerSecret);
consumer.setTokenWithSecret(AccessToken, AccessSecret);
val request = new HttpGet("http://api.twitter.com/1.1/followers/ids.json?cursor=-1&screen_name=josdirksen");
consumer.sign(request);
val client = new DefaultHttpClient();
val response = client.execute(request);
println(response.getStatusLine().getStatusCode());
println(IOUtils.toString(response.getEntity().getContent()));
}
} | riccardomerolla/scala-twitter-client | src/main/scala/TwitterPull.scala | Scala | mit | 1,006 |
/*
* This file is part of P2pCore.
*
* Copyright (C) 2012 Timur Mehrvarz, timur.mehrvarz(at)gmail.com
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation <http://www.gnu.org/licenses/>, either
* version 3 of the License, or (at your option) any later version.
*/
package timur.p2pCore
object RelayBase {
def main(args:Array[String]): Unit = {
new RelayBase().start
}
}
class RelayBase extends RelayTrait {
override def connectedThread(connectString:String) {
val msg = "data"
log("connectedThread send='"+msg+"'")
send(msg)
log("connectedThread finished")
}
override def receiveMsgHandler(str:String) {
if(str=="data") {
log("receiveMsgHandler 'data'; setting relayQuitFlag")
relayQuitFlag = true
return
}
log("receiveMsgHandler str=["+str+"]")
}
}
| mehrvarz/P2pCore | src/RelayBase.scala | Scala | gpl-3.0 | 951 |
package objsets
import TweetReader._
/**
* A class to represent tweets.
*/
class Tweet(val user: String, val text: String, val retweets: Int) {
override def toString: String =
"User: " + user + "\\n" +
"Text: " + text + " [" + retweets + "]"
}
/**
* This represents a set of objects of type `Tweet` in the form of a binary search
* tree. Every branch in the tree has two children (two `TweetSet`s). There is an
* invariant which always holds: for every branch `b`, all elements in the left
* subtree are smaller than the tweet at `b`. The elements in the right subtree are
* larger.
*
* Note that the above structure requires us to be able to compare two tweets (we
* need to be able to say which of two tweets is larger, or if they are equal). In
* this implementation, the equality / order of tweets is based on the tweet's text
* (see `def incl`). Hence, a `TweetSet` could not contain two tweets with the same
* text from different users.
*
*
* The advantage of representing sets as binary search trees is that the elements
* of the set can be found quickly. If you want to learn more you can take a look
* at the Wikipedia page [1], but this is not necessary in order to solve this
* assignment.
*
* [1] http://en.wikipedia.org/wiki/Binary_search_tree
*/
abstract class TweetSet {
/**
* This method takes a predicate and returns a subset of all the elements
* in the original set for which the predicate is true.
*
* Question: Can we implement this method here, or should it remain abstract
* and be implemented in the subclasses?
*/
def filter(p: Tweet => Boolean): TweetSet = filterAcc(p, new Empty)
/**
* This is a helper method for `filter` that propagates the accumulated tweets.
*/
def filterAcc(p: Tweet => Boolean, acc: TweetSet): TweetSet
/**
* Returns a new `TweetSet` that is the union of `TweetSet`s `this` and `that`.
*
* Question: Should we implement this method here, or should it remain abstract
* and be implemented in the subclasses?
*/
def union(that: TweetSet): TweetSet
/**
* Returns the tweet from this set which has the greatest retweet count.
*
* Calling `mostRetweeted` on an empty set should throw an exception of
* type `java.util.NoSuchElementException`.
*
* Question: Should we implement this method here, or should it remain abstract
* and be implemented in the subclasses?
*/
def mostRetweeted: Tweet
/**
* Returns a list containing all tweets of this set, sorted by retweet count
* in descending order. In other words, the head of the resulting list should
* have the highest retweet count.
*
* Hint: the method `remove` on TweetSet will be very useful.
* Question: Should we implement this method here, or should it remain abstract
* and be implemented in the subclasses?
*/
def descendingByRetweet: TweetList
/**
* The following methods are already implemented
*/
/**
   * Returns a new `TweetSet` which contains all elements of this set, and
   * the new element `tweet` in case it does not already exist in this set.
*
* If `this.contains(tweet)`, the current set is returned.
*/
def incl(tweet: Tweet): TweetSet
/**
* Returns a new `TweetSet` which excludes `tweet`.
*/
def remove(tweet: Tweet): TweetSet
/**
* Tests if `tweet` exists in this `TweetSet`.
*/
def contains(tweet: Tweet): Boolean
/**
* This method takes a function and applies it to every element in the set.
*/
def foreach(f: Tweet => Unit): Unit
// helper method
def isEmpty: Boolean
}
class Empty extends TweetSet {
def filterAcc(p: Tweet => Boolean, acc: TweetSet): TweetSet = acc
/**
* The following methods are already implemented
*/
def contains(tweet: Tweet): Boolean = false
def incl(tweet: Tweet): TweetSet = new NonEmpty(tweet, new Empty, new Empty)
def remove(tweet: Tweet): TweetSet = this
def foreach(f: Tweet => Unit): Unit = ()
// helper method
def isEmpty: Boolean = true
// Overridden methods
/**
* Returns a list containing all tweets of this set, sorted by retweet count
* in descending order. In other words, the head of the resulting list should
* have the highest retweet count.
**/
override def descendingByRetweet: TweetList = Nil
/** Returns a new `TweetSet` that is the union of `TweetSet`s `this` and `that`.
*
*/
override def union(that: TweetSet): TweetSet = that
/**
* Returns the tweet from this set which has the greatest retweet count.
*
* Calling `mostRetweeted` on an empty set should throw an exception of
* type `java.util.NoSuchElementException`.
*/
override def mostRetweeted: Tweet = throw new java.util.NoSuchElementException()
}
class NonEmpty(elem: Tweet, left: TweetSet, right: TweetSet) extends TweetSet {
def filterAcc(p: Tweet => Boolean, acc: TweetSet): TweetSet = {
if (p(elem)) left.filterAcc(p, right.filterAcc(p, acc.incl(elem)))
else
left.filterAcc(p, right.filterAcc(p, acc))
}
/**
* The following methods are already implemented
*/
def contains(x: Tweet): Boolean =
if (x.text < elem.text) left.contains(x)
else if (elem.text < x.text) right.contains(x)
else true
def incl(x: Tweet): TweetSet = {
if (x.text < elem.text) new NonEmpty(elem, left.incl(x), right)
else if (elem.text < x.text) new NonEmpty(elem, left, right.incl(x))
else this
}
def remove(tw: Tweet): TweetSet =
if (tw.text < elem.text) new NonEmpty(elem, left.remove(tw), right)
else if (elem.text < tw.text) new NonEmpty(elem, left, right.remove(tw))
else left.union(right)
def foreach(f: Tweet => Unit): Unit = {
f(elem)
left.foreach(f)
right.foreach(f)
}
// helper method
def isEmpty: Boolean = false
/**
* Returns a new `TweetSet` that is the union of `TweetSet`s `this` and `that`.
*
*/
override def union(that: TweetSet): TweetSet = {
if (that.isEmpty) this
else that.filterAcc(x => true, this)
}
/**
* Returns the tweet from this set which has the greatest retweet count.
*
*/
override def mostRetweeted: Tweet = {
def moreTweets(x: Tweet, y: Tweet): Tweet = {
if (x.retweets > y.retweets) x else y
}
if (left.isEmpty && right.isEmpty) elem
else if (left.isEmpty && !right.isEmpty) moreTweets(elem, right.mostRetweeted)
else if (right.isEmpty && !left.isEmpty) moreTweets(elem, left.mostRetweeted)
else moreTweets(elem, moreTweets(left.mostRetweeted, right.mostRetweeted))
}
/**
* Returns a list containing all tweets of this set, sorted by retweet count
* in descending order. In other words, the head of the resulting list should
* have the highest retweet count.
*
*/
override def descendingByRetweet: TweetList =
new Cons(mostRetweeted, remove(mostRetweeted).descendingByRetweet)
}
trait TweetList {
def head: Tweet
def tail: TweetList
def isEmpty: Boolean
def foreach(f: Tweet => Unit): Unit =
if (!isEmpty) {
f(head)
tail.foreach(f)
}
}
object Nil extends TweetList {
def head = throw new java.util.NoSuchElementException("head of EmptyList")
def tail = throw new java.util.NoSuchElementException("tail of EmptyList")
def isEmpty = true
}
class Cons(val head: Tweet, val tail: TweetList) extends TweetList {
def isEmpty = false
}
object GoogleVsApple {
val google = List("android", "Android", "galaxy", "Galaxy", "nexus", "Nexus")
val apple = List("ios", "iOS", "iphone", "iPhone", "ipad", "iPad")
lazy val googleTweets: TweetSet = TweetReader.allTweets.filter { t => google.exists { keyWord => t.text.contains(keyWord) } }
lazy val appleTweets: TweetSet = TweetReader.allTweets.filter { t => apple.exists { keyWord => t.text.contains(keyWord) } }
/**
* A list of all tweets mentioning a keyword from either apple or google,
* sorted by the number of retweets.
*/
lazy val trending: TweetList = appleTweets.union(googleTweets).descendingByRetweet
}
object Main extends App {
// Print the trending tweets
GoogleVsApple.trending foreach println
}
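// Hypothetical usage sketch (not part of the original assignment code): exercises the
// TweetSet API defined above on a tiny hand-built set. The tweets and the object name
// below are made up for illustration only.
object TweetSetUsageSketch {
  def demo(): Unit = {
    val a = new Tweet("alice", "scala is fun", 10)
    val b = new Tweet("bob", "fp in scala", 25)
    val c = new Tweet("carol", "coffee break", 3)
    // incl builds a persistent binary search tree keyed on the tweet text
    val set: TweetSet = new Empty().incl(a).incl(b).incl(c)
    // filter keeps only the tweets matching the predicate
    val popular = set.filter(_.retweets >= 10)
    // union merges two sets; descendingByRetweet sorts by retweet count
    val ranked: TweetList = popular.union(set).descendingByRetweet
    ranked.foreach(println)
  }
}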
| mathusuthan/scala-fun | objsets/src/main/scala/objsets/TweetSet.scala | Scala | mit | 8,243 |
/*
* Copyright 2014 http4s.org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.http4s
package server
package middleware
import cats.Functor
import cats.data.Kleisli
import org.http4s.headers.`Strict-Transport-Security`
import scala.concurrent.duration._
/** [[Middleware]] to add HTTP Strict Transport Security (HSTS) support adding
* the Strict Transport Security headers
*/
object HSTS {
  // Default HSTS policy: one-year max-age, including subdomains, no preload
private val defaultHSTSPolicy = `Strict-Transport-Security`.unsafeFromDuration(
365.days,
includeSubDomains = true,
preload = false,
)
def apply[F[_]: Functor, A, G[_]](
routes: Kleisli[F, A, Response[G]]
): Kleisli[F, A, Response[G]] =
apply(routes, defaultHSTSPolicy)
def apply[F[_]: Functor, A, G[_]](
http: Kleisli[F, A, Response[G]],
header: `Strict-Transport-Security`,
): Kleisli[F, A, Response[G]] =
Kleisli { req =>
http.map(_.putHeaders(header)).apply(req)
}
def unsafeFromDuration[F[_]: Functor, A, G[_]](
http: Kleisli[F, A, Response[G]],
maxAge: FiniteDuration = 365.days,
includeSubDomains: Boolean = true,
preload: Boolean = false,
): Kleisli[F, A, Response[G]] = {
val header = `Strict-Transport-Security`.unsafeFromDuration(maxAge, includeSubDomains, preload)
apply(http, header)
}
object httpRoutes {
def apply[F[_]: Functor](httpRoutes: HttpRoutes[F]): HttpRoutes[F] =
HSTS.apply(httpRoutes)
def apply[F[_]: Functor](
httpRoutes: HttpRoutes[F],
header: `Strict-Transport-Security`,
): HttpRoutes[F] =
HSTS.apply(httpRoutes, header)
def unsafeFromDuration[F[_]: Functor](
httpRoutes: HttpRoutes[F],
maxAge: FiniteDuration = 365.days,
includeSubDomains: Boolean = true,
preload: Boolean = false,
): HttpRoutes[F] =
HSTS.unsafeFromDuration(httpRoutes, maxAge, includeSubDomains, preload)
}
object httpApp {
def apply[F[_]: Functor](httpApp: HttpApp[F]): HttpApp[F] =
HSTS.apply(httpApp)
def apply[F[_]: Functor](httpApp: HttpApp[F], header: `Strict-Transport-Security`): HttpApp[F] =
HSTS.apply(httpApp, header)
def unsafeFromDuration[F[_]: Functor](
httpApp: HttpApp[F],
maxAge: FiniteDuration = 365.days,
includeSubDomains: Boolean = true,
preload: Boolean = false,
): HttpApp[F] =
HSTS.unsafeFromDuration(httpApp, maxAge, includeSubDomains, preload)
}
}
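// Hypothetical usage sketch (not shipped with http4s): applies the HSTS middleware defined
// above to a minimal routes value. It assumes cats-effect IO on the classpath (a transitive
// dependency of http4s-core); the route itself is illustrative only.
private[middleware] object HSTSUsageSketch {
  import cats.effect.IO
  // Any HttpRoutes[IO] from the application would do here.
  val routes: HttpRoutes[IO] =
    HttpRoutes.of[IO] { case _ => IO.pure(Response[IO](Status.Ok)) }
  // Default policy: one-year max-age, includeSubDomains = true, preload = false.
  val secured: HttpRoutes[IO] = HSTS.httpRoutes(routes)
  // Custom policy built from a duration (scala.concurrent.duration._ is already imported above).
  val custom: HttpRoutes[IO] =
    HSTS.httpRoutes.unsafeFromDuration(routes, maxAge = 30.days, preload = true)
}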
| http4s/http4s | server/shared/src/main/scala/org/http4s/server/middleware/HSTS.scala | Scala | apache-2.0 | 3,037 |
package io.buoyant.linkerd.protocol.http
import com.twitter.finagle.Path
import com.twitter.finagle.util.LoadService
import io.buoyant.config.Parser
import io.buoyant.linkerd.IdentifierInitializer
import io.buoyant.linkerd.protocol.HttpIdentifierConfig
import org.scalatest.FunSuite
class PathIdentifierConfigTest extends FunSuite {
test("sanity") {
    // ensure it doesn't totally blow up
val _ = new PathIdentifierConfig().newIdentifier(Path.empty)
}
test("service registration") {
assert(LoadService[IdentifierInitializer].exists(_.isInstanceOf[PathIdentifierInitializer]))
}
test("parse config") {
val yaml = s"""
|kind: io.l5d.path
|segments: 2
|consume: true
""".stripMargin
val mapper = Parser.objectMapper(yaml, Iterable(Seq(PathIdentifierInitializer)))
val config = mapper.readValue[HttpIdentifierConfig](yaml).asInstanceOf[PathIdentifierConfig]
assert(config.segments == Some(2))
assert(config.consume == Some(true))
}
}
| denverwilliams/linkerd | linkerd/protocol/http/src/test/scala/io/buoyant/linkerd/protocol/http/PathIdentifierConfigTest.scala | Scala | apache-2.0 | 1,037 |
package com.blinkbox.books.search
import java.io.IOException
import scala.concurrent.Future
import scala.concurrent.duration._
import org.json4s._
import org.json4s.jackson.JsonMethods._
import org.junit.runner.RunWith
import org.mockito.Matchers
import org.mockito.Matchers._
import org.mockito.Mockito._
import org.scalatest.{ BeforeAndAfter, FunSuite }
import org.scalatest.mock.MockitoSugar
import org.scalatest.junit.JUnitRunner
import spray.http.StatusCodes._
import spray.testkit.ScalatestRouteTest
import com.blinkbox.books.spray.Paging._
import SearchApi._
@RunWith(classOf[JUnitRunner])
class SearchApiTests extends FunSuite with BeforeAndAfter with ScalatestRouteTest with MockitoSugar with SearchApi {
override def service = mockService
override val apiConfig = ApiConfig("localhost", 8080, "service/search", 5.seconds, "*", 100.seconds, 200.seconds)
var mockService: SearchService = _
override implicit def actorRefFactory = system
val isbn = "1234567890123"
val searchResults = BookSearchResult(42, Seq("suggested search"), List(
Book("9781443414005", "Bleak House", List("Charles Dickens")),
Book("9780141920061", "Hard Times", List("Charles Dickens"))))
val suggestions = List(
BookSuggestion("9781443414005", "Bleak House", List("Charles Dickens")),
AuthorSuggestion("1d1f0d88a461e2e143c44c7736460c663c27ef3b", "Charles Dickens"),
BookSuggestion("9780141920061", "Hard Times", List("Charles Dickens")))
val similar = BookSearchResult(101, Seq(), List(
Book("9781443414005", "Block House", List("Charles Smith")),
Book("9780141920061", "Happy Times", List("Charles Smith"))))
before {
mockService = mock[SearchService]
// Default mocked behaviour: return results for any query.
doReturn(Future(searchResults)).when(service).search(anyString, anyInt, anyInt, any[SortOrder])
doReturn(Future(suggestions)).when(service).suggestions(anyString, anyInt, anyInt)
doReturn(Future(similar)).when(service).findSimilar(anyString, anyInt, anyInt)
}
test("simple search for book") {
Get("/search/books?q=some+words") ~> route ~> check {
assert(status == OK &&
contentType.value == "application/vnd.blinkboxbooks.data.v1+json; charset=UTF-8" &&
header("Access-Control-Allow-Origin").get.value == apiConfig.corsOrigin &&
header("Cache-Control").get.value == s"public, max-age=${apiConfig.searchMaxAge.toSeconds}")
// Check performed query, including default parameters.
verify(service).search("some words", 0, 50, SortOrder("RELEVANCE", true))
// Just this once, check the response against the full text of the expected JSON.
val expectedJson =
"""{
"type": "urn:blinkboxbooks:schema:search",
"id": "some words",
"numberOfResults": 42,
"suggestions": ["suggested search"],
"books": [
{
"id": "9781443414005",
"title": "Bleak House",
"authors": [
"Charles Dickens"
]
},
{
"id": "9780141920061",
"title": "Hard Times",
"authors": [
"Charles Dickens"
]
}
],
"links": [
{
"rel": "this",
"href": "service/search/books?count=50&offset=0"
}
]
}"""
// Compare normalised JSON string representations.
assert(parse(body.data.asString).toString == parse(expectedJson).toString, "Got: \n" + body.data.asString)
}
}
test("search for book with all parameters") {
val (offset, count) = (5, 10)
// Set up mock to return search results for expected offset and count only.
doReturn(Future(searchResults)).when(service)
.search(anyString, Matchers.eq(offset), Matchers.eq(count), any[SortOrder])
Get(s"/search/books?q=some+words&count=$count&order=POPULARITY&desc=false&offset=$offset") ~> route ~> check {
assert(status == OK &&
contentType.value == "application/vnd.blinkboxbooks.data.v1+json; charset=UTF-8")
// Check request parameters were picked up correctly.
verify(service).search("some words", offset, count, SortOrder("POPULARITY", false))
// Check that the JSON response is correct
val result = parse(body.data.asString).extract[QuerySearchResult]
assert(result.numberOfResults == searchResults.numberOfResults)
// Check the expected links, ignoring their order in the returned list.
val links = result.links.groupBy(_.rel).mapValues(_.head.href)
assert(links == Map(
"this" -> s"service/search/books?count=$count&offset=$offset",
"next" -> s"service/search/books?count=$count&offset=${offset + count}",
"prev" -> s"service/search/books?count=$count&offset=0"))
}
}
test("returns empty list for search query that matches nothing") {
doReturn(Future(BookSearchResult(0, Seq(), List())))
.when(service).search(anyString, anyInt, anyInt, any[SortOrder])
Get("/search/books?q=unmatched&count=10") ~> route ~> check {
assert(status == OK)
val result = parse(body.data.asString).extract[QuerySearchResult]
assert(result.numberOfResults == 0 &&
result.links.size == 1 &&
result.links(0) == PageLink("this", s"service/search/books?count=10&offset=0"))
}
}
test("search with missing query parameter") {
Get("search/books") ~> route ~> check {
assert(!handled)
}
}
test("search with missing query parameter and one valid parameter") {
Get("search/books?limit=10") ~> route ~> check {
assert(!handled)
}
}
test("error returned when we fail to perform search on back-end") {
for (
      (exception, expectedCode) <- Map(
new IOException("Test exception") -> InternalServerError,
new IndexOutOfBoundsException("Test exception") -> InternalServerError,
new IllegalArgumentException("Test exception") -> BadRequest)
) {
// Return failure from mock service.
doReturn(Future(throw exception)).when(service).search(anyString, anyInt, anyInt, any[SortOrder])
Get("/search/books?q=some+query") ~> route ~> check {
        assert(status == expectedCode)
}
}
}
test("simple query for suggestions") {
Get("/search/suggestions?q=foo") ~> route ~> check {
assert(status == OK &&
contentType.value == "application/vnd.blinkboxbooks.data.v1+json; charset=UTF-8" &&
header("Access-Control-Allow-Origin").get.value == apiConfig.corsOrigin &&
header("Cache-Control").get.value == s"public, max-age=${apiConfig.autoCompleteMaxAge.toSeconds}")
// TODO!
// val result = parse(body.data.asString).extract[SuggestionsResult]
// assert(result.items == suggestions, "Got: " + body.data.asString)
}
}
test("simple query for suggestions with query parameters") {
val (offset, count) = (5, 15)
Get(s"/search/suggestions?q=foo&offset=$offset&count=$count") ~> route ~> check {
assert(status == OK)
// Check query parameters in request.
verify(service).suggestions("foo", offset, count)
}
}
test("simple query for similar books") {
Get(s"/search/books/$isbn/similar") ~> route ~> check {
assert(status == OK &&
header("Access-Control-Allow-Origin").get.value == apiConfig.corsOrigin &&
header("Cache-Control").get.value == s"public, max-age=${apiConfig.searchMaxAge.toSeconds}")
// Check performed query, including default parameters.
verify(service).findSimilar(isbn, 0, 10)
// Check returned results.
val result = parse(body.data.asString).extract[SimilarBooksSearchResult]
assert(result.numberOfResults == similar.numberOfResults &&
result.books == similar.books &&
result.suggestions == similar.suggestions)
}
}
test("query for similar books with query parameters") {
val (offset, count) = (2, 18)
Get(s"/search/books/$isbn/similar?offset=$offset&count=$count") ~> route ~> check {
assert(status == OK)
// Check performed query, including default parameters.
verify(service).findSimilar(isbn, offset, count)
// Check returned results.
val str = body.data.asString
val result = parse(body.data.asString).extract[SimilarBooksSearchResult]
assert(result.numberOfResults == similar.numberOfResults &&
result.books == similar.books &&
result.suggestions == similar.suggestions)
}
}
test("request for similar books with invalid ISBN") {
for (id <- Seq("123456789012", "12345678901234", "xyz"))
Get(s"/search/books/$id/similar") ~> route ~> check {
assert(!handled)
}
}
test("invalid request for similar books, with unwanted slash at end of URL") {
Get("/search/books/") ~> route ~> check {
assert(!handled)
}
}
test("invalid request for similar books, with unwanted path elements at end of URL") {
Get("/search/books/12345/similar/other") ~> route ~> check {
assert(!handled)
}
}
}
| blinkboxbooks/spray-search-service.scala | src/test/scala/com/blinkbox/books/search/SearchApiTests.scala | Scala | mit | 9,057 |
package breeze.config
import com.thoughtworks.paranamer.AdaptiveParanamer
import java.lang.reflect.Type
import collection.mutable.ArrayBuffer
import java.io.File
import java.{lang=>jl}
import ReflectionUtils._
/**
* Generates a Help message from a case class. If a constructor parameter has a "Help" annotation
* present, it will display that along with basic type information.
* @author dlwh
*/
object GenerateHelp {
/**
* Generates help for the given manifest
*/
def apply[C:Manifest](conf: Configuration = Configuration.empty):String = {
val man = implicitly[Manifest[C]]
def recGen(staticManifest: Manifest[_], prefix: String):Seq[Format] = {
val clss = staticManifest.runtimeClass
val ann = clss.getAnnotation(classOf[Help])
val res = new ArrayBuffer[Format]
if(ann != null) {
res += Break += Group(prefix,clss.getName,ann.text) += Break
}
val dynamicClass: Class[_] = conf.recursiveGetProperty(prefix).map( x => Class.forName(x._1) ) getOrElse clss
if (dynamicClass.getConstructors.isEmpty)
return res
val staticTypeVars: Seq[String] = staticManifest.runtimeClass.getTypeParameters.map(_.toString)
val staticTypeVals: Seq[OptManifest[_]] = staticManifest.typeArguments
val staticTypeMap: Map[String, OptManifest[_]] = (staticTypeVars zip staticTypeVals).toMap withDefaultValue (NoManifest)
val dynamicTypeMap = solveTypes(staticTypeMap, staticManifest.runtimeClass, dynamicClass)
// Handle ctor parameters
val toRecurse = ArrayBuffer[(String,Manifest[_])]()
val ctor = dynamicClass.getConstructors.head
val paramNames = reader.lookupParameterNames(ctor)
val defaults = lookupDefaultValues(dynamicClass, paramNames)
val anns = ctor.getParameterAnnotations
val typedParams = ctor.getGenericParameterTypes.map { mkManifest(dynamicTypeMap, _)}
for( i <- 0 until paramNames.length) {
val myAnns = anns(i)
val tpe = typedParams(i)
val name = paramNames(i)
val default = defaults(i).map(_.toString).getOrElse("")
val ann = myAnns.collectFirst{case h: Help => h}
ann match {
case None if isPrimitive(tpe.runtimeClass) =>
res += Param(wrap(prefix,name), prettyString(tpe),default, "")
case Some(help) =>
res += Param(wrap(prefix,name), prettyString(tpe),default, help.text)
case _ =>
}
if(!isPrimitive(tpe.runtimeClass)) {
toRecurse += (name -> tpe)
}
}
for( (name,tpe) <- toRecurse) {
res ++= recGen(tpe,wrap(prefix,name))
}
res
}
val formats = recGen(man,"")
val paramSplit = formats.foldLeft(0)(_ max _.paramLength)
val minLength = formats.foldLeft(0)(_ max _.minLineLength)
val buf = new StringBuilder()
formats.foldLeft(buf) { (b,s) =>
b ++= s.mkString(paramSplit - s.paramLength, minLength)
b += '\\n'
b
}
buf.toString
}
private trait Format {
def paramLength: Int = 0
def minLineLength: Int = 0
def mkString(splitWidth:Int, lineLength: Int):String
}
private case class Param(name: String, typeString: String, default: String, helpString: String) extends Format {
override def paramLength = name.length + typeString.length + default.length + {if(default.length == 0) 4 else 7}
override def minLineLength: Int = paramLength + helpString.length + 1
def mkString(splitWidth: Int, lineLength: Int) = {
"--" + name + ": " + typeString + {if(default.length == 0) "" else " = " + default} + (" " * splitWidth) + " " + helpString
}
}
private case object Break extends Format {
def mkString(splitWidth:Int, lineLength: Int) = "=" * lineLength.min(10)
}
private case class Group(name: String, className: String, helpString: String) extends Format {
val string = "Parameter Group " + name + " (" + className +")\\n"
def mkString(splitWidth:Int, lineLength: Int) = string +"\\n" + helpString
override def minLineLength = string.length max helpString.length
}
private def prettyString(tpe: Manifest[_]) = tpe match {
case Manifest.Int => "Int"
case Manifest.Float => "Float"
case Manifest.Boolean => "Boolean"
case Manifest.Long => "Long"
case Manifest.Double => "Double"
case Manifest.Char => "Char"
case Manifest.Byte => "Byte"
case c => if(c.runtimeClass == classOf[String]) "String" else if (c.runtimeClass == classOf[File]) "File" else tpe.toString
}
private def wrap(prefix: String, name: String):String = {
if(prefix.isEmpty) name
else prefix + "." + name
}
private val STRING = classOf[String]
private val FILE = classOf[java.io.File]
private def isPrimitive(tpe: Type):Boolean = tpe match {
case jl.Integer.TYPE => true
case jl.Float.TYPE => true
case jl.Boolean.TYPE => true
case jl.Long.TYPE => true
case jl.Double.TYPE => true
case jl.Character.TYPE => true
case jl.Byte.TYPE => true
case STRING => true
case FILE => true
case _ => false
}
@Help(text="Recursion works!")
case class Rec[T](i: T)
case class Params(str: Int, bo: Boolean, @Help(text="woooooo") f: File , rec: Rec[Int])
def main(args: Array[String]) {
println(GenerateHelp[Params]())
}
private val reader = new AdaptiveParanamer()
} | dlwh/breeze-config | src/main/scala/breeze/config/GenerateHelp.scala | Scala | apache-2.0 | 5,377 |
package fpinscala.datastructures
import List._
object TestingTheList {;import org.scalaide.worksheet.runtime.library.WorksheetSupport._; def main(args: Array[String])=$execute{;$skip(207); val res$0 =
// What happens when we call foldRight using Nil and Cons themselves...
foldRight(List(1, 2, 3), Nil: List[Int])(Cons(_, _));System.out.println("""res0: fpinscala.datastructures.List[Int] = """ + $show(res$0));$skip(415); val res$1 =
// This just gives us back the original list..!
// One way of thinking about what `foldRight` "does" is it replaces the `Nil` constructor
// of the list with the `z` argument, and it replaces the `Cons` constructor with the
// given function, `f`. If we just supply `Nil` for `z` and `Cons` for `f`, then we get
// back the input list.
// Testing the length function
length(List(1, 2, 3, 4, 5));System.out.println("""res1: Int = """ + $show(res$1));$skip(14); val res$2 =
length(Nil);System.out.println("""res2: Int = """ + $show(res$2));$skip(18); val res$3 =
length(List(1));System.out.println("""res3: Int = """ + $show(res$3));$skip(27); val res$4 =
sum3(List(1, 2, 3, 4));System.out.println("""res4: Int = """ + $show(res$4));$skip(29); val res$5 =
product3(List(1, 2, 3, 4));System.out.println("""res5: Double = """ + $show(res$5));$skip(28); val res$6 =
length2(List(1, 2, 3, 4));System.out.println("""res6: Int = """ + $show(res$6));$skip(31); val res$7 =
reverse(List(1, 2, 3, 4));System.out.println("""res7: fpinscala.datastructures.List[Int] = """ + $show(res$7));$skip(28); val res$8 =
init(List(1, 2, 3, 4));System.out.println("""res8: fpinscala.datastructures.List[Int] = """ + $show(res$8));$skip(57); val res$9 =
foldRightViaFoldLeft(List(1, 2, 3, 4, 5), 0)(_ + _);System.out.println("""res9: Int = """ + $show(res$9));$skip(53); val res$10 =
appendViaFoldLeft(List(1, 2, 3), List(4, 5, 6));System.out.println("""res10: fpinscala.datastructures.List[Int] = """ + $show(res$10));$skip(51); val res$11 =
appendViaFoldRight(List(1, 2, 3), List(4, 5, 6));System.out.println("""res11: fpinscala.datastructures.List[Int] = """ + $show(res$11));$skip(54); val res$12 =
concat(List(List(1, 2), List(3, 4), List(5, 6)));System.out.println("""res12: fpinscala.datastructures.List[Int] = """ + $show(res$12));$skip(40); val res$13 =
addOneToListElements(List(1, 2, 3));System.out.println("""res13: fpinscala.datastructures.List[Int] = """ + $show(res$13));$skip(42); val res$14 =
listDoubleToString(List(1.0, 2.3, 3.0));System.out.println("""res14: fpinscala.datastructures.List[String] = """ + $show(res$14));$skip(28); val res$15 =
map(List(1, 2, 3))(_ + 1);System.out.println("""res15: fpinscala.datastructures.List[Int] = """ + $show(res$15));$skip(29); val res$16 =
map2(List(1, 2, 3))(_ + 1);System.out.println("""res16: fpinscala.datastructures.List[Int] = """ + $show(res$16));$skip(44); val res$17 =
filter(List(1, 2, 3, 4, 5, 6))(_ >= 3);System.out.println("""res17: fpinscala.datastructures.List[Int] = """ + $show(res$17));$skip(42); val res$18 =
filter2(List(1, 2, 3, 4, 5, 6))(_ >= 3);System.out.println("""res18: fpinscala.datastructures.List[Int] = """ + $show(res$18));$skip(47); val res$19 =
hasSubsequence(List(1, 2, 3, 4), List(1, 2));System.out.println("""res19: Boolean = """ + $show(res$19));$skip(47); val res$20 =
hasSubsequence(List(1, 2, 3, 4), List(3, 4));System.out.println("""res20: Boolean = """ + $show(res$20))}
}
| js1972/fpinscala | exercises/.worksheet/src/fpinscala.datastructures.TestingTheList.scala | Scala | mit | 3,500 |
package pt.org.apec.services.users
import spray.routing._
import pt.org.apec.services.users.common.json.JsonProtocol
import pt.org.apec.services.users.dal._
import spray.http.StatusCodes._
import pt.org.apec.services.users.common._
import spray.httpx.PlayJsonSupport
import scala.concurrent.ExecutionContext
class UsersServiceActor(val dal: Dal[_]) extends HttpServiceActor with UsersService {
override val executionContext = context.dispatcher
override def receive = runRoute(routes)
}
trait UsersService extends HttpService with PlayJsonSupport with JsonProtocol with DalComponent {
implicit val executionContext: ExecutionContext
def routes = userRoutes
def userRoutes = path("users") {
path("register") {
(post & entity(as[UserRegistration])) { registration =>
complete {
Created -> dal.registerUser(registration)
}
}
} ~
path(LongNumber) { id =>
path("activate") {
post {
complete {
dal.activateUser(id) map { result =>
if (result) OK else Accepted
}
}
}
}
}
}
} | ragb/apec-users-service | service/src/main/scala/pt/org/apec/services/users/UsersService.scala | Scala | apache-2.0 | 1,147 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.api
import org.apache.kafka.common.protocol.SecurityProtocol
class SaslSslEndToEndAuthorizationTest extends EndToEndAuthorizationTest {
override protected def securityProtocol = SecurityProtocol.SASL_SSL
override val clientPrincipal = "client"
override val kafkaPrincipal = "kafka"
}
| samaitra/kafka | core/src/test/scala/integration/kafka/api/SaslSslEndToEndAuthorizationTest.scala | Scala | apache-2.0 | 1,120 |
package uk.gov.dvla.vehicles.presentation.common.controllers
import com.google.inject.Inject
import play.api.mvc.{Action, Controller}
import play.api.data.Form
import uk.gov.dvla.vehicles.presentation.common.models
import uk.gov.dvla.vehicles.presentation.common.views
import uk.gov.dvla.vehicles.presentation.common.clientsidesession.ClientSideSessionFactory
import uk.gov.dvla.vehicles.presentation.common.clientsidesession.CookieImplicits.RichForm
import models.EmailModel
class EmailController @Inject()(implicit clientSideSessionFactory: ClientSideSessionFactory) extends Controller {
private[controllers] val form = Form(
EmailModel.Form.Mapping
)
def present = Action { implicit request =>
Ok(views.html.emailView(form.fill()))
}
def submit = Action {
implicit request => {
form.bindFromRequest.fold(
formWithErrors => BadRequest(views.html.emailView(formWithErrors)),
f => Ok(views.html.success(s"success - you entered an email ${f.email}"))
)
}
}
}
| dvla/vehicles-presentation-common | common-test/app/uk/gov/dvla/vehicles/presentation/common/controllers/EmailController.scala | Scala | mit | 1,020 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala.tools.nsc
package interactive
import scala.collection.mutable.ArrayBuffer
import scala.annotation.tailrec
trait ContextTrees { self: Global =>
type Context = analyzer.Context
lazy val NoContext = analyzer.NoContext
type Contexts = ArrayBuffer[ContextTree]
/** A context tree contains contexts that are indexed by positions.
* It satisfies the following properties:
* 1. All context come from compiling the same unit.
* 2. Child contexts have parent contexts in their outer chain.
* 3. The `pos` field of a context is the same as `context.tree.pos`, unless that
* position is transparent. In that case, `pos` equals the position of
* one of the solid descendants of `context.tree`.
* 4. Children of a context have non-overlapping increasing positions.
* 5. No context in the tree has a transparent position.
*/
class ContextTree(val pos: Position, val context: Context, val children: ArrayBuffer[ContextTree]) {
def this(pos: Position, context: Context) = this(pos, context, new ArrayBuffer[ContextTree])
override def toString = "ContextTree("+pos+", "+children+")"
}
/** Returns the most precise context possible for the given `pos`.
*
* It looks for the finest ContextTree containing `pos`, and then look inside
* this ContextTree for a child ContextTree located immediately before `pos`.
* If such a child exists, returns its context, otherwise returns the context of
* the parent ContextTree.
*
   * This is required to always return a context which contains all the imports
* declared up to `pos` (see scala/bug#7280 for a test case).
*
* Can return None if `pos` is before any valid Scala code.
*/
def locateContext(contexts: Contexts, pos: Position): Option[Context] = synchronized {
@tailrec
def locateFinestContextTree(context: ContextTree): ContextTree = {
if (context.pos includes pos) {
locateContextTree(context.children, pos) match {
case Some(x) =>
locateFinestContextTree(x)
case None =>
context
}
} else {
context
}
}
def sanitizeContext(c: Context): Context = {
c.retyping = false
c
}
val tree = locateContextTree(contexts, pos)
tree map locateFinestContextTree map (ct => sanitizeContext(ct.context))
}
/** Returns the ContextTree containing `pos`, or the ContextTree positioned just before `pos`,
* or None if `pos` is located before all ContextTrees.
*/
def locateContextTree(contexts: Contexts, pos: Position): Option[ContextTree] = {
if (contexts.isEmpty) None
else {
// binary search on contexts, loop invar: lo <= hi, recursion metric: `hi - lo`
@tailrec
def loop(lo: Int, hi: Int, previousSibling: Option[ContextTree]): Option[ContextTree] = {
// [scala/bug#8239] enforce loop invariant & ensure recursion metric decreases monotonically on every recursion
if (lo > hi) previousSibling
else if (pos properlyPrecedes contexts(lo).pos)
previousSibling
else if (contexts(hi).pos properlyPrecedes pos)
Some(contexts(hi))
else {
val mid = (lo + hi) / 2
val midpos = contexts(mid).pos
if (midpos includes pos)
Some(contexts(mid))
else if (midpos properlyPrecedes pos)
// recursion metric: (hi - ((lo + hi)/2 + 1)) < (hi - lo)
// since (hi - ((lo + hi)/2 + 1)) - (hi - lo) = lo - ((lo + hi)/2 + 1) < 0
// since 2*lo - lo - hi - 2 = lo - hi - 2 < 0
// since lo < hi + 2
// can violate lo <= hi, hence the lo > hi check at the top [scala/bug#8239]
loop(mid + 1, hi, Some(contexts(mid)))
else if (lo != hi) // avoid looping forever (lo == hi violates the recursion metric) [scala/bug#8239]
// recursion metric: ((lo + hi)/2) - lo < (hi - lo)
// since ((lo + hi)/2) - lo - (hi - lo) = ((lo + hi)/2) - hi < 0
// since 2 * (((lo + hi)/2) - hi) = lo - hi < 0 since lo < hi
loop(lo, mid, previousSibling)
else previousSibling
}
}
loop(0, contexts.length - 1, None)
}
}
/** Insert a context at correct position into a buffer of context trees.
* If the `context` has a transparent position, add it multiple times
* at the positions of all its solid descendant trees.
*/
def addContext(contexts: Contexts, context: Context): Unit = {
val cpos = context.tree.pos
if (cpos.isTransparent) {
val traverser = new ChildSolidDescendantsCollector() {
override def traverseSolidChild(t: Tree): Unit = {
addContext(contexts, context, t.pos)
}
}
traverser.apply(context.tree)
} else
addContext(contexts, context, cpos)
}
/** Insert a context with non-transparent position `cpos`
* at correct position into a buffer of context trees.
*/
def addContext(contexts: Contexts, context: Context, cpos: Position): Unit = synchronized {
try {
if (!cpos.isRange) {}
else if (contexts.isEmpty) contexts += new ContextTree(cpos, context)
else {
val hi = contexts.length - 1
if (contexts(hi).pos precedes cpos)
contexts += new ContextTree(cpos, context)
else if (contexts(hi).pos properlyIncludes cpos) // fast path w/o search
addContext(contexts(hi).children, context, cpos)
else if (cpos precedes contexts(0).pos)
new ContextTree(cpos, context) +=: contexts
else {
def insertAt(idx: Int): Boolean = {
val oldpos = contexts(idx).pos
if (oldpos sameRange cpos) {
contexts(idx) = new ContextTree(cpos, context, contexts(idx).children)
true
} else if (oldpos includes cpos) {
addContext(contexts(idx).children, context, cpos)
true
} else if (cpos includes oldpos) {
val start = contexts.indexWhere(cpos includes _.pos)
val last = contexts.lastIndexWhere(cpos includes _.pos)
contexts(start) = new ContextTree(cpos, context, contexts.slice(start, last + 1))
contexts.remove(start + 1, last - start)
true
} else false
}
def loop(lo: Int, hi: Int): Unit = {
if (hi - lo > 1) {
val mid = (lo + hi) / 2
val midpos = contexts(mid).pos
if (cpos precedes midpos)
loop(lo, mid)
else if (midpos precedes cpos)
loop(mid, hi)
else
addContext(contexts(mid).children, context, cpos)
} else if (!insertAt(lo) && !insertAt(hi)) {
val lopos = contexts(lo).pos
val hipos = contexts(hi).pos
if ((lopos precedes cpos) && (cpos precedes hipos))
contexts.insert(hi, new ContextTree(cpos, context))
else
inform("internal error? skewed positions: "+lopos+" !< "+cpos+" !< "+hipos)
}
}
loop(0, hi)
}
}
} catch {
case ex: Throwable =>
println(ex)
ex.printStackTrace()
println("failure inserting "+cpos+" into "+contexts+"/"+contexts(contexts.length - 1).pos+"/"+
(contexts(contexts.length - 1).pos includes cpos))
throw ex
}
}
}
| scala/scala | src/interactive/scala/tools/nsc/interactive/ContextTrees.scala | Scala | apache-2.0 | 7,784 |
package scalads
package core
import scalads.readers.ObjectReader
import scalads.macroimpls.EntityBuilder
/**
* @author Bryce Anderson
* Created on 6/1/13
*/
trait QueryIterator[+U, E]
extends Iterator[U] { self =>
def nextEntity(): E
def next(): U
override def map[Z](f: (U) => Z): QueryIterator[Z,E] = new QueryIterator[Z,E] {
def hasNext = self.hasNext
def nextEntity() = self.nextEntity()
def next(): Z = f(self.next())
}
}
object QueryIterator {
def apply[A, E](datastore: Datastore[_, E], it: Iterator[E], transformer: Transformer[A, E]) =
new QueryIterator[A with EntityBacker[A, E], E] {
def hasNext: Boolean = it.hasNext
def nextEntity(): E = it.next()
def next(): A with EntityBacker[A, E] =
transformer.deserializer.deserialize(datastore, transformer, nextEntity())
}
}
| bryce-anderson/scalads | macros/src/main/scala/scalads/core/QueryIterator.scala | Scala | apache-2.0 | 864 |
package akka
import akka.actor._
import akka.event.Logging
import akka.routing.RoundRobinPool
//TODO rename to bigsoup
import org.kirhgoff.ap.core._
sealed trait RunnerMessage
case class StartWorldProcessing(world:WorldModel, listener:WorldModelListener, iterations:Int) extends RunnerMessage
case class CalculateNewState(world:WorldModel) extends RunnerMessage
case class ProcessElement(element:Element, environment:Environment) extends RunnerMessage
case class ElementUpdated(newState:Element, created:List[Element], removed:List[Element]) extends RunnerMessage
case class WorldUpdated(elements:List[Element]) extends RunnerMessage
case class InterruptWork() extends RunnerMessage
//TODO updated
/**
* Worker class - actor to calculate elements
*/
class ElementProcessorActor extends Actor {
def receive = {
case ProcessElement(element:Element, environment:Environment) ⇒ {
val strategy:Strategy = element.getStrategy(environment)
strategy.apply(element, environment)
//println("Worker:" + element + "->" + newState)
sender ! ElementUpdated(
strategy.getNewState,
strategy.getCreatedElements,
strategy.getRemovedElements
)
}
}
}
/**
 * Master - splits the work of computing the new world state into per-element tasks
* @param nrOfWorkers - how many workers to use
*/
class ElementBatchProcessorActor(nrOfWorkers: Int) extends Actor {
val workerRouter = context.actorOf(Props[ElementProcessorActor].withRouter(RoundRobinPool(nrOfWorkers)), name = "workerRouter")
var numberOfResults:Int = _
// var newElements:mutable.MutableList[Element] = mutable.MutableList()
var operator:ActorRef = null
var worldMerger:WorldModelMerger = null
def receive = {
    //Make sure we are in the correct state: match instances (not the companion objects),
    //otherwise these guards can never fire
    case CalculateNewState(_) if numberOfResults != 0 => throw new RuntimeException("Incorrect sequence of calls, check the code 0")
    case ElementUpdated(_, _, _) if numberOfResults == 0 => throw new RuntimeException("Incorrect sequence of calls, check the code 1")
case CalculateNewState(world) => {
//println("CalculateNewState")
operator = sender
worldMerger = world.makeMerger
val elements:List[Element] = world.getElements.filter(!_.isInstanceOf[EmptyElement])
numberOfResults = elements.length
elements.map {
e:Element => workerRouter ! ProcessElement(e, world.getEnvironmentFor(e))
}
}
case ElementUpdated(newState, created, deleted) => {
//println ("Result received:" + newElement)
worldMerger.merge(newState, created, deleted)
numberOfResults -= 1
if (numberOfResults == 0) {
operator ! WorldUpdated(worldMerger.getResults)
}
}
}
}
/**
 * Operator actor - accepts a world to process and drives it through the requested number of iterations
 */
class BigSoupOperatorActor(val workers: Int) extends Actor {
val log = Logging(context.system, this)
var iterations:Int = 0
var currentIteration:Int = -1
var world:WorldModel = null
var listener:WorldModelListener = null
var manager = createManager
def createManager: ActorRef = {
context.actorOf(Props(new ElementBatchProcessorActor(workers)), name = "master")
}
def receive = {
case StartWorldProcessing(world:WorldModel, listener:WorldModelListener, iterations:Int) => {
log.info ("received StartWorldProcessing")
if (alreadyRunning) {
log.info("already running, ignoring command")
} else {
manager = createManager
this.world = world
this.listener = listener
this.iterations = iterations
this.currentIteration = 0
listener.worldUpdated(world)
manager ! CalculateNewState(world)
}
}
case WorldUpdated(elements) ⇒ {
log.info (s"received WorldUpdated $currentIteration")
world.setElements(elements)
listener.worldUpdated(world)
currentIteration += 1
if (currentIteration >= iterations) {
iterations = 0
currentIteration = -1
//Stop system
log.info("final iteration, stopping manager")
context.stop(manager)
} else {
sender ! CalculateNewState(world)
}
}
case InterruptWork => {
log.info("received InterruptWork, stopping manager")
//Stop system
context.stop(manager)
}
}
def alreadyRunning: Boolean = {
this.iterations != 0 || this.currentIteration != -1
}
}
object LifeActors {
val workers = 100
val system = ActorSystem(s"BigSoupAkka")
val operator = system.actorOf(
Props(new BigSoupOperatorActor(workers)), //TODO check this
name = "listener"
)
def run (world:WorldModel, listener: WorldModelListener, iterations:Int) {
operator ! StartWorldProcessing(world, listener, iterations)
}
//TODO create sync method
def stop = {
operator
}
}
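// Hypothetical usage sketch (not part of the original file): shows how a caller would drive
// the actor pipeline above. Concrete WorldModel / WorldModelListener implementations are
// assumed to come from org.kirhgoff.ap.core and are not defined here.
object BigSoupUsageSketch {
  def runOnce(world: WorldModel, listener: WorldModelListener): Unit = {
    // Kick off 100 iterations; progress is reported back through listener.worldUpdated(...)
    LifeActors.run(world, listener, 100)
    // To abort early, send the interrupt message to the operator actor:
    // LifeActors.operator ! InterruptWork
  }
}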
| kirhgoff/life-server | app/akka/LifeActors.scala | Scala | mit | 4,800 |
// Generated by the Scala Plugin for the Protocol Buffer Compiler.
// Do not edit!
//
// Protofile syntax: PROTO3
package com.google.protobuf.duration
/** A Duration represents a signed, fixed-length span of time represented
* as a count of seconds and fractions of seconds at nanosecond
* resolution. It is independent of any calendar and concepts like "day"
* or "month". It is related to Timestamp in that the difference between
* two Timestamp values is a Duration and it can be added or subtracted
* from a Timestamp. Range is approximately +-10,000 years.
*
* # Examples
*
* Example 1: Compute Duration from two Timestamps in pseudo code.
*
* Timestamp start = ...;
* Timestamp end = ...;
* Duration duration = ...;
*
* duration.seconds = end.seconds - start.seconds;
* duration.nanos = end.nanos - start.nanos;
*
* if (duration.seconds < 0 && duration.nanos > 0) {
* duration.seconds += 1;
* duration.nanos -= 1000000000;
 *     } else if (duration.seconds > 0 && duration.nanos < 0) {
* duration.seconds -= 1;
* duration.nanos += 1000000000;
* }
*
* Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
*
* Timestamp start = ...;
* Duration duration = ...;
* Timestamp end = ...;
*
* end.seconds = start.seconds + duration.seconds;
* end.nanos = start.nanos + duration.nanos;
*
* if (end.nanos < 0) {
* end.seconds -= 1;
* end.nanos += 1000000000;
* } else if (end.nanos >= 1000000000) {
* end.seconds += 1;
* end.nanos -= 1000000000;
* }
*
* Example 3: Compute Duration from datetime.timedelta in Python.
*
* td = datetime.timedelta(days=3, minutes=10)
* duration = Duration()
* duration.FromTimedelta(td)
*
* # JSON Mapping
*
* In JSON format, the Duration type is encoded as a string rather than an
* object, where the string ends in the suffix "s" (indicating seconds) and
* is preceded by the number of seconds, with nanoseconds expressed as
* fractional seconds. For example, 3 seconds with 0 nanoseconds should be
* encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
* be expressed in JSON format as "3.000000001s", and 3 seconds and 1
* microsecond should be expressed in JSON format as "3.000001s".
*
* @param seconds
* Signed seconds of the span of time. Must be from -315,576,000,000
* to +315,576,000,000 inclusive. Note: these bounds are computed from:
* 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
* @param nanos
* Signed fractions of a second at nanosecond resolution of the span
* of time. Durations less than one second are represented with a 0
* `seconds` field and a positive or negative `nanos` field. For durations
* of one second or more, a non-zero value for the `nanos` field must be
* of the same sign as the `seconds` field. Must be from -999,999,999
* to +999,999,999 inclusive.
*/
@SerialVersionUID(0L)
final case class Duration(
seconds: _root_.scala.Long = 0L,
nanos: _root_.scala.Int = 0
) extends scalapb.GeneratedMessage with scalapb.Message[Duration] with scalapb.lenses.Updatable[Duration] {
@transient
private[this] var __serializedSizeCachedValue: _root_.scala.Int = 0
private[this] def __computeSerializedValue(): _root_.scala.Int = {
var __size = 0
{
val __value = seconds
if (__value != 0L) {
__size += _root_.com.google.protobuf.CodedOutputStream.computeInt64Size(1, __value)
}
};
{
val __value = nanos
if (__value != 0) {
__size += _root_.com.google.protobuf.CodedOutputStream.computeInt32Size(2, __value)
}
};
__size
}
final override def serializedSize: _root_.scala.Int = {
var read = __serializedSizeCachedValue
if (read == 0) {
read = __computeSerializedValue()
__serializedSizeCachedValue = read
}
read
}
def writeTo(`_output__`: _root_.com.google.protobuf.CodedOutputStream): _root_.scala.Unit = {
{
val __v = seconds
if (__v != 0L) {
_output__.writeInt64(1, __v)
}
};
{
val __v = nanos
if (__v != 0) {
_output__.writeInt32(2, __v)
}
};
}
def mergeFrom(`_input__`: _root_.com.google.protobuf.CodedInputStream): com.google.protobuf.duration.Duration = {
var __seconds = this.seconds
var __nanos = this.nanos
var _done__ = false
while (!_done__) {
val _tag__ = _input__.readTag()
_tag__ match {
case 0 => _done__ = true
case 8 =>
__seconds = _input__.readInt64()
case 16 =>
__nanos = _input__.readInt32()
case tag => _input__.skipField(tag)
}
}
com.google.protobuf.duration.Duration(
seconds = __seconds,
nanos = __nanos
)
}
def withSeconds(__v: _root_.scala.Long): Duration = copy(seconds = __v)
def withNanos(__v: _root_.scala.Int): Duration = copy(nanos = __v)
def getFieldByNumber(__fieldNumber: _root_.scala.Int): _root_.scala.Any = {
(__fieldNumber: @_root_.scala.unchecked) match {
case 1 => {
val __t = seconds
if (__t != 0L) __t else null
}
case 2 => {
val __t = nanos
if (__t != 0) __t else null
}
}
}
def getField(__field: _root_.scalapb.descriptors.FieldDescriptor): _root_.scalapb.descriptors.PValue = {
_root_.scala.Predef.require(__field.containingMessage eq companion.scalaDescriptor)
(__field.number: @_root_.scala.unchecked) match {
case 1 => _root_.scalapb.descriptors.PLong(seconds)
case 2 => _root_.scalapb.descriptors.PInt(nanos)
}
}
def toProtoString: _root_.scala.Predef.String = _root_.scalapb.TextFormat.printToUnicodeString(this)
def companion = com.google.protobuf.duration.Duration
}
object Duration extends scalapb.GeneratedMessageCompanion[com.google.protobuf.duration.Duration] with scalapb.JavaProtoSupport[com.google.protobuf.duration.Duration, com.google.protobuf.Duration] {
implicit def messageCompanion: scalapb.GeneratedMessageCompanion[com.google.protobuf.duration.Duration] with scalapb.JavaProtoSupport[com.google.protobuf.duration.Duration, com.google.protobuf.Duration] = this
def toJavaProto(scalaPbSource: com.google.protobuf.duration.Duration): com.google.protobuf.Duration = {
val javaPbOut = com.google.protobuf.Duration.newBuilder
javaPbOut.setSeconds(scalaPbSource.seconds)
javaPbOut.setNanos(scalaPbSource.nanos)
javaPbOut.build
}
def fromJavaProto(javaPbSource: com.google.protobuf.Duration): com.google.protobuf.duration.Duration = com.google.protobuf.duration.Duration(
seconds = javaPbSource.getSeconds.longValue,
nanos = javaPbSource.getNanos.intValue
)
def fromFieldsMap(__fieldsMap: scala.collection.immutable.Map[_root_.com.google.protobuf.Descriptors.FieldDescriptor, _root_.scala.Any]): com.google.protobuf.duration.Duration = {
_root_.scala.Predef.require(__fieldsMap.keys.forall(_.getContainingType() == javaDescriptor), "FieldDescriptor does not match message type.")
val __fields = javaDescriptor.getFields
com.google.protobuf.duration.Duration(
__fieldsMap.getOrElse(__fields.get(0), 0L).asInstanceOf[_root_.scala.Long],
__fieldsMap.getOrElse(__fields.get(1), 0).asInstanceOf[_root_.scala.Int]
)
}
implicit def messageReads: _root_.scalapb.descriptors.Reads[com.google.protobuf.duration.Duration] = _root_.scalapb.descriptors.Reads{
case _root_.scalapb.descriptors.PMessage(__fieldsMap) =>
_root_.scala.Predef.require(__fieldsMap.keys.forall(_.containingMessage == scalaDescriptor), "FieldDescriptor does not match message type.")
com.google.protobuf.duration.Duration(
__fieldsMap.get(scalaDescriptor.findFieldByNumber(1).get).map(_.as[_root_.scala.Long]).getOrElse(0L),
__fieldsMap.get(scalaDescriptor.findFieldByNumber(2).get).map(_.as[_root_.scala.Int]).getOrElse(0)
)
case _ => throw new RuntimeException("Expected PMessage")
}
def javaDescriptor: _root_.com.google.protobuf.Descriptors.Descriptor = DurationProto.javaDescriptor.getMessageTypes.get(0)
def scalaDescriptor: _root_.scalapb.descriptors.Descriptor = DurationProto.scalaDescriptor.messages(0)
def messageCompanionForFieldNumber(__number: _root_.scala.Int): _root_.scalapb.GeneratedMessageCompanion[_] = throw new MatchError(__number)
lazy val nestedMessagesCompanions: Seq[_root_.scalapb.GeneratedMessageCompanion[_ <: _root_.scalapb.GeneratedMessage]] = Seq.empty
def enumCompanionForFieldNumber(__fieldNumber: _root_.scala.Int): _root_.scalapb.GeneratedEnumCompanion[_] = throw new MatchError(__fieldNumber)
lazy val defaultInstance = com.google.protobuf.duration.Duration(
)
implicit class DurationLens[UpperPB](_l: _root_.scalapb.lenses.Lens[UpperPB, com.google.protobuf.duration.Duration]) extends _root_.scalapb.lenses.ObjectLens[UpperPB, com.google.protobuf.duration.Duration](_l) {
def seconds: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Long] = field(_.seconds)((c_, f_) => c_.copy(seconds = f_))
def nanos: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Int] = field(_.nanos)((c_, f_) => c_.copy(nanos = f_))
}
final val SECONDS_FIELD_NUMBER = 1
final val NANOS_FIELD_NUMBER = 2
def of(
seconds: _root_.scala.Long,
nanos: _root_.scala.Int
): _root_.com.google.protobuf.duration.Duration = _root_.com.google.protobuf.duration.Duration(
seconds,
nanos
)
}
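// Hypothetical usage sketch (not emitted by the ScalaPB generator): exercises the generated
// helpers defined above. The values and the object name are illustrative only.
object DurationUsageSketch {
  // 3 seconds and 1 nanosecond, i.e. "3.000000001s" in JSON form
  val d: Duration = Duration.of(seconds = 3L, nanos = 1)
  // Field-wise copy via the generated with* helpers
  val doubled: Duration = d.withSeconds(6L)
  // Binary round-trip through the generated protobuf machinery
  val bytes: Array[Byte] = doubled.toByteArray
  val parsed: Duration = Duration.parseFrom(bytes)
}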
| dotty-staging/ScalaPB | scalapb-runtime/jvm/src/main/scala/com/google/protobuf/duration/Duration.scala | Scala | apache-2.0 | 9,804 |
def f(p: Int*) = {}
val args1: Array[Int] = Array(1, 2)
println(/* offset: 4, applicable: false */ f(args1)) | whorbowicz/intellij-scala | testdata/resolve2/function/repeat/ArrayRaw.scala | Scala | apache-2.0 | 110 |
opaque type T[X] = X
object T {
def f(x: T[Int]): Int = x // OK
def g(x: Int): T[Int] = x // OK
}
| som-snytt/dotty | tests/pos/toplevel-opaque/opaque-id.scala | Scala | apache-2.0 | 102 |
/* Copyright (C) 2008-2010 Univ of Massachusetts Amherst, Computer Science Dept
This file is part of "FACTORIE" (Factor graphs, Imperative, Extensible)
http://factorie.cs.umass.edu, http://code.google.com/p/factorie/
This software is provided under the terms of the Eclipse Public License 1.0
as published by http://www.opensource.org. For further information,
see the file `LICENSE.txt' included with this distribution. */
package cc.factorie
import scala.reflect.Manifest
import scala.collection.mutable.{HashSet,HashMap}
import scala.util.Random
trait MixtureChoiceVariable extends GeneratedDiscreteVariable with Gate
abstract class MixtureChoice(p:Proportions, value:Int = 0) extends Discrete(p, value) with MixtureChoiceVariable
abstract class MixtureChoiceMixture(ps:Seq[Proportions], choice:MixtureChoiceVariable, value:Int = 0) extends DiscreteMixture(ps, choice, value) with MixtureChoiceVariable
trait MixtureOutcome extends GeneratedVar {
def prFromMixtureComponent(index:Int): Double
}
//class MixtureComponentRef[P<:Parameter,C<:MixtureOutcome](p:P, override val child:C) extends ParameterRef(p, child)
trait DiscreteMixtureVariable extends GeneratedDiscreteVariable with MixtureOutcome {
def choice: MixtureChoiceVariable
def components: Seq[Proportions]
private val proportionsRef: GatedParameterRef[Proportions,DiscreteMixtureVariable] = new GatedParameterRef(components, choice, this)
def proportions = proportionsRef.value
def proportions_=(p2:Proportions)(implicit d:DiffList = null) = { assert(p2 == null || p2.length <= domainSize); proportionsRef.set(p2) }
override def parentRefs = List(proportionsRef)
def prFromMixtureComponent(index:Int): Double = components(index).pr(intValue)
}
class DiscreteMixture(val components:Seq[Proportions], val choice:MixtureChoiceVariable, value:Int = 0) extends DiscreteVariable(value) with DiscreteMixtureVariable
class CategoricalMixture[A<:AnyRef](val components:Seq[Proportions], val choice:MixtureChoiceVariable, value:A) extends CategoricalVariable(value) with GeneratedCategoricalVariable[A] with DiscreteMixtureVariable
/*class DenseDirichletMixture(val components:Seq[Proportions], prec:RealValueParameter, val choice:MixtureChoiceVariable, p:Seq[Double] = Nil)
extends DenseDirichlet(components(choice.intValue), prec, p) with MixtureOutcome {
override protected val meanRef: ParameterRef[Proportions,Dirichlet with MixtureOutcome] = new GatedParameterRef(components, choice, this)
override def mean_=(p2:Proportions)(implicit d:DiffList = null) = throw new Error
def prFromMixtureComponent(index:Int): Double = math.exp(logpr(components(index), precision))
}*/
class MixtureChoiceVariableTemplate extends TemplateWithStatistics1[MixtureChoiceVariable] {
def score(s:Stat) = 0 // s.s1.logpr comes from GeneratedVariableTemplate; gateRefs similarly
//def score(s:Stat) = { val mc = s.s1; mc.gateRefs.reduceLeft((sum,ref) => sum + mc.value.logpr(ref.outcome)) }
}
/*
trait MixtureComponent extends Parameter {
def parent: MixtureComponents[P]
override def addChild(v:GeneratedValue)(implicit d:DiffList): Unit = parent.addChild(v)
override def removeChild(v:GeneratedValue)(implicit d:DiffList): Unit = parent.removeChild(v)
override def children: Iterable[GeneratedValue] = parent.childrenOf(this)
def weightedChildren: Iterable[(MixtureOutcome,Double)]
}
trait MixtureComponents[P<:Parameter] extends Seq[P] with Parameter
class FiniteMixture[P<:Parameter](val components:Seq[P]) extends MixtureComponents[P] {
components.foreach(_.addChild(this))
def length = components.length
def apply(index:Int) = components(index)
def childrenOf(p:P): Iterable[GeneratedValue] = {
val index = components.indexOf(p)
children.filter(_.isInstanceOf[MixtureOutcome]).asInstanceOf[Iterable[MixtureOutcome]].filter(_.choice.intValue == index)
}
}
object FiniteMixture {
def apply[P<:Parameter](n:Int)(constructor: =>P): FiniteMixture[P] = new FiniteMixture[P](for (i <- 1 to n) yield constructor())
}
class DiscreteMixture(val components:FiniteMixture[Proportions], val choice:MixtureChoice, value:Int = 0) extends DiscreteVariable(value) with GeneratedDiscreteVariable with MixtureOutcome {
choice.addChild(this)
def proportions = components(choice.intValue)
def prFromMixtureComponent(index:Int) = components(index).pr(intValue)
}
class MixtureChoice(p:Proportions, value:Int = 0) extends Discrete(p, value) with IntegerValueParameter
trait MixtureOutcome extends GeneratedValue {
def choice: MixtureChoice
def prFromMixtureComponent(index:Int): Double
}
class MixtureChoiceTemplate extends TemplateWithStatistics3s[GeneratedValue,MixtureChoice,Parameter] {
def unroll1(v:GeneratedValue) = Factor(v, v match { case v:MixtureOutcome => v.choice; case _ => null }, v.parents)
def unroll2(c:MixtureChoice) = c.children.map(v => Factor(v, c, v.parents))
def unroll3(p:Parameter) = p.children.map(v => Factor(v, v match { case v:MixtureOutcome => v.choice; case _ => null }, v.parents))
def score(s:Stat) = 0.0 // s.s1.logpr comes from GeneratedVariableTemplate; gateRefs similarly
//def score(s:Stat) = { val mc = s.s1; mc.gateRefs.reduceLeft((sum,ref) => sum + mc.value.logpr(ref.outcome)) }
}
*/
| andrewmilkowski/factorie | src/main/scala/cc/factorie/Mixture.scala | Scala | epl-1.0 | 5,254 |
/*
* The MIT License
*
* Copyright (c) 2017 Fulcrum Genomics LLC
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package com.fulcrumgenomics.bam.api
import java.util.Random
import com.fulcrumgenomics.FgBioDef._
import com.fulcrumgenomics.commons.util.SimpleCounter
import com.fulcrumgenomics.testing.SamBuilder.{Minus, Plus}
import com.fulcrumgenomics.testing.{SamBuilder, UnitSpec}
import htsjdk.samtools.{SAMFileHeader, SAMRecordCoordinateComparator, SAMRecordQueryNameComparator}
import htsjdk.samtools.SAMFileHeader.{GroupOrder, SortOrder}
import htsjdk.samtools.util.Murmur3
import scala.collection.mutable.ListBuffer
object SamOrderTest {
  // Builder for a set of records to be used in testing sorting
val builder = new SamBuilder(sort=None)
val random = new Random(42)
Range.inclusive(1, 1000).foreach { i =>
builder.addPair(name="q"+random.nextInt(), contig=random.nextInt(23), start1=random.nextInt(1e7.toInt)+1, start2=random.nextInt(1e7.toInt)+1)
}
Range.inclusive(1, 10).foreach { i => builder.addPair(name="q"+random.nextInt(), unmapped1=true, unmapped2=true)}
}
class SamOrderTest extends UnitSpec {
"SamOrder.apply(String)" should "find the appropriate SamOrder" in {
SamOrder.values.foreach { order =>
SamOrder(order.name) shouldBe order
SamOrder(order.name.toLowerCase) shouldBe order
SamOrder(order.name.toUpperCase) shouldBe order
}
}
"SamOrder.apply(SAMFileHeader)" should "find known orders from headers" in {
val header = new SAMFileHeader()
header.setSortOrder(SortOrder.coordinate)
header.setAttribute("GO", null)
header.setAttribute("SS", null)
SamOrder(header) shouldBe Some(SamOrder.Coordinate)
header.setSortOrder(SortOrder.queryname)
header.setAttribute("GO", null)
header.setAttribute("SS", null)
SamOrder(header) shouldBe Some(SamOrder.Queryname)
header.setSortOrder(SortOrder.unsorted)
header.setAttribute("GO", null)
header.setAttribute("SS", null)
SamOrder(header) shouldBe Some(SamOrder.Unsorted)
header.setSortOrder(SortOrder.unsorted)
header.setAttribute("GO", null)
header.setAttribute("SS", "unsorted:random")
SamOrder(header) shouldBe Some(SamOrder.Random)
header.setSortOrder(SortOrder.unsorted)
header.setGroupOrder(GroupOrder.query)
header.setAttribute("SS", "unsorted:random-query")
SamOrder(header) shouldBe Some(SamOrder.RandomQuery)
header.setSortOrder(SortOrder.unsorted)
header.setGroupOrder(GroupOrder.query)
header.setAttribute("GO", GroupOrder.query.name())
header.setAttribute("SS", "unsorted:template-coordinate")
SamOrder(header) shouldBe Some(SamOrder.TemplateCoordinate)
}
"SamOrder.Coordinate" should "sort reads into coordinate order" in {
val f = SamOrder.Coordinate.sortkey
val recs = SamOrderTest.builder.iterator.toSeq.sortBy(f(_))
val comp = new SAMRecordCoordinateComparator()
recs.sliding(2).foreach { case Seq(lhs,rhs) => comp.fileOrderCompare(lhs.asSam, rhs.asSam) <= 0 shouldBe true }
}
"SamOrder.Queryname" should "sort reads into queryname order" in {
val f = SamOrder.Queryname.sortkey
val recs = SamOrderTest.builder.iterator.toSeq.sortBy(f(_))
val comp = new SAMRecordQueryNameComparator()
recs.sliding(2).foreach { case Seq(lhs,rhs) => comp.fileOrderCompare(lhs.asSam, rhs.asSam) <= 0 shouldBe true }
}
"SamOrder.Random" should "randomize the order of the reads" in {
val f = SamOrder.Random.sortkey
val recs = SamOrderTest.builder.iterator.toSeq.sortBy(f(_))
val comp1 = new SAMRecordCoordinateComparator()
val comp2 = new SAMRecordQueryNameComparator()
val counter1 = new SimpleCounter[Int]()
val counter2 = new SimpleCounter[Int]()
recs.sliding(2).foreach { case Seq(r1, r2) =>
counter1.count(comp1.fileOrderCompare(r1.asSam, r2.asSam))
counter2.count(comp2.fileOrderCompare(r1.asSam, r2.asSam))
}
counter1.countOf(0 ) / counter1.total.toDouble <= 0.1 shouldBe true
counter1.countOf(-1) / counter1.total.toDouble <= 0.6 shouldBe true
counter1.countOf(1 ) / counter1.total.toDouble <= 0.6 shouldBe true
counter2.countOf(0 ) / counter2.total.toDouble <= 0.1 shouldBe true
counter2.countOf(-1) / counter2.total.toDouble <= 0.6 shouldBe true
counter2.countOf(1 ) / counter2.total.toDouble <= 0.6 shouldBe true
}
"SamOrder.RandomQuery" should "keep query names together" in {
val f = SamOrder.RandomQuery.sortkey
val recs = SamOrderTest.builder.iterator.toSeq.sortBy(f(_))
val counts = new SimpleCounter[String]
val iter = recs.iterator.bufferBetter
while (iter.hasNext) {
val name = iter.head.name
      iter.takeWhile(_.name == name).foreach { r => () }
counts.count(name)
}
counts.foreach { case (name, count) => count shouldBe 1}
}
it should "keep querynames together even when there are has collisions" in {
val f = SamOrder.RandomQuery.sortkey
val builder = new SamBuilder()
val name1 = "000000000-CHG2G:1:1102:14353:13008"
val name2 = "000000000-CHG2G:1:2108:16511:13017"
// Show that there is a hash collision
val hasher = new Murmur3(SamOrder.RandomQuery.HashSeed)
hasher.hashUnencodedChars(name1) shouldBe hasher.hashUnencodedChars(name2)
builder.addFrag(name=name1, start=100)
builder.addFrag(name=name1, start=150).foreach(_.supplementary = true)
builder.addFrag(name=name2, start=100)
builder.addFrag(name=name2, start=150).foreach(_.supplementary = true)
val recs = builder.toIndexedSeq.sortBy(f(_))
val counts = new SimpleCounter[String]
val iter = recs.iterator.bufferBetter
while (iter.hasNext) {
val name = iter.head.name
iter.takeWhile(_.name == name).foreach { r => () }
counts.count(name)
}
counts.foreach { case (name, count) => count shouldBe 1 }
}
"SamOrder.TemplateCoordinate" should "sort by molecular identifier then name" in {
val addFuncs: Seq[SamBuilder => Unit] = Seq(
b => b.addPair(name="ab0", start1=200, start2=200, attrs=Map("MI" -> "0/A"), bases1="AAAAAAAAAA", bases2="AAAAAAAAAA"),
b => b.addPair(name="ab1", start1=100, start2=100, attrs=Map("MI" -> "1/A"), bases1="AAAAAAAAAA", bases2="AAAAAAAAAA"),
b => b.addPair(name="ab2", start1=100, start2=100, attrs=Map("MI" -> "1/A"), bases1="AAAAAAAAAA", bases2="AAAAAAAAAA"),
b => b.addPair(name="ab3", start1=100, start2=100, attrs=Map("MI" -> "2/A"), bases1="AAAAAAAAAA", bases2="AAAAAAAAAA"),
b => b.addPair(name="ba0", start1=200, start2=200, strand1=Minus, strand2=Plus, attrs=Map("MI" -> "0/B"), bases1="AAAAAAAAAA", bases2="AAAAAAAAAA"),
b => b.addPair(name="ba1", start1=100, start2=100, strand1=Minus, strand2=Plus, attrs=Map("MI" -> "1/B"), bases1="AAAAAAAAAA", bases2="AAAAAAAAAA"),
b => b.addPair(name="ba2", start1=100, start2=100, strand1=Minus, strand2=Plus, attrs=Map("MI" -> "1/B"), bases1="AAAAAAAAAA", bases2="AAAAAAAAAA"),
b => b.addPair(name="ba3", start1=100, start2=100, strand1=Minus, strand2=Plus, attrs=Map("MI" -> "2/B"), bases1="AAAAAAAAAA", bases2="AAAAAAAAAA")
)
def seq(n: Int, str: String): Seq[String] = IndexedSeq.fill[String](n)(str)
Range.inclusive(start=1, end=10).foreach { _ =>
val builder = new SamBuilder(readLength=10)
scala.util.Random.shuffle(addFuncs).foreach { func => func(builder) }
val f = SamOrder.TemplateCoordinate.sortkey
val recs = builder.iterator.toSeq.sortBy(f(_))
recs should have length 16
      val molecularIdentifiers = seq(4, "1/A") ++ seq(4, "1/B") ++ seq(2, "2/A") ++ seq(2, "2/B") ++ seq(2, "0/A") ++ seq(2, "0/B")
      recs.map(_.apply[String]("MI")) should contain theSameElementsInOrderAs molecularIdentifiers
val names = Seq("ab1", "ab2", "ba1", "ba2", "ab3", "ba3", "ab0", "ba0").flatMap { name => seq(2, name)}
recs.map(_.name) should contain theSameElementsInOrderAs names
recs.grouped(2).foreach { pair =>
pair.count(_.firstOfPair) shouldBe 1
pair.count(_.secondOfPair) shouldBe 1
pair.foreach(_.name shouldBe pair.head.name)
}
}
}
it should "sort pairs by the 'lower' 5' position of the pair" in {
val builder = new SamBuilder(readLength=100, sort=Some(SamOrder.Coordinate))
val exp = ListBuffer[SamRecord]()
// Records are added to the builder in the order that we expect them to be sorted, but the builder
// will coordinate sort them for us, so we can re-sort them and test the results
exp ++= builder.addPair("q1", contig=0, start1=100, start2=300)
exp ++= builder.addPair("q2", contig=0, start1=106, start2=300, cigar1="5S95M") // effective=101
exp ++= builder.addPair("q3", contig=0, start1=102, start2=299)
exp ++= builder.addPair("q4", contig=0, start1=300, start2=110, strand1=Minus, strand2=Plus)
exp ++= builder.addPair("q5", contig=0, start1=120, start2=320)
exp ++= builder.addPair("q6", contig=1, start1=1, start2=200)
    // The order they were added in, except that q4's mates are flipped because of the strand order
val expected = List("q1/1", "q1/2", "q2/1", "q2/2", "q3/1", "q3/2", "q4/2", "q4/1", "q5/1", "q5/2", "q6/1", "q6/2")
val actual = builder.toList.sortBy(r => SamOrder.TemplateCoordinate.sortkey(r)).map(_.id)
actual should contain theSameElementsInOrderAs expected
}
}
| fulcrumgenomics/fgbio | src/test/scala/com/fulcrumgenomics/bam/api/SamOrderTest.scala | Scala | mit | 10,440 |
package forcomp
import common._
object Anagrams {
/** A word is simply a `String`. */
type Word = String
/** A sentence is a `List` of words. */
type Sentence = List[Word]
/** `Occurrences` is a `List` of pairs of characters and positive integers saying
* how often the character appears.
   * This list is sorted alphabetically w.r.t. the character in each pair.
* All characters in the occurrence list are lowercase.
*
* Any list of pairs of lowercase characters and their frequency which is not sorted
* is **not** an occurrence list.
*
* Note: If the frequency of some character is zero, then that character should not be
* in the list.
*/
type Occurrences = List[(Char, Int)]
/** The dictionary is simply a sequence of words.
* It is predefined and obtained as a sequence using the utility method `loadDictionary`.
*/
val dictionary: List[Word] = loadDictionary
   * Converts the word into its character occurrence list.
*
* Note: the uppercase and lowercase version of the character are treated as the
* same character, and are represented as a lowercase character in the occurrence list.
*/
def wordOccurrences(w: Word): Occurrences = w.toLowerCase().groupBy((c: Char) => c).toList.sortBy(c => c._1) map (s => (s._1, s._2.length))
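  // A quick worked example of the behaviour described above (case is folded, pairs sorted by character):
  //   wordOccurrences("Robert") == List(('b', 1), ('e', 1), ('o', 1), ('r', 2), ('t', 1))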
/** Converts a sentence into its character occurrence list. */
def sentenceOccurrences(s: Sentence): Occurrences = wordOccurrences(s.mkString)
/** The `dictionaryByOccurrences` is a `Map` from different occurrences to a sequence of all
* the words that have that occurrence count.
* This map serves as an easy way to obtain all the anagrams of a word given its occurrence list.
*
* For example, the word "eat" has the following character occurrence list:
*
* `List(('a', 1), ('e', 1), ('t', 1))`
*
* Incidentally, so do the words "ate" and "tea".
*
* This means that the `dictionaryByOccurrences` map will contain an entry:
*
* List(('a', 1), ('e', 1), ('t', 1)) -> Seq("ate", "eat", "tea")
*
*/
lazy val dictionaryByOccurrences: Map[Occurrences, List[Word]] = dictionary.groupBy(d => wordOccurrences(d)).withDefaultValue(List())
/** Returns all the anagrams of a given word. */
def wordAnagrams(word: Word): List[Word] = dictionaryByOccurrences(wordOccurrences(word))
/** Returns the list of all subsets of the occurrence list.
* This includes the occurrence itself, i.e. `List(('k', 1), ('o', 1))`
* is a subset of `List(('k', 1), ('o', 1))`.
* It also include the empty subset `List()`.
*
* Example: the subsets of the occurrence list `List(('a', 2), ('b', 2))` are:
*
* List(
* List(),
* List(('a', 1)),
* List(('a', 2)),
* List(('b', 1)),
* List(('a', 1), ('b', 1)),
* List(('a', 2), ('b', 1)),
* List(('b', 2)),
* List(('a', 1), ('b', 2)),
* List(('a', 2), ('b', 2))
* )
*
* Note that the order of the occurrence list subsets does not matter -- the subsets
* in the example above could have been displayed in some other order.
*/
def combinations(occurrences: Occurrences): List[Occurrences] = occurrences match{
case List() => List(List())
case x :: xs => (for (n <- 0 to x._2; left_subsets <- combinations(xs)) yield (x._1, n) :: left_subsets).toList.map(subsets => subsets filter(_._2 != 0))
}
/** Subtracts occurrence list `y` from occurrence list `x`.
*
* The precondition is that the occurrence list `y` is a subset of
* the occurrence list `x` -- any character appearing in `y` must
* appear in `x`, and its frequency in `y` must be smaller or equal
* than its frequency in `x`.
*
* Note: the resulting value is an occurrence - meaning it is sorted
* and has no zero-entries.
*/
// sorted !!!
def subtract(x: Occurrences, y: Occurrences): Occurrences = ((y.toMap foldLeft x.toMap)(subtract_value)).toList.filter(_._2 != 0).sorted
def subtract_value(orig: Map[Char, Int], sub_term: (Char, Int)): Map[Char, Int] = orig.updated(sub_term._1, orig.apply(sub_term._1) - sub_term._2)
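  // A small worked example of `subtract` with the helper above: entries whose count drops to zero
  // are filtered out and the result stays sorted, e.g.
  //   subtract(List(('a', 2), ('b', 2)), List(('b', 1))) == List(('a', 2), ('b', 1))
  //   subtract(List(('a', 2), ('b', 2)), List(('a', 2))) == List(('b', 2))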
/** Returns a list of all anagram sentences of the given sentence.
*
* An anagram of a sentence is formed by taking the occurrences of all the characters of
* all the words in the sentence, and producing all possible combinations of words with those characters,
* such that the words have to be from the dictionary.
*
* The number of words in the sentence and its anagrams does not have to correspond.
* For example, the sentence `List("I", "love", "you")` is an anagram of the sentence `List("You", "olive")`.
*
* Also, two sentences with the same words but in a different order are considered two different anagrams.
* For example, sentences `List("You", "olive")` and `List("olive", "you")` are different anagrams of
* `List("I", "love", "you")`.
*
* Here is a full example of a sentence `List("Yes", "man")` and its anagrams for our dictionary:
*
* List(
* List(en, as, my),
* List(en, my, as),
* List(man, yes),
* List(men, say),
* List(as, en, my),
* List(as, my, en),
* List(sane, my),
* List(Sean, my),
* List(my, en, as),
* List(my, as, en),
* List(my, sane),
* List(my, Sean),
* List(say, men),
* List(yes, man)
* )
*
* The different sentences do not have to be output in the order shown above - any order is fine as long as
* all the anagrams are there. Every returned word has to exist in the dictionary.
*
* Note: in case that the words of the sentence are in the dictionary, then the sentence is the anagram of itself,
* so it has to be returned in this list.
*
* Note: There is only one anagram of an empty sentence.
*/
def sentenceAnagrams(sentence: Sentence): List[Sentence] = sentenceOccurs(sentenceOccurrences(sentence))
def sentenceOccurs(occurs: Occurrences): List[Sentence] = occurs match {
case List() => List(List())
case x :: xs =>
for (one_occur <- combinations(occurs).filter( _.length != 0);
word <- dictionaryByOccurrences.apply(one_occur);
sentence <- sentenceOccurs(subtract(occurs, one_occur));
if (dictionaryByOccurrences.contains(one_occur)))
yield word :: sentence
}
}
| xupeixiang/scala-course | progfun-forcomp/src/main/scala/forcomp/Anagrams.scala | Scala | apache-2.0 | 6,439 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.api.scala
import java.math.{BigDecimal => JBigDecimal}
import java.sql.{Date, Time, Timestamp}
import org.apache.calcite.avatica.util.DateTimeUtils._
import org.apache.flink.api.common.typeinfo.{SqlTimeTypeInfo, TypeInformation}
import org.apache.flink.table.api.{TableException, CurrentRow, CurrentRange, UnboundedRow, UnboundedRange}
import org.apache.flink.table.expressions.ExpressionUtils.{convertArray, toMilliInterval, toMonthInterval, toRowInterval}
import org.apache.flink.table.expressions.TimeIntervalUnit.TimeIntervalUnit
import org.apache.flink.table.expressions._
import org.apache.flink.table.functions.AggregateFunction
import scala.language.implicitConversions
/**
* These are all the operations that can be used to construct an [[Expression]] AST for expression
* operations.
*
* These operations must be kept in sync with the parser in
* [[org.apache.flink.table.expressions.ExpressionParser]].
*/
trait ImplicitExpressionOperations {
private[flink] def expr: Expression
/**
* Enables literals on left side of binary expressions.
*
* e.g. 12.toExpr % 'a
*
* @return expression
*/
def toExpr: Expression = expr
/**
* Boolean AND in three-valued logic.
*/
def && (other: Expression) = And(expr, other)
/**
* Boolean OR in three-valued logic.
*/
def || (other: Expression) = Or(expr, other)
/**
* Greater than.
*/
def > (other: Expression) = GreaterThan(expr, other)
/**
* Greater than or equal.
*/
def >= (other: Expression) = GreaterThanOrEqual(expr, other)
/**
* Less than.
*/
def < (other: Expression) = LessThan(expr, other)
/**
* Less than or equal.
*/
def <= (other: Expression) = LessThanOrEqual(expr, other)
/**
* Equals.
*/
def === (other: Expression) = EqualTo(expr, other)
/**
* Not equal.
*/
def !== (other: Expression) = NotEqualTo(expr, other)
/**
* Whether boolean expression is not true; returns null if boolean is null.
*/
def unary_! = Not(expr)
/**
* Returns negative numeric.
*/
def unary_- = UnaryMinus(expr)
/**
* Returns numeric.
*/
def unary_+ = expr
/**
* Returns true if the given expression is null.
*/
def isNull = IsNull(expr)
/**
* Returns true if the given expression is not null.
*/
def isNotNull = IsNotNull(expr)
/**
* Returns true if given boolean expression is true. False otherwise (for null and false).
*/
def isTrue = IsTrue(expr)
/**
* Returns true if given boolean expression is false. False otherwise (for null and true).
*/
def isFalse = IsFalse(expr)
/**
* Returns true if given boolean expression is not true (for null and false). False otherwise.
*/
def isNotTrue = IsNotTrue(expr)
/**
* Returns true if given boolean expression is not false (for null and true). False otherwise.
*/
def isNotFalse = IsNotFalse(expr)
/**
* Returns left plus right.
*/
def + (other: Expression) = Plus(expr, other)
/**
* Returns left minus right.
*/
def - (other: Expression) = Minus(expr, other)
/**
* Returns left divided by right.
*/
def / (other: Expression) = Div(expr, other)
/**
* Returns left multiplied by right.
*/
def * (other: Expression) = Mul(expr, other)
/**
* Returns the remainder (modulus) of left divided by right.
* The result is negative only if left is negative.
*/
def % (other: Expression) = mod(other)
/**
* Returns the sum of the numeric field across all input values.
* If all values are null, null is returned.
*/
def sum = Sum(expr)
/**
* Returns the sum of the numeric field across all input values.
* If all values are null, 0 is returned.
*/
def sum0 = Sum0(expr)
/**
* Returns the minimum value of field across all input values.
*/
def min = Min(expr)
/**
* Returns the maximum value of field across all input values.
*/
def max = Max(expr)
/**
* Returns the number of input rows for which the field is not null.
*/
def count = Count(expr)
/**
* Returns the average (arithmetic mean) of the numeric field across all input values.
*/
def avg = Avg(expr)
/**
* Returns the population standard deviation of an expression (the square root of varPop()).
*/
def stddevPop = StddevPop(expr)
/**
* Returns the sample standard deviation of an expression (the square root of varSamp()).
*/
def stddevSamp = StddevSamp(expr)
/**
* Returns the population standard variance of an expression.
*/
def varPop = VarPop(expr)
/**
* Returns the sample variance of a given expression.
*/
def varSamp = VarSamp(expr)
/**
* Converts a value to a given type.
*
* e.g. "42".cast(Types.INT) leads to 42.
*
* @return casted expression
*/
def cast(toType: TypeInformation[_]) = Cast(expr, toType)
/**
* Specifies a name for an expression i.e. a field.
*
* @param name name for one field
* @param extraNames additional names if the expression expands to multiple fields
* @return field with an alias
*/
def as(name: Symbol, extraNames: Symbol*) = Alias(expr, name.name, extraNames.map(_.name))
def asc = Asc(expr)
def desc = Desc(expr)
/**
* Returns the start time of a window when applied on a window reference.
*/
def start = WindowStart(expr)
/**
* Returns the end time of a window when applied on a window reference.
*/
def end = WindowEnd(expr)
/**
* Ternary conditional operator that decides which of two other expressions should be evaluated
    * based on an evaluated boolean condition.
*
* e.g. (42 > 5).?("A", "B") leads to "A"
*
* @param ifTrue expression to be evaluated if condition holds
* @param ifFalse expression to be evaluated if condition does not hold
*/
def ?(ifTrue: Expression, ifFalse: Expression) = {
If(expr, ifTrue, ifFalse)
}
// scalar functions
/**
    * Calculates the remainder of dividing the given number by another one.
*/
def mod(other: Expression) = Mod(expr, other)
/**
    * Calculates Euler's number raised to the given power.
*/
def exp() = Exp(expr)
/**
    * Calculates the base 10 logarithm of the given value.
*/
def log10() = Log10(expr)
/**
    * Calculates the natural logarithm of the given value.
*/
def ln() = Ln(expr)
/**
* Calculates the given number raised to the power of the other value.
*/
def power(other: Expression) = Power(expr, other)
/**
* Calculates the square root of a given value.
*/
def sqrt() = Sqrt(expr)
/**
    * Calculates the absolute value of the given value.
*/
def abs() = Abs(expr)
/**
* Calculates the largest integer less than or equal to a given number.
*/
def floor() = Floor(expr)
/**
* Calculates the smallest integer greater than or equal to a given number.
*/
def ceil() = Ceil(expr)
/**
* Calculates the sine of a given number.
*/
def sin() = Sin(expr)
/**
* Calculates the cosine of a given number.
*/
def cos() = Cos(expr)
/**
* Calculates the tangent of a given number.
*/
def tan() = Tan(expr)
/**
* Calculates the cotangent of a given number.
*/
def cot() = Cot(expr)
/**
* Calculates the arc sine of a given number.
*/
def asin() = Asin(expr)
/**
* Calculates the arc cosine of a given number.
*/
def acos() = Acos(expr)
/**
* Calculates the arc tangent of a given number.
*/
def atan() = Atan(expr)
/**
* Converts numeric from radians to degrees.
*/
def degrees() = Degrees(expr)
/**
* Converts numeric from degrees to radians.
*/
def radians() = Radians(expr)
/**
* Calculates the signum of a given number.
*/
def sign() = Sign(expr)
/**
    * Rounds the given number to the given number of places to the right of the decimal point.
*/
def round(places: Expression) = Round(expr, places)
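  // Illustrative use only ('amount is just a placeholder field name): with the implicit
  // conversions of this API in scope, 'amount.round(2) rounds the field to 2 decimal places.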
// String operations
/**
    * Creates a substring of the given string at the given index for a given length.
*
* @param beginIndex first character of the substring (starting at 1, inclusive)
* @param length number of characters of the substring
* @return substring
*/
def substring(beginIndex: Expression, length: Expression) =
Substring(expr, beginIndex, length)
/**
* Creates a substring of the given string beginning at the given index to the end.
*
* @param beginIndex first character of the substring (starting at 1, inclusive)
* @return substring
*/
def substring(beginIndex: Expression) =
new Substring(expr, beginIndex)
/**
* Removes leading and/or trailing characters from the given string.
*
* @param removeLeading if true, remove leading characters (default: true)
* @param removeTrailing if true, remove trailing characters (default: true)
* @param character string containing the character (default: " ")
* @return trimmed string
*/
def trim(
removeLeading: Boolean = true,
removeTrailing: Boolean = true,
character: Expression = TrimConstants.TRIM_DEFAULT_CHAR) = {
if (removeLeading && removeTrailing) {
Trim(TrimMode.BOTH, character, expr)
} else if (removeLeading) {
Trim(TrimMode.LEADING, character, expr)
} else if (removeTrailing) {
Trim(TrimMode.TRAILING, character, expr)
} else {
expr
}
}
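  // A short usage sketch ('name is just a placeholder field): 'name.trim() removes leading and
  // trailing blanks, while 'name.trim(removeLeading = false, character = "*") removes trailing
  // '*' characters only.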
/**
* Returns the length of a string.
*/
def charLength() = CharLength(expr)
/**
* Returns all of the characters in a string in upper case using the rules of
* the default locale.
*/
def upperCase() = Upper(expr)
/**
* Returns all of the characters in a string in lower case using the rules of
* the default locale.
*/
def lowerCase() = Lower(expr)
/**
* Converts the initial letter of each word in a string to uppercase.
* Assumes a string containing only [A-Za-z0-9], everything else is treated as whitespace.
*/
def initCap() = InitCap(expr)
/**
* Returns true, if a string matches the specified LIKE pattern.
*
* e.g. "Jo_n%" matches all strings that start with "Jo(arbitrary letter)n"
*/
def like(pattern: Expression) = Like(expr, pattern)
/**
* Returns true, if a string matches the specified SQL regex pattern.
*
* e.g. "A+" matches all strings that consist of at least one A
*/
def similar(pattern: Expression) = Similar(expr, pattern)
/**
    * Returns the position of a string in another string, starting at 1.
    * Returns 0 if the string could not be found.
*
* e.g. "a".position("bbbbba") leads to 6
*/
def position(haystack: Expression) = Position(expr, haystack)
/**
    * Specifies the over window to be used for a windowed aggregation,
* e.g.:
* table
* .window(Over partitionBy 'c orderBy 'rowtime preceding 2.rows following CURRENT_ROW as 'w)
* .select('c, 'a, 'a.count over 'w, 'a.sum over 'w)
*/
def over(alias: Expression) = {
expr match {
case _: Aggregation => UnresolvedOverCall(
expr.asInstanceOf[Aggregation],
alias)
case _ => throw new TableException(
"The over method can only using with aggregation expression.")
}
}
/**
    * Replaces a substring of a string with another string, starting at a position (starting at 1).
*
* e.g. "xxxxxtest".overlay("xxxx", 6) leads to "xxxxxxxxx"
*/
def overlay(newString: Expression, starting: Expression) = new Overlay(expr, newString, starting)
/**
    * Replaces a substring of a string with another string, starting at a position (starting at 1).
* The length specifies how many characters should be removed.
*
* e.g. "xxxxxtest".overlay("xxxx", 6, 2) leads to "xxxxxxxxxst"
*/
def overlay(newString: Expression, starting: Expression, length: Expression) =
Overlay(expr, newString, starting, length)
// Temporal operations
/**
* Parses a date string in the form "yy-mm-dd" to a SQL Date.
*/
def toDate = Cast(expr, SqlTimeTypeInfo.DATE)
/**
* Parses a time string in the form "hh:mm:ss" to a SQL Time.
*/
def toTime = Cast(expr, SqlTimeTypeInfo.TIME)
/**
* Parses a timestamp string in the form "yy-mm-dd hh:mm:ss.fff" to a SQL Timestamp.
*/
def toTimestamp = Cast(expr, SqlTimeTypeInfo.TIMESTAMP)
/**
* Extracts parts of a time point or time interval. Returns the part as a long value.
*
* e.g. "2006-06-05".toDate.extract(DAY) leads to 5
*/
def extract(timeIntervalUnit: TimeIntervalUnit) = Extract(timeIntervalUnit, expr)
/**
* Returns the quarter of a year from a SQL date.
*
* e.g. "1994-09-27".toDate.quarter() leads to 3
*/
def quarter() = Quarter(expr)
/**
* Rounds down a time point to the given unit.
*
* e.g. "12:44:31".toDate.floor(MINUTE) leads to 12:44:00
*/
def floor(timeIntervalUnit: TimeIntervalUnit) = TemporalFloor(timeIntervalUnit, expr)
/**
* Rounds up a time point to the given unit.
*
* e.g. "12:44:31".toDate.ceil(MINUTE) leads to 12:45:00
*/
def ceil(timeIntervalUnit: TimeIntervalUnit) = TemporalCeil(timeIntervalUnit, expr)
// Interval types
/**
* Creates an interval of the given number of years.
*
* @return interval of months
*/
def year = toMonthInterval(expr, 12)
/**
* Creates an interval of the given number of years.
*
* @return interval of months
*/
def years = year
/**
* Creates an interval of the given number of months.
*
* @return interval of months
*/
def month = toMonthInterval(expr, 1)
/**
* Creates an interval of the given number of months.
*
* @return interval of months
*/
def months = month
/**
* Creates an interval of the given number of days.
*
* @return interval of milliseconds
*/
def day = toMilliInterval(expr, MILLIS_PER_DAY)
/**
* Creates an interval of the given number of days.
*
* @return interval of milliseconds
*/
def days = day
/**
* Creates an interval of the given number of hours.
*
* @return interval of milliseconds
*/
def hour = toMilliInterval(expr, MILLIS_PER_HOUR)
/**
* Creates an interval of the given number of hours.
*
* @return interval of milliseconds
*/
def hours = hour
/**
* Creates an interval of the given number of minutes.
*
* @return interval of milliseconds
*/
def minute = toMilliInterval(expr, MILLIS_PER_MINUTE)
/**
* Creates an interval of the given number of minutes.
*
* @return interval of milliseconds
*/
def minutes = minute
/**
* Creates an interval of the given number of seconds.
*
* @return interval of milliseconds
*/
def second = toMilliInterval(expr, MILLIS_PER_SECOND)
/**
* Creates an interval of the given number of seconds.
*
* @return interval of milliseconds
*/
def seconds = second
/**
* Creates an interval of the given number of milliseconds.
*
* @return interval of milliseconds
*/
def milli = toMilliInterval(expr, 1)
/**
* Creates an interval of the given number of milliseconds.
*
* @return interval of milliseconds
*/
def millis = milli
// Row interval type
/**
* Creates an interval of rows.
*
* @return interval of rows
*/
def rows = toRowInterval(expr)
// Advanced type helper functions
/**
* Accesses the field of a Flink composite type (such as Tuple, POJO, etc.) by name and
    * returns its value.
*
* @param name name of the field (similar to Flink's field expressions)
* @return value of the field
*/
def get(name: String) = GetCompositeField(expr, name)
/**
* Accesses the field of a Flink composite type (such as Tuple, POJO, etc.) by index and
    * returns its value.
*
* @param index position of the field
* @return value of the field
*/
def get(index: Int) = GetCompositeField(expr, index)
/**
* Converts a Flink composite type (such as Tuple, POJO, etc.) and all of its direct subtypes
* into a flat representation where every subtype is a separate field.
*/
def flatten() = Flattening(expr)
/**
* Accesses the element of an array based on an index (starting at 1).
*
* @param index position of the element (starting at 1)
* @return value of the element
*/
def at(index: Expression) = ArrayElementAt(expr, index)
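  // For example, combined with the array(...) constructor defined later in this file,
  // array("a", "b", "c").at(2) evaluates to "b" (element indices start at 1).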
/**
* Returns the number of elements of an array.
*
* @return number of elements
*/
def cardinality() = ArrayCardinality(expr)
/**
* Returns the sole element of an array with a single element. Returns null if the array is
* empty. Throws an exception if the array has more than one element.
*
* @return the first and only element of an array with a single element
*/
def element() = ArrayElement(expr)
// Schema definition
/**
* Declares a field as the rowtime attribute for indicating, accessing, and working in
* Flink's event time.
*/
def rowtime = RowtimeAttribute(expr)
/**
* Declares a field as the proctime attribute for indicating, accessing, and working in
* Flink's processing time.
*/
def proctime = ProctimeAttribute(expr)
}
/**
* Implicit conversions from Scala Literals to Expression [[Literal]] and from [[Expression]]
* to [[ImplicitExpressionOperations]].
*/
trait ImplicitExpressionConversions {
implicit val UNBOUNDED_ROW = UnboundedRow()
implicit val UNBOUNDED_RANGE = UnboundedRange()
implicit val CURRENT_ROW = CurrentRow()
implicit val CURRENT_RANGE = CurrentRange()
implicit class WithOperations(e: Expression) extends ImplicitExpressionOperations {
def expr = e
}
implicit class UnresolvedFieldExpression(s: Symbol) extends ImplicitExpressionOperations {
def expr = UnresolvedFieldReference(s.name)
}
implicit class LiteralLongExpression(l: Long) extends ImplicitExpressionOperations {
def expr = Literal(l)
}
implicit class LiteralByteExpression(b: Byte) extends ImplicitExpressionOperations {
def expr = Literal(b)
}
implicit class LiteralShortExpression(s: Short) extends ImplicitExpressionOperations {
def expr = Literal(s)
}
implicit class LiteralIntExpression(i: Int) extends ImplicitExpressionOperations {
def expr = Literal(i)
}
implicit class LiteralFloatExpression(f: Float) extends ImplicitExpressionOperations {
def expr = Literal(f)
}
implicit class LiteralDoubleExpression(d: Double) extends ImplicitExpressionOperations {
def expr = Literal(d)
}
implicit class LiteralStringExpression(str: String) extends ImplicitExpressionOperations {
def expr = Literal(str)
}
implicit class LiteralBooleanExpression(bool: Boolean) extends ImplicitExpressionOperations {
def expr = Literal(bool)
}
implicit class LiteralJavaDecimalExpression(javaDecimal: java.math.BigDecimal)
extends ImplicitExpressionOperations {
def expr = Literal(javaDecimal)
}
implicit class LiteralScalaDecimalExpression(scalaDecimal: scala.math.BigDecimal)
extends ImplicitExpressionOperations {
def expr = Literal(scalaDecimal.bigDecimal)
}
implicit class LiteralSqlDateExpression(sqlDate: Date) extends ImplicitExpressionOperations {
def expr = Literal(sqlDate)
}
implicit class LiteralSqlTimeExpression(sqlTime: Time) extends ImplicitExpressionOperations {
def expr = Literal(sqlTime)
}
implicit class LiteralSqlTimestampExpression(sqlTimestamp: Timestamp)
extends ImplicitExpressionOperations {
def expr = Literal(sqlTimestamp)
}
implicit def symbol2FieldExpression(sym: Symbol): Expression = UnresolvedFieldReference(sym.name)
implicit def byte2Literal(b: Byte): Expression = Literal(b)
implicit def short2Literal(s: Short): Expression = Literal(s)
implicit def int2Literal(i: Int): Expression = Literal(i)
implicit def long2Literal(l: Long): Expression = Literal(l)
implicit def double2Literal(d: Double): Expression = Literal(d)
implicit def float2Literal(d: Float): Expression = Literal(d)
implicit def string2Literal(str: String): Expression = Literal(str)
implicit def boolean2Literal(bool: Boolean): Expression = Literal(bool)
implicit def javaDec2Literal(javaDec: JBigDecimal): Expression = Literal(javaDec)
implicit def scalaDec2Literal(scalaDec: BigDecimal): Expression =
Literal(scalaDec.bigDecimal)
implicit def sqlDate2Literal(sqlDate: Date): Expression = Literal(sqlDate)
implicit def sqlTime2Literal(sqlTime: Time): Expression = Literal(sqlTime)
implicit def sqlTimestamp2Literal(sqlTimestamp: Timestamp): Expression =
Literal(sqlTimestamp)
implicit def array2ArrayConstructor(array: Array[_]): Expression = convertArray(array)
implicit def userDefinedAggFunctionConstructor[T: TypeInformation, ACC]
(udagg: AggregateFunction[T, ACC]): UDAGGExpression[T, ACC] = UDAGGExpression(udagg)
}
// ------------------------------------------------------------------------------------------------
// Expressions with no parameters
// ------------------------------------------------------------------------------------------------
// we disable the object checker here as it checks for capital letters of objects
// but we want that objects look like functions in certain cases e.g. array(1, 2, 3)
// scalastyle:off object.name
/**
* Returns the current SQL date in UTC time zone.
*/
object currentDate {
/**
* Returns the current SQL date in UTC time zone.
*/
def apply(): Expression = {
CurrentDate()
}
}
/**
* Returns the current SQL time in UTC time zone.
*/
object currentTime {
/**
* Returns the current SQL time in UTC time zone.
*/
def apply(): Expression = {
CurrentTime()
}
}
/**
* Returns the current SQL timestamp in UTC time zone.
*/
object currentTimestamp {
/**
* Returns the current SQL timestamp in UTC time zone.
*/
def apply(): Expression = {
CurrentTimestamp()
}
}
/**
* Returns the current SQL time in local time zone.
*/
object localTime {
/**
* Returns the current SQL time in local time zone.
*/
def apply(): Expression = {
LocalTime()
}
}
/**
* Returns the current SQL timestamp in local time zone.
*/
object localTimestamp {
/**
* Returns the current SQL timestamp in local time zone.
*/
def apply(): Expression = {
LocalTimestamp()
}
}
/**
* Determines whether two anchored time intervals overlap. Time point and temporal are
* transformed into a range defined by two time points (start, end). The function
* evaluates <code>leftEnd >= rightStart && rightEnd >= leftStart</code>.
*
* e.g. temporalOverlaps("2:55:00".toTime, 1.hour, "3:30:00".toTime, 2.hour) leads to true
*/
object temporalOverlaps {
/**
* Determines whether two anchored time intervals overlap. Time point and temporal are
* transformed into a range defined by two time points (start, end).
*
* It evaluates: leftEnd >= rightStart && rightEnd >= leftStart
*
* e.g. temporalOverlaps("2:55:00".toTime, 1.hour, "3:30:00".toTime, 2.hour) leads to true
*/
def apply(
leftTimePoint: Expression,
leftTemporal: Expression,
rightTimePoint: Expression,
rightTemporal: Expression): Expression = {
TemporalOverlaps(leftTimePoint, leftTemporal, rightTimePoint, rightTemporal)
}
}
/**
* Creates an array of literals. The array will be an array of objects (not primitives).
*/
object array {
/**
* Creates an array of literals. The array will be an array of objects (not primitives).
*/
def apply(head: Expression, tail: Expression*): Expression = {
ArrayConstructor(head +: tail.toSeq)
}
}
/**
* Returns a value that is closer than any other value to pi.
*/
object pi {
/**
* Returns a value that is closer than any other value to pi.
*/
def apply(): Expression = {
Pi()
}
}
// scalastyle:on object.name
| fanyon/flink | flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/scala/expressionDsl.scala | Scala | apache-2.0 | 25,019 |
package com.stulsoft.yscdcatalogue.scala.service
import org.apache.commons.lang3.StringUtils
import org.apache.logging.log4j.LogManager
import org.apache.logging.log4j.Logger
import javafx.collections.FXCollections
import javafx.collections.ObservableList
import javafx.scene.control.TreeItem
import com.stulsoft.yscdcatalogue.data.DiskItemNode
import com.stulsoft.yscdcatalogue.data.DiskItemTree
import com.stulsoft.yscdcatalogue.data.SoftItem
import com.stulsoft.yscdcatalogue.data.SoftItemNode
import com.stulsoft.yscdcatalogue.data.SoftItemType
import com.stulsoft.yscdcatalogue.data.SoftItemTree
import com.stulsoft.yscdcatalogue.data.SearchResult
import com.stulsoft.yscdcatalogue.persistence.DBManager;
import scala.collection.JavaConverters._
import scala.util.matching.Regex
/**
* Finds items.
*
* @author Yuriy Stul
*
*/
object Search {
val logName = { val c = getClass.getName; c.substring(0, c.lastIndexOf('.')) }
val logger: Logger = LogManager.getLogger(logName)
/**
   * Finds items that contain the specified search text in the full path, in the comments, or in the storage name.
   * @param softItemTree
   * the tree with Soft Items
   * @param searchText
   * the search text
   * @return collection with items that contain the specified search text in the full path, in the comments, or in the storage name.
*/
def find(softItemTree: SoftItemTree, searchText: String): ObservableList[SearchResult] = {
require(softItemTree != null, "softItemTree could not be null.")
require(searchText != null && searchText.length > 0, "searchText could not be null or empty.")
logger.debug("Staring searching for {}.", searchText)
val searchTextR = prepareRegEx(searchText)
val results: ObservableList[SearchResult] = FXCollections.observableArrayList()
find(searchTextR, results, softItemTree.getRoot)
logger.debug("{} entries were found.", String.valueOf(results.size))
return results
}
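  // Usage sketch with illustrative arguments: find(softItemTree, "*.iso") collects every entry
  // whose full path, comment, or storage name matches the case-insensitive pattern built from
  // "*.iso" by prepareRegEx below.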
private def find(searchTextR: Regex, results: ObservableList[SearchResult], node: SoftItemNode): Unit = {
if (node.getData.getType == SoftItemType.DISK) {
logger.debug("Looking inside {}", node.getData.getName)
try {
val diskItemTree = DBManager.getInstance.getDiskItemTree(node.getData.getDiskId)
val rootNode = diskItemTree.getRoot
//@formatter:off
findInDisk(searchTextR, results,
rootNode,
node.getParent.getData.getName,
rootNode.getData.getStorageName,
node.getTreeItem);
//@formatter:on
} catch {
case e: Exception => {
logger.error("Failed getting disk for {}. Error: {}", node.getData.getName, e.getMessage, e)
}
}
}
node.getChildren.asScala.foreach { child => find(searchTextR, results, child) }
}
private def findInDisk(searchTextR: Regex, results: ObservableList[SearchResult], diskItemNode: DiskItemNode, categoryName: String, diskName: String, treeItem: TreeItem[SoftItem]): Unit = {
//@formatter:off
if ((diskItemNode.getData.getFullPath != null && searchTextR.findFirstIn(diskItemNode.getData.getFullPath).isDefined)
|| (diskItemNode.getData.getComment != null && searchTextR.findFirstIn(diskItemNode.getData.getComment).isDefined)
|| (diskItemNode.getData.getStorageName != null && searchTextR.findFirstIn(diskItemNode.getData.getStorageName).isDefined)) {
val result = new SearchResult(categoryName, diskName, diskItemNode.getData.getFullPath, treeItem)
results.add(result)
}
//@formatter:on
diskItemNode.getChildren.asScala.foreach { child => findInDisk(searchTextR, results, child, categoryName, diskName, treeItem) }
}
private def prepareRegEx(text: String): Regex = {
    var patternText = text.replace(".", "\\.").replace("*", ".*(?i)")
if (!patternText.startsWith(".*")) {
patternText = "(?i)" + patternText
}
if (patternText.endsWith("(?i)")) {
patternText = patternText.substring(0, patternText.lastIndexOf("(?i)"))
}
return patternText.r
}
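  // For example, prepareRegEx("*.iso") yields the pattern ".*(?i)\.iso": "*" expands to a
  // case-insensitive wildcard and "." is escaped to match a literal dot; "(?i)" is only prepended
  // when the text does not already start with the wildcard expansion.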
} | ysden123/YSCDCatalogue | src/main/scala/com/stulsoft/yscdcatalogue/scala/service/Search.scala | Scala | mit | 4,155 |
package com.chatwork.sbt.aws.eb
import com.amazonaws.AmazonServiceException
import com.amazonaws.services.elasticbeanstalk.AWSElasticBeanstalkClient
import com.chatwork.sbt.aws.core.SbtAwsCoreKeys._
import com.chatwork.sbt.aws.eb.SbtAwsEbPlugin.autoImport
import org.sisioh.aws4s.eb.Implicits._
import org.sisioh.aws4s.eb.model._
import sbt.Keys._
import sbt._
import scala.collection.JavaConverters._
import scala.util.Try
trait ConfigurationTemplateSupport { this: SbtAwsEb =>
import autoImport._
private[eb] def ebCreateConfigurationTemplate(client: AWSElasticBeanstalkClient,
applicationName: String,
ebConfigurationTemplate: EbConfigurationTemplate)(
implicit logger: Logger): Try[EbConfigurationTemplateDescription] = {
logger.info(s"create configuration template start: $applicationName, $ebConfigurationTemplate")
val request = CreateConfigurationTemplateRequestFactory
.create()
.withTemplateName(ebConfigurationTemplate.name)
.withApplicationName(applicationName)
.withDescriptionOpt(ebConfigurationTemplate.description)
.withSolutionStackName(ebConfigurationTemplate.solutionStackName)
.withOptionSettings(ebConfigurationTemplate.optionSettings)
val result = client.createConfigurationTemplateAsTry(request).map { e =>
EbConfigurationTemplateDescription(
e.getTemplateName,
Option(e.getDescription),
e.getDeploymentStatus,
e.getApplicationName,
e.getEnvironmentName,
e.getSolutionStackName,
e.getOptionSettings.asScala.map { v =>
EbConfigurationOptionSetting(v.getNamespace, v.getOptionName, v.getValue)
},
e.getDateCreated,
e.getDateUpdated
)
}
logger.info(
s"create configuration template finish: $applicationName, $ebConfigurationTemplate")
result
}
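  // Rough usage sketch. The EbConfigurationTemplate field names below are only inferred from the
  // accessors used in this trait (name, description, solutionStackName, optionSettings,
  // optionsToRemoves) and may not match the real constructor; an implicit sbt Logger must be in scope.
  //
  //   val template = EbConfigurationTemplate(
  //     name = "my-template",
  //     description = Some("web tier settings"),
  //     solutionStackName = "64bit Amazon Linux running Tomcat 8",
  //     optionSettings = Seq.empty,
  //     optionsToRemoves = Seq.empty)
  //   ebCreateConfigurationTemplate(ebClient.value, "my-app", template)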
def ebCreateConfigurationTemplateTask()
: Def.Initialize[Task[Option[EbConfigurationTemplateDescription]]] = Def.taskDyn[Option[EbConfigurationTemplateDescription]] {
implicit val logger = streams.value.log
(ebConfigurationTemplate in aws).toTask.flatMap{ t =>
ebClient.taskValue.flatMap{ c =>
(ebApplicationName in aws).toTask.taskValue.map { n =>
ebCreateConfigurationTemplate(
c,
n,
t.get
).toOption
}
}
}
}
private val pattern = "No Configuration Template named"
private[eb] def ebUpdateConfigurationTemplate(client: AWSElasticBeanstalkClient,
applicationName: String,
ebConfigurationTemplate: EbConfigurationTemplate)(
implicit logger: Logger): Try[EbConfigurationTemplateDescription] = {
logger.info(s"update configuration template start: $applicationName, $ebConfigurationTemplate")
val request = UpdateConfigurationTemplateRequestFactory
.create()
.withTemplateName(ebConfigurationTemplate.name)
.withApplicationName(applicationName)
.withDescriptionOpt(ebConfigurationTemplate.description)
.withOptionSettings(ebConfigurationTemplate.optionSettings)
.withOptionsToRemove(ebConfigurationTemplate.optionsToRemoves)
val result = client
.updateConfigurationTemplateAsTry(request)
.map { result =>
logger.info(
s"update configuration template finish: $applicationName, $ebConfigurationTemplate")
result
}
.map { e =>
EbConfigurationTemplateDescription(
e.getTemplateName,
Option(e.getDescription),
e.getDeploymentStatus,
e.getApplicationName,
e.getEnvironmentName,
e.getSolutionStackName,
e.getOptionSettings.asScala.map { v =>
EbConfigurationOptionSetting(v.getNamespace, v.getOptionName, v.getValue)
},
e.getDateCreated,
e.getDateUpdated
)
}
.recoverWith {
case ex: AmazonServiceException
if ex.getStatusCode == 400 && ex.getMessage.startsWith(pattern) =>
logger.warn(
s"The configuration template is not found.: $applicationName, ${ebConfigurationTemplate.name}")
throw ConfigurationTemplateNotFoundException(
s"The configuration template is not found.: $applicationName, ${ebConfigurationTemplate.name}",
Some(ex))
case ex: AmazonServiceException if ex.getStatusCode == 404 =>
logger.warn(
s"The configuration template is not found.: $applicationName, ${ebConfigurationTemplate.name}")
throw ConfigurationTemplateNotFoundException(
s"The configuration template is not found.: $applicationName, ${ebConfigurationTemplate.name}",
Some(ex))
}
result
}
def ebUpdateConfigurationTemplateTask()
: Def.Initialize[Task[Option[EbConfigurationTemplateDescription]]] = Def.taskDyn[Option[EbConfigurationTemplateDescription]]{
implicit val logger = streams.value.log
(ebConfigurationTemplate in aws).flatMap { t =>
ebClient.taskValue.flatMap{ c =>
(ebApplicationName in aws).toTask.taskValue.map{ n =>
ebUpdateConfigurationTemplate(
c,
n,
t.get
).toOption
}
}
}
}
def ebCreateOrUpdateConfigurationTemplateTask()
: Def.Initialize[Task[Option[EbConfigurationTemplateDescription]]] = Def.taskDyn {
implicit val logger = streams.value.log
(ebConfigurationTemplate in aws).flatMap { t =>
ebClient.taskValue.flatMap{ c =>
(ebApplicationName in aws).toTask.taskValue.map{ n =>
ebUpdateConfigurationTemplate(
c,
n,
t.get
).recoverWith {
case ex: ConfigurationTemplateNotFoundException =>
ebCreateConfigurationTemplate(
c,
n,
t.get
)
}.toOption
}
}
}
}
private[eb] def ebDeleteConfigurationTemplate(
client: AWSElasticBeanstalkClient,
applicationName: String,
ebConfigurationTemplate: EbConfigurationTemplate)(implicit logger: Logger): Try[Unit] = {
logger.info(s"delete configuration template start: $applicationName, $ebConfigurationTemplate")
val request = DeleteConfigurationTemplateRequestFactory
.create()
.withApplicationName(applicationName)
.withTemplateName(ebConfigurationTemplate.name)
val result = client
.deleteConfigurationTemplateAsTry(request)
.map { _ =>
logger.info(
s"delete configuration template finish: $applicationName, $ebConfigurationTemplate")
}
.recoverWith {
case ex: AmazonServiceException if ex.getStatusCode == 404 =>
logger.warn(
s"The configuration template is not found.: $applicationName, ${ebConfigurationTemplate.name}")
throw ConfigurationTemplateNotFoundException(
s"The configuration template is not found.: $applicationName, ${ebConfigurationTemplate.name}",
Some(ex))
}
result
}
def ebDeleteConfigurationTemplateTask(): Def.Initialize[Task[Unit]] = Def.task {
implicit val logger = streams.value.log
ebDeleteConfigurationTemplate(
ebClient.value,
(ebApplicationName in aws).value,
(ebConfigurationTemplate in aws).value.get
).recover {
case ex: ConfigurationTemplateNotFoundException =>
()
}.get
}
}
| chatwork/sbt-aws | sbt-aws-eb/src/main/scala/com/chatwork/sbt/aws/eb/ConfigurationTemplateSupport.scala | Scala | mit | 7,655 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.classification
import org.apache.spark.SparkFunSuite
import org.apache.spark.ml.param.ParamsSuite
import org.apache.spark.ml.util.MLTestingUtils
import org.apache.spark.mllib.classification.LogisticRegressionSuite._
import org.apache.spark.mllib.linalg.{Vectors, Vector}
import org.apache.spark.mllib.util.MLlibTestSparkContext
import org.apache.spark.mllib.util.TestingUtils._
import org.apache.spark.sql.{DataFrame, Row}
class LogisticRegressionSuite extends SparkFunSuite with MLlibTestSparkContext {
@transient var dataset: DataFrame = _
@transient var binaryDataset: DataFrame = _
private val eps: Double = 1e-5
override def beforeAll(): Unit = {
super.beforeAll()
dataset = sqlContext.createDataFrame(generateLogisticInput(1.0, 1.0, nPoints = 100, seed = 42))
/*
Here is the instruction describing how to export the test data into CSV format
so we can validate the training accuracy compared with R's glmnet package.
import org.apache.spark.mllib.classification.LogisticRegressionSuite
val nPoints = 10000
val weights = Array(-0.57997, 0.912083, -0.371077, -0.819866, 2.688191)
val xMean = Array(5.843, 3.057, 3.758, 1.199)
val xVariance = Array(0.6856, 0.1899, 3.116, 0.581)
val data = sc.parallelize(LogisticRegressionSuite.generateMultinomialLogisticInput(
weights, xMean, xVariance, true, nPoints, 42), 1)
data.map(x=> x.label + ", " + x.features(0) + ", " + x.features(1) + ", "
+ x.features(2) + ", " + x.features(3)).saveAsTextFile("path")
*/
binaryDataset = {
val nPoints = 10000
val weights = Array(-0.57997, 0.912083, -0.371077, -0.819866, 2.688191)
val xMean = Array(5.843, 3.057, 3.758, 1.199)
val xVariance = Array(0.6856, 0.1899, 3.116, 0.581)
      val testData = generateMultinomialLogisticInput(weights, xMean, xVariance, true, nPoints, 42)
      sqlContext.createDataFrame(testData)
}
}
test("params") {
ParamsSuite.checkParams(new LogisticRegression)
val model = new LogisticRegressionModel("logReg", Vectors.dense(0.0), 0.0)
ParamsSuite.checkParams(model)
}
test("logistic regression: default params") {
val lr = new LogisticRegression
assert(lr.getLabelCol === "label")
assert(lr.getFeaturesCol === "features")
assert(lr.getPredictionCol === "prediction")
assert(lr.getRawPredictionCol === "rawPrediction")
assert(lr.getProbabilityCol === "probability")
assert(lr.getFitIntercept)
assert(lr.getStandardization)
val model = lr.fit(dataset)
model.transform(dataset)
.select("label", "probability", "prediction", "rawPrediction")
.collect()
assert(model.getThreshold === 0.5)
assert(model.getFeaturesCol === "features")
assert(model.getPredictionCol === "prediction")
assert(model.getRawPredictionCol === "rawPrediction")
assert(model.getProbabilityCol === "probability")
assert(model.intercept !== 0.0)
assert(model.hasParent)
}
test("setThreshold, getThreshold") {
val lr = new LogisticRegression
// default
assert(lr.getThreshold === 0.5, "LogisticRegression.threshold should default to 0.5")
withClue("LogisticRegression should not have thresholds set by default.") {
intercept[java.util.NoSuchElementException] { // Note: The exception type may change in future
lr.getThresholds
}
}
// Set via threshold.
// Intuition: Large threshold or large thresholds(1) makes class 0 more likely.
lr.setThreshold(1.0)
assert(lr.getThresholds === Array(0.0, 1.0))
lr.setThreshold(0.0)
assert(lr.getThresholds === Array(1.0, 0.0))
lr.setThreshold(0.5)
assert(lr.getThresholds === Array(0.5, 0.5))
// Set via thresholds
val lr2 = new LogisticRegression
lr2.setThresholds(Array(0.3, 0.7))
val expectedThreshold = 1.0 / (1.0 + 0.3 / 0.7)
assert(lr2.getThreshold ~== expectedThreshold relTol 1E-7)
// thresholds and threshold must be consistent
lr2.setThresholds(Array(0.1, 0.2, 0.3))
withClue("getThreshold should throw error if thresholds has length != 2.") {
intercept[IllegalArgumentException] {
lr2.getThreshold
}
}
// thresholds and threshold must be consistent: values
withClue("fit with ParamMap should throw error if threshold, thresholds do not match.") {
intercept[IllegalArgumentException] {
val lr2model = lr2.fit(dataset,
lr2.thresholds -> Array(0.3, 0.7), lr2.threshold -> (expectedThreshold / 2.0))
lr2model.getThreshold
}
}
}
test("logistic regression doesn't fit intercept when fitIntercept is off") {
val lr = new LogisticRegression
lr.setFitIntercept(false)
val model = lr.fit(dataset)
assert(model.intercept === 0.0)
// copied model must have the same parent.
MLTestingUtils.checkCopy(model)
}
test("logistic regression with setters") {
// Set params, train, and check as many params as we can.
val lr = new LogisticRegression()
.setMaxIter(10)
.setRegParam(1.0)
.setThreshold(0.6)
.setProbabilityCol("myProbability")
val model = lr.fit(dataset)
val parent = model.parent.asInstanceOf[LogisticRegression]
assert(parent.getMaxIter === 10)
assert(parent.getRegParam === 1.0)
assert(parent.getThreshold === 0.6)
assert(model.getThreshold === 0.6)
// Modify model params, and check that the params worked.
model.setThreshold(1.0)
val predAllZero = model.transform(dataset)
.select("prediction", "myProbability")
.collect()
.map { case Row(pred: Double, prob: Vector) => pred }
assert(predAllZero.forall(_ === 0),
s"With threshold=1.0, expected predictions to be all 0, but only" +
s" ${predAllZero.count(_ === 0)} of ${dataset.count()} were 0.")
// Call transform with params, and check that the params worked.
val predNotAllZero =
model.transform(dataset, model.threshold -> 0.0,
model.probabilityCol -> "myProb")
.select("prediction", "myProb")
.collect()
.map { case Row(pred: Double, prob: Vector) => pred }
assert(predNotAllZero.exists(_ !== 0.0))
// Call fit() with new params, and check as many params as we can.
lr.setThresholds(Array(0.6, 0.4))
val model2 = lr.fit(dataset, lr.maxIter -> 5, lr.regParam -> 0.1,
lr.probabilityCol -> "theProb")
val parent2 = model2.parent.asInstanceOf[LogisticRegression]
assert(parent2.getMaxIter === 5)
assert(parent2.getRegParam === 0.1)
assert(parent2.getThreshold === 0.4)
assert(model2.getThreshold === 0.4)
assert(model2.getProbabilityCol === "theProb")
}
test("logistic regression: Predictor, Classifier methods") {
val sqlContext = this.sqlContext
val lr = new LogisticRegression
val model = lr.fit(dataset)
assert(model.numClasses === 2)
val threshold = model.getThreshold
val results = model.transform(dataset)
// Compare rawPrediction with probability
results.select("rawPrediction", "probability").collect().foreach {
case Row(raw: Vector, prob: Vector) =>
assert(raw.size === 2)
assert(prob.size === 2)
val probFromRaw1 = 1.0 / (1.0 + math.exp(-raw(1)))
assert(prob(1) ~== probFromRaw1 relTol eps)
assert(prob(0) ~== 1.0 - probFromRaw1 relTol eps)
}
// Compare prediction with probability
results.select("prediction", "probability").collect().foreach {
case Row(pred: Double, prob: Vector) =>
val predFromProb = prob.toArray.zipWithIndex.maxBy(_._1)._2
assert(pred == predFromProb)
}
}
test("MultiClassSummarizer") {
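    // histogram(i) counts how many times label i was added; non-integral labels (e.g. 1.3, 5.2) are counted as invalid.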
val summarizer1 = (new MultiClassSummarizer)
.add(0.0).add(3.0).add(4.0).add(3.0).add(6.0)
assert(summarizer1.histogram.zip(Array[Long](1, 0, 0, 2, 1, 0, 1)).forall(x => x._1 === x._2))
assert(summarizer1.countInvalid === 0)
assert(summarizer1.numClasses === 7)
val summarizer2 = (new MultiClassSummarizer)
.add(1.0).add(5.0).add(3.0).add(0.0).add(4.0).add(1.0)
assert(summarizer2.histogram.zip(Array[Long](1, 2, 0, 1, 1, 1)).forall(x => x._1 === x._2))
assert(summarizer2.countInvalid === 0)
assert(summarizer2.numClasses === 6)
val summarizer3 = (new MultiClassSummarizer)
.add(0.0).add(1.3).add(5.2).add(2.5).add(2.0).add(4.0).add(4.0).add(4.0).add(1.0)
assert(summarizer3.histogram.zip(Array[Long](1, 1, 1, 0, 3)).forall(x => x._1 === x._2))
assert(summarizer3.countInvalid === 3)
assert(summarizer3.numClasses === 5)
val summarizer4 = (new MultiClassSummarizer)
.add(3.1).add(4.3).add(2.0).add(1.0).add(3.0)
assert(summarizer4.histogram.zip(Array[Long](0, 1, 1, 1)).forall(x => x._1 === x._2))
assert(summarizer4.countInvalid === 2)
assert(summarizer4.numClasses === 4)
// small map merges large one
val summarizerA = summarizer1.merge(summarizer2)
assert(summarizerA.hashCode() === summarizer2.hashCode())
assert(summarizerA.histogram.zip(Array[Long](2, 2, 0, 3, 2, 1, 1)).forall(x => x._1 === x._2))
assert(summarizerA.countInvalid === 0)
assert(summarizerA.numClasses === 7)
// large map merges small one
val summarizerB = summarizer3.merge(summarizer4)
assert(summarizerB.hashCode() === summarizer3.hashCode())
assert(summarizerB.histogram.zip(Array[Long](1, 2, 2, 1, 3)).forall(x => x._1 === x._2))
assert(summarizerB.countInvalid === 5)
assert(summarizerB.numClasses === 5)
}
test("binary logistic regression with intercept without regularization") {
val trainer1 = (new LogisticRegression).setFitIntercept(true).setStandardization(true)
val trainer2 = (new LogisticRegression).setFitIntercept(true).setStandardization(false)
val model1 = trainer1.fit(binaryDataset)
val model2 = trainer2.fit(binaryDataset)
/*
Using the following R code to load the data and train the model using glmnet package.
library("glmnet")
data <- read.csv("path", header=FALSE)
label = factor(data$V1)
features = as.matrix(data.frame(data$V2, data$V3, data$V4, data$V5))
weights = coef(glmnet(features,label, family="binomial", alpha = 0, lambda = 0))
weights
5 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) 2.8366423
data.V2 -0.5895848
data.V3 0.8931147
data.V4 -0.3925051
data.V5 -0.7996864
*/
val interceptR = 2.8366423
val weightsR = Vectors.dense(-0.5895848, 0.8931147, -0.3925051, -0.7996864)
assert(model1.intercept ~== interceptR relTol 1E-3)
assert(model1.weights ~= weightsR relTol 1E-3)
// Without regularization, with or without standardization will converge to the same solution.
assert(model2.intercept ~== interceptR relTol 1E-3)
assert(model2.weights ~= weightsR relTol 1E-3)
}
test("binary logistic regression without intercept without regularization") {
val trainer1 = (new LogisticRegression).setFitIntercept(false).setStandardization(true)
val trainer2 = (new LogisticRegression).setFitIntercept(false).setStandardization(false)
val model1 = trainer1.fit(binaryDataset)
val model2 = trainer2.fit(binaryDataset)
/*
Using the following R code to load the data and train the model using glmnet package.
library("glmnet")
data <- read.csv("path", header=FALSE)
label = factor(data$V1)
features = as.matrix(data.frame(data$V2, data$V3, data$V4, data$V5))
weights =
coef(glmnet(features,label, family="binomial", alpha = 0, lambda = 0, intercept=FALSE))
weights
5 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) .
data.V2 -0.3534996
data.V3 1.2964482
data.V4 -0.3571741
data.V5 -0.7407946
*/
val interceptR = 0.0
val weightsR = Vectors.dense(-0.3534996, 1.2964482, -0.3571741, -0.7407946)
assert(model1.intercept ~== interceptR relTol 1E-3)
assert(model1.weights ~= weightsR relTol 1E-2)
// Without regularization, with or without standardization should converge to the same solution.
assert(model2.intercept ~== interceptR relTol 1E-3)
assert(model2.weights ~= weightsR relTol 1E-2)
}
test("binary logistic regression with intercept with L1 regularization") {
val trainer1 = (new LogisticRegression).setFitIntercept(true)
.setElasticNetParam(1.0).setRegParam(0.12).setStandardization(true)
val trainer2 = (new LogisticRegression).setFitIntercept(true)
.setElasticNetParam(1.0).setRegParam(0.12).setStandardization(false)
val model1 = trainer1.fit(binaryDataset)
val model2 = trainer2.fit(binaryDataset)
/*
Using the following R code to load the data and train the model using glmnet package.
library("glmnet")
data <- read.csv("path", header=FALSE)
label = factor(data$V1)
features = as.matrix(data.frame(data$V2, data$V3, data$V4, data$V5))
weights = coef(glmnet(features,label, family="binomial", alpha = 1, lambda = 0.12))
weights
5 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) -0.05627428
data.V2 .
data.V3 .
data.V4 -0.04325749
data.V5 -0.02481551
*/
val interceptR1 = -0.05627428
val weightsR1 = Vectors.dense(0.0, 0.0, -0.04325749, -0.02481551)
assert(model1.intercept ~== interceptR1 relTol 1E-2)
assert(model1.weights ~= weightsR1 absTol 2E-2)
/*
Using the following R code to load the data and train the model using glmnet package.
library("glmnet")
data <- read.csv("path", header=FALSE)
label = factor(data$V1)
features = as.matrix(data.frame(data$V2, data$V3, data$V4, data$V5))
weights = coef(glmnet(features,label, family="binomial", alpha = 1, lambda = 0.12,
standardize=FALSE))
weights
5 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) 0.3722152
data.V2 .
data.V3 .
data.V4 -0.1665453
data.V5 .
*/
val interceptR2 = 0.3722152
val weightsR2 = Vectors.dense(0.0, 0.0, -0.1665453, 0.0)
assert(model2.intercept ~== interceptR2 relTol 1E-2)
assert(model2.weights ~= weightsR2 absTol 1E-3)
}
test("binary logistic regression without intercept with L1 regularization") {
val trainer1 = (new LogisticRegression).setFitIntercept(false)
.setElasticNetParam(1.0).setRegParam(0.12).setStandardization(true)
val trainer2 = (new LogisticRegression).setFitIntercept(false)
.setElasticNetParam(1.0).setRegParam(0.12).setStandardization(false)
val model1 = trainer1.fit(binaryDataset)
val model2 = trainer2.fit(binaryDataset)
/*
Using the following R code to load the data and train the model using glmnet package.
library("glmnet")
data <- read.csv("path", header=FALSE)
label = factor(data$V1)
features = as.matrix(data.frame(data$V2, data$V3, data$V4, data$V5))
weights = coef(glmnet(features,label, family="binomial", alpha = 1, lambda = 0.12,
intercept=FALSE))
weights
5 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) .
data.V2 .
data.V3 .
data.V4 -0.05189203
data.V5 -0.03891782
*/
val interceptR1 = 0.0
val weightsR1 = Vectors.dense(0.0, 0.0, -0.05189203, -0.03891782)
assert(model1.intercept ~== interceptR1 relTol 1E-3)
assert(model1.weights ~= weightsR1 absTol 1E-3)
/*
Using the following R code to load the data and train the model using glmnet package.
library("glmnet")
data <- read.csv("path", header=FALSE)
label = factor(data$V1)
features = as.matrix(data.frame(data$V2, data$V3, data$V4, data$V5))
weights = coef(glmnet(features,label, family="binomial", alpha = 1, lambda = 0.12,
intercept=FALSE, standardize=FALSE))
weights
5 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) .
data.V2 .
data.V3 .
data.V4 -0.08420782
data.V5 .
*/
val interceptR2 = 0.0
val weightsR2 = Vectors.dense(0.0, 0.0, -0.08420782, 0.0)
assert(model2.intercept ~== interceptR2 absTol 1E-3)
assert(model2.weights ~= weightsR2 absTol 1E-3)
}
test("binary logistic regression with intercept with L2 regularization") {
val trainer1 = (new LogisticRegression).setFitIntercept(true)
.setElasticNetParam(0.0).setRegParam(1.37).setStandardization(true)
val trainer2 = (new LogisticRegression).setFitIntercept(true)
.setElasticNetParam(0.0).setRegParam(1.37).setStandardization(false)
val model1 = trainer1.fit(binaryDataset)
val model2 = trainer2.fit(binaryDataset)
/*
Using the following R code to load the data and train the model using glmnet package.
library("glmnet")
data <- read.csv("path", header=FALSE)
label = factor(data$V1)
features = as.matrix(data.frame(data$V2, data$V3, data$V4, data$V5))
weights = coef(glmnet(features,label, family="binomial", alpha = 0, lambda = 1.37))
weights
5 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) 0.15021751
data.V2 -0.07251837
data.V3 0.10724191
data.V4 -0.04865309
data.V5 -0.10062872
*/
val interceptR1 = 0.15021751
val weightsR1 = Vectors.dense(-0.07251837, 0.10724191, -0.04865309, -0.10062872)
assert(model1.intercept ~== interceptR1 relTol 1E-3)
assert(model1.weights ~= weightsR1 relTol 1E-3)
/*
Using the following R code to load the data and train the model using glmnet package.
library("glmnet")
data <- read.csv("path", header=FALSE)
label = factor(data$V1)
features = as.matrix(data.frame(data$V2, data$V3, data$V4, data$V5))
weights = coef(glmnet(features,label, family="binomial", alpha = 0, lambda = 1.37,
standardize=FALSE))
weights
5 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) 0.48657516
data.V2 -0.05155371
data.V3 0.02301057
data.V4 -0.11482896
data.V5 -0.06266838
*/
val interceptR2 = 0.48657516
val weightsR2 = Vectors.dense(-0.05155371, 0.02301057, -0.11482896, -0.06266838)
assert(model2.intercept ~== interceptR2 relTol 1E-3)
assert(model2.weights ~= weightsR2 relTol 1E-3)
}
test("binary logistic regression without intercept with L2 regularization") {
val trainer1 = (new LogisticRegression).setFitIntercept(false)
.setElasticNetParam(0.0).setRegParam(1.37).setStandardization(true)
val trainer2 = (new LogisticRegression).setFitIntercept(false)
.setElasticNetParam(0.0).setRegParam(1.37).setStandardization(false)
val model1 = trainer1.fit(binaryDataset)
val model2 = trainer2.fit(binaryDataset)
/*
Using the following R code to load the data and train the model using glmnet package.
library("glmnet")
data <- read.csv("path", header=FALSE)
label = factor(data$V1)
features = as.matrix(data.frame(data$V2, data$V3, data$V4, data$V5))
weights = coef(glmnet(features,label, family="binomial", alpha = 0, lambda = 1.37,
intercept=FALSE))
weights
5 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) .
data.V2 -0.06099165
data.V3 0.12857058
data.V4 -0.04708770
data.V5 -0.09799775
*/
val interceptR1 = 0.0
val weightsR1 = Vectors.dense(-0.06099165, 0.12857058, -0.04708770, -0.09799775)
assert(model1.intercept ~== interceptR1 absTol 1E-3)
assert(model1.weights ~= weightsR1 relTol 1E-2)
/*
Using the following R code to load the data and train the model using glmnet package.
library("glmnet")
data <- read.csv("path", header=FALSE)
label = factor(data$V1)
features = as.matrix(data.frame(data$V2, data$V3, data$V4, data$V5))
weights = coef(glmnet(features,label, family="binomial", alpha = 0, lambda = 1.37,
intercept=FALSE, standardize=FALSE))
weights
5 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) .
data.V2 -0.005679651
data.V3 0.048967094
data.V4 -0.093714016
data.V5 -0.053314311
*/
val interceptR2 = 0.0
val weightsR2 = Vectors.dense(-0.005679651, 0.048967094, -0.093714016, -0.053314311)
assert(model2.intercept ~== interceptR2 absTol 1E-3)
assert(model2.weights ~= weightsR2 relTol 1E-2)
}
test("binary logistic regression with intercept with ElasticNet regularization") {
val trainer1 = (new LogisticRegression).setFitIntercept(true)
.setElasticNetParam(0.38).setRegParam(0.21).setStandardization(true)
val trainer2 = (new LogisticRegression).setFitIntercept(true)
.setElasticNetParam(0.38).setRegParam(0.21).setStandardization(false)
val model1 = trainer1.fit(binaryDataset)
val model2 = trainer2.fit(binaryDataset)
/*
Using the following R code to load the data and train the model using glmnet package.
library("glmnet")
data <- read.csv("path", header=FALSE)
label = factor(data$V1)
features = as.matrix(data.frame(data$V2, data$V3, data$V4, data$V5))
weights = coef(glmnet(features,label, family="binomial", alpha = 0.38, lambda = 0.21))
weights
5 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) 0.57734851
data.V2 -0.05310287
data.V3 .
data.V4 -0.08849250
data.V5 -0.15458796
*/
val interceptR1 = 0.57734851
val weightsR1 = Vectors.dense(-0.05310287, 0.0, -0.08849250, -0.15458796)
assert(model1.intercept ~== interceptR1 relTol 6E-3)
assert(model1.weights ~== weightsR1 absTol 5E-3)
/*
Using the following R code to load the data and train the model using glmnet package.
library("glmnet")
data <- read.csv("path", header=FALSE)
label = factor(data$V1)
features = as.matrix(data.frame(data$V2, data$V3, data$V4, data$V5))
weights = coef(glmnet(features,label, family="binomial", alpha = 0.38, lambda = 0.21,
standardize=FALSE))
weights
5 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) 0.51555993
data.V2 .
data.V3 .
data.V4 -0.18807395
data.V5 -0.05350074
*/
val interceptR2 = 0.51555993
val weightsR2 = Vectors.dense(0.0, 0.0, -0.18807395, -0.05350074)
assert(model2.intercept ~== interceptR2 relTol 6E-3)
assert(model2.weights ~= weightsR2 absTol 1E-3)
}
test("binary logistic regression without intercept with ElasticNet regularization") {
val trainer1 = (new LogisticRegression).setFitIntercept(false)
.setElasticNetParam(0.38).setRegParam(0.21).setStandardization(true)
val trainer2 = (new LogisticRegression).setFitIntercept(false)
.setElasticNetParam(0.38).setRegParam(0.21).setStandardization(false)
val model1 = trainer1.fit(binaryDataset)
val model2 = trainer2.fit(binaryDataset)
/*
Using the following R code to load the data and train the model using glmnet package.
library("glmnet")
data <- read.csv("path", header=FALSE)
label = factor(data$V1)
features = as.matrix(data.frame(data$V2, data$V3, data$V4, data$V5))
weights = coef(glmnet(features,label, family="binomial", alpha = 0.38, lambda = 0.21,
intercept=FALSE))
weights
5 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) .
data.V2 -0.001005743
data.V3 0.072577857
data.V4 -0.081203769
data.V5 -0.142534158
*/
val interceptR1 = 0.0
val weightsR1 = Vectors.dense(-0.001005743, 0.072577857, -0.081203769, -0.142534158)
assert(model1.intercept ~== interceptR1 relTol 1E-3)
assert(model1.weights ~= weightsR1 absTol 1E-2)
/*
Using the following R code to load the data and train the model using glmnet package.
library("glmnet")
data <- read.csv("path", header=FALSE)
label = factor(data$V1)
features = as.matrix(data.frame(data$V2, data$V3, data$V4, data$V5))
weights = coef(glmnet(features,label, family="binomial", alpha = 0.38, lambda = 0.21,
intercept=FALSE, standardize=FALSE))
weights
5 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) .
data.V2 .
data.V3 0.03345223
data.V4 -0.11304532
data.V5 .
*/
val interceptR2 = 0.0
val weightsR2 = Vectors.dense(0.0, 0.03345223, -0.11304532, 0.0)
assert(model2.intercept ~== interceptR2 absTol 1E-3)
assert(model2.weights ~= weightsR2 absTol 1E-3)
}
test("binary logistic regression with intercept with strong L1 regularization") {
val trainer1 = (new LogisticRegression).setFitIntercept(true)
.setElasticNetParam(1.0).setRegParam(6.0).setStandardization(true)
val trainer2 = (new LogisticRegression).setFitIntercept(true)
.setElasticNetParam(1.0).setRegParam(6.0).setStandardization(false)
val model1 = trainer1.fit(binaryDataset)
val model2 = trainer2.fit(binaryDataset)
val histogram = binaryDataset.map { case Row(label: Double, features: Vector) => label }
.treeAggregate(new MultiClassSummarizer)(
seqOp = (c, v) => (c, v) match {
case (classSummarizer: MultiClassSummarizer, label: Double) => classSummarizer.add(label)
},
combOp = (c1, c2) => (c1, c2) match {
case (classSummarizer1: MultiClassSummarizer, classSummarizer2: MultiClassSummarizer) =>
classSummarizer1.merge(classSummarizer2)
}).histogram
/*
For binary logistic regression with strong L1 regularization, all the weights will be zeros.
As a result,
{{{
P(0) = 1 / (1 + \\exp(b)), and
P(1) = \\exp(b) / (1 + \\exp(b))
}}}, hence
{{{
b = \\log{P(1) / P(0)} = \\log{count_1 / count_0}
}}}
*/
val interceptTheory = math.log(histogram(1).toDouble / histogram(0).toDouble)
val weightsTheory = Vectors.dense(0.0, 0.0, 0.0, 0.0)
assert(model1.intercept ~== interceptTheory relTol 1E-5)
assert(model1.weights ~= weightsTheory absTol 1E-6)
assert(model2.intercept ~== interceptTheory relTol 1E-5)
assert(model2.weights ~= weightsTheory absTol 1E-6)
/*
Using the following R code to load the data and train the model using glmnet package.
library("glmnet")
data <- read.csv("path", header=FALSE)
label = factor(data$V1)
features = as.matrix(data.frame(data$V2, data$V3, data$V4, data$V5))
weights = coef(glmnet(features,label, family="binomial", alpha = 1.0, lambda = 6.0))
weights
5 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) -0.2480643
data.V2 0.0000000
data.V3 .
data.V4 .
data.V5 .
*/
val interceptR = -0.248065
val weightsR = Vectors.dense(0.0, 0.0, 0.0, 0.0)
assert(model1.intercept ~== interceptR relTol 1E-5)
assert(model1.weights ~== weightsR absTol 1E-6)
}
test("evaluate on test set") {
// Evaluate on test set should be same as that of the transformed training data.
val lr = new LogisticRegression()
.setMaxIter(10)
.setRegParam(1.0)
.setThreshold(0.6)
val model = lr.fit(dataset)
val summary = model.summary.asInstanceOf[BinaryLogisticRegressionSummary]
val sameSummary = model.evaluate(dataset).asInstanceOf[BinaryLogisticRegressionSummary]
assert(summary.areaUnderROC === sameSummary.areaUnderROC)
assert(summary.roc.collect() === sameSummary.roc.collect())
    assert(summary.pr.collect() === sameSummary.pr.collect())
assert(
summary.fMeasureByThreshold.collect() === sameSummary.fMeasureByThreshold.collect())
assert(summary.recallByThreshold.collect() === sameSummary.recallByThreshold.collect())
assert(
summary.precisionByThreshold.collect() === sameSummary.precisionByThreshold.collect())
}
test("statistics on training data") {
    // Test that the objective history (loss) is monotonically non-increasing.
val lr = new LogisticRegression()
.setMaxIter(10)
.setRegParam(1.0)
.setThreshold(0.6)
val model = lr.fit(dataset)
assert(
model.summary
.objectiveHistory
.sliding(2)
.forall(x => x(0) >= x(1)))
}
}
| practice-vishnoi/dev-spark-1 | mllib/src/test/scala/org/apache/spark/ml/classification/LogisticRegressionSuite.scala | Scala | apache-2.0 | 30,055 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.integration.spark.testsuite.preaggregate
import org.apache.spark.sql.CarbonDatasourceHadoopRelation
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.execution.datasources.LogicalRelation
import org.apache.spark.sql.test.util.QueryTest
import org.scalatest.BeforeAndAfterAll
class TestPreAggregateWithSubQuery extends QueryTest with BeforeAndAfterAll {
override def beforeAll: Unit = {
sql("DROP TABLE IF EXISTS mainTable")
sql("DROP TABLE IF EXISTS mainTable1")
sql("CREATE TABLE mainTable(id int, name string, city string, age string) STORED BY 'org.apache.carbondata.format'")
sql("CREATE TABLE mainTable1(id int, name string, city string, age string) STORED BY 'org.apache.carbondata.format'")
sql("create datamap agg0 on table mainTable using 'preaggregate' as select name,sum(age) from mainTable group by name")
sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/measureinsertintotest.csv' into table mainTable")
sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/measureinsertintotest.csv' into table mainTable1")
}
test("test sub query PreAggregate table selection 1") {
val df = sql(
"""
| SELECT t2.newnewname AS newname
| FROM mainTable1 t1
| JOIN (
| select
| name AS newnewname,
| sum(age) AS sum
| FROM mainTable
| GROUP BY name ) t2
| ON t1.name = t2.newnewname
| GROUP BY t2.newnewname
""".stripMargin)
matchTable(collectLogicalRelation(df.queryExecution.analyzed), "maintable_agg0")
}
test("test sub query PreAggregate table selection 2") {
val df = sql("select t1.name,t1.city from mainTable1 t1 join (select name as newnewname,sum(age) as sum from mainTable group by name )t2 on t1.name=t2.newnewname")
matchTable(collectLogicalRelation(df.queryExecution.analyzed), "maintable_agg0")
}
test("test sub query PreAggregate table selection 3") {
val df = sql("select t1.name,t2.sum from mainTable1 t1 join (select name as newnewname,sum(age) as sum from mainTable group by name )t2 on t1.name=t2.newnewname")
matchTable(collectLogicalRelation(df.queryExecution.analyzed), "maintable_agg0")
}
test("test sub query PreAggregate table selection 4") {
val df = sql("select t1.name,t2.sum from mainTable1 t1 join (select name,sum(age) as sum from mainTable group by name )t2 on t1.name=t2.name group by t1.name, t2.sum")
matchTable(collectLogicalRelation(df.queryExecution.analyzed), "maintable_agg0")
}
/**
* Below method will be used to collect all the logical relation from logical plan
* @param logicalPlan
* query logical plan
* @return all the logical relation
*/
def collectLogicalRelation(logicalPlan: LogicalPlan) : Seq[LogicalRelation] = {
logicalPlan.collect{
case l:LogicalRelation => l
}
}
/**
* Below method will be used to match the logical relation
* @param logicalRelations
* all logical relation
* @param tableName
* table name
*/
def matchTable(logicalRelations: Seq[LogicalRelation], tableName: String) {
assert(logicalRelations.exists {
case l:LogicalRelation if l.relation.isInstanceOf[CarbonDatasourceHadoopRelation] =>
l.relation.asInstanceOf[CarbonDatasourceHadoopRelation].carbonTable.getTableName.
equalsIgnoreCase(tableName)
})
}
override def afterAll: Unit = {
sql("DROP TABLE IF EXISTS mainTable")
sql("DROP TABLE IF EXISTS mainTable1")
}
}
| sgururajshetty/carbondata | integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/preaggregate/TestPreAggregateWithSubQuery.scala | Scala | apache-2.0 | 4,353 |
package scalaDemo
/**
  * Created by liush on 17-7-17.
  * HDFS file operations in Scala.
  * FileSystem defines Hadoop's file-system abstraction.
  * For org.apache.hadoop.fs.Path:
  *   path.getName is only the file name, without the path
  *   path.getParent returns the parent directory's Path
  *   path.toString is the full path name of the file
  */
import java.io.{FileSystem => _, _}
import org.apache.hadoop.fs._
import scala.collection.mutable.ListBuffer
object HDFSHelper {
def isDir(hdfs: FileSystem, name: String): Boolean = {
hdfs.isDirectory(new Path(name))
}
def isDir(hdfs: FileSystem, name: Path): Boolean = {
hdfs.isDirectory(name)
}
def isFile(hdfs: FileSystem, name: String): Boolean = {
hdfs.isFile(new Path(name))
}
def isFile(hdfs: FileSystem, name: Path): Boolean = {
hdfs.isFile(name)
}
  // When creating the file for the given name, HDFS automatically creates any missing parent folders.
def createFile(hdfs: FileSystem, name: String): Boolean = {
hdfs.createNewFile(new Path(name))
}
def createFile(hdfs: FileSystem, name: Path): Boolean = {
hdfs.createNewFile(name)
}
def createFolder(hdfs: FileSystem, name: String): Boolean = {
hdfs.mkdirs(new Path(name))
}
def createFolder(hdfs: FileSystem, name: Path): Boolean = {
hdfs.mkdirs(name)
}
def exists(hdfs: FileSystem, name: String): Boolean = {
hdfs.exists(new Path(name))
}
def exists(hdfs: FileSystem, name: Path): Boolean = {
hdfs.exists(name)
}
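  /** Copy all bytes from the input stream to the output stream, then flush the output and close both streams. */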
def transport(inputStream: InputStream, outputStream: OutputStream): Unit = {
val buffer = new Array[Byte](64 * 1000)
var len = inputStream.read(buffer)
while (len != -1) {
      outputStream.write(buffer, 0, len)
len = inputStream.read(buffer)
}
outputStream.flush()
inputStream.close()
outputStream.close()
}
class MyPathFilter extends PathFilter {
override def accept(path: Path): Boolean = true
}
  /**
    * Create a local target file, creating its parent folder first if necessary.
    */
def createLocalFile(fullName: String): File = {
val target: File = new File(fullName)
if (!target.exists) {
val index = fullName.lastIndexOf(File.separator)
val parentFullName = fullName.substring(0, index)
val parent: File = new File(parentFullName)
if (!parent.exists)
parent.mkdirs
else if (!parent.isDirectory)
parent.mkdir
target.createNewFile
}
target
}
  /**
    * Delete a file or directory in HDFS.
    *
    * @return true: success, false: failed
    */
def deleteFile(hdfs: FileSystem, path: String): Boolean = {
if (isDir(hdfs, path))
hdfs.delete(new Path(path), true) //true: delete files recursively
else
hdfs.delete(new Path(path), false)
}
  /**
    * Recursively collect the full names of all files under an HDFS directory;
    * directories themselves are not included in the result.
    *
    * @param fullName the hdfs dir's full name
    */
def listChildren(hdfs: FileSystem, fullName: String, holder: ListBuffer[String]): ListBuffer[String] = {
val filesStatus = hdfs.listStatus(new Path(fullName), new MyPathFilter)
for (status <- filesStatus) {
val filePath: Path = status.getPath
if (isFile(hdfs, filePath))
holder += filePath.toString
else
listChildren(hdfs, filePath.toString, holder)
}
holder
}
def copyFile(hdfs: FileSystem, source: String, target: String): Unit = {
val sourcePath = new Path(source)
val targetPath = new Path(target)
if (!exists(hdfs, targetPath))
createFile(hdfs, targetPath)
val inputStream: FSDataInputStream = hdfs.open(sourcePath)
val outputStream: FSDataOutputStream = hdfs.create(targetPath)
transport(inputStream, outputStream)
}
def copyFolder(hdfs: FileSystem, sourceFolder: String, targetFolder: String): Unit = {
val holder: ListBuffer[String] = new ListBuffer[String]
val children: List[String] = listChildren(hdfs, sourceFolder, holder).toList
for (child <- children)
copyFile(hdfs, child, child.replaceFirst(sourceFolder, targetFolder))
}
def copyFileFromLocal(hdfs: FileSystem, localSource: String, hdfsTarget: String): Unit = {
val targetPath = new Path(hdfsTarget)
if (!exists(hdfs, targetPath))
createFile(hdfs, targetPath)
val inputStream: FileInputStream = new FileInputStream(localSource)
val outputStream: FSDataOutputStream = hdfs.create(targetPath)
transport(inputStream, outputStream)
}
def copyFileToLocal(hdfs: FileSystem, hdfsSource: String, localTarget: String): Unit = {
val localFile: File = createLocalFile(localTarget)
val inputStream: FSDataInputStream = hdfs.open(new Path(hdfsSource))
val outputStream: FileOutputStream = new FileOutputStream(localFile)
transport(inputStream, outputStream)
}
def copyFolderFromLocal(hdfs: FileSystem, localSource: String, hdfsTarget: String): Unit = {
val localFolder: File = new File(localSource)
val allChildren: Array[File] = localFolder.listFiles
for (child <- allChildren) {
val fullName = child.getAbsolutePath
val nameExcludeSource: String = fullName.substring(localSource.length)
val targetFileFullName: String = hdfsTarget + Path.SEPARATOR + nameExcludeSource
if (child.isFile)
copyFileFromLocal(hdfs, fullName, targetFileFullName)
else
copyFolderFromLocal(hdfs, fullName, targetFileFullName)
}
}
def copyFolderToLocal(hdfs: FileSystem, hdfsSource: String, localTarget: String): Unit = {
val holder: ListBuffer[String] = new ListBuffer[String]
val children: List[String] = listChildren(hdfs, hdfsSource, holder).toList
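    // Strip the fully-qualified HDFS source prefix from each child path to rebuild the relative layout locally.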
val hdfsSourceFullName = hdfs.getFileStatus(new Path(hdfsSource)).getPath.toString
val index = hdfsSourceFullName.length
for (child <- children) {
val nameExcludeSource: String = child.substring(index + 1)
val targetFileFullName: String = localTarget + File.separator + nameExcludeSource
copyFileToLocal(hdfs, child, targetFileFullName)
}
}
} | tophua/spark1.52 | examples/src/main/scala/scalaDemo/HDFSHelper.scala | Scala | apache-2.0 | 6,225 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.tree.impurity
import org.apache.spark.annotation.{DeveloperApi, Experimental, Since}
/**
* :: Experimental ::
* Class for calculating variance during regression
*/
@Since("1.0.0")
@Experimental
object Variance extends Impurity {
/**
* :: DeveloperApi ::
* information calculation for multiclass classification
* @param counts Array[Double] with counts for each label
* @param totalCount sum of counts for all labels
* @return information value, or 0 if totalCount = 0
*/
@Since("1.1.0")
@DeveloperApi
override def calculate(counts: Array[Double], totalCount: Double): Double =
throw new UnsupportedOperationException("Variance.calculate")
/**
* :: DeveloperApi ::
* variance calculation
* @param count number of instances
* @param sum sum of labels
* @param sumSquares summation of squares of the labels
* @return information value, or 0 if count = 0
*/
@Since("1.0.0")
@DeveloperApi
override def calculate(count: Double, sum: Double, sumSquares: Double): Double = {
if (count == 0) {
return 0
}
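    // Variance from the sufficient statistics: E[X^2] - (E[X])^2 = sumSquares / count - (sum / count)^2.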
val squaredLoss = sumSquares - (sum * sum) / count
squaredLoss / count
}
/**
* Get this impurity instance.
* This is useful for passing impurity parameters to a Strategy in Java.
*/
@Since("1.0.0")
  // this.type is the singleton type of the current object, so instance simply returns this object.
def instance: this.type = this
}
/**
* Class for updating views of a vector of sufficient statistics,
* in order to compute impurity from a sample.
* Note: Instances of this class do not hold the data; they operate on views of the data.
*/
private[tree] class VarianceAggregator()
extends ImpurityAggregator(statsSize = 3) with Serializable {
/**
* Update stats for one (node, feature, bin) with the given label.
* @param allStats Flat stats array, with stats for this (node, feature, bin) contiguous.
* @param offset Start index of stats for this (node, feature, bin).
*/
def update(allStats: Array[Double], offset: Int, label: Double, instanceWeight: Double): Unit = {
allStats(offset) += instanceWeight
allStats(offset + 1) += instanceWeight * label
allStats(offset + 2) += instanceWeight * label * label
}
/**
* Get an [[ImpurityCalculator]] for a (node, feature, bin).
* @param allStats Flat stats array, with stats for this (node, feature, bin) contiguous.
* @param offset Start index of stats for this (node, feature, bin).
*/
def getCalculator(allStats: Array[Double], offset: Int): VarianceCalculator = {
new VarianceCalculator(allStats.view(offset, offset + statsSize).toArray)
}
}
/**
* Stores statistics for one (node, feature, bin) for calculating impurity.
* Unlike [[GiniAggregator]], this class stores its own data and is for a specific
* (node, feature, bin).
* @param stats Array of sufficient statistics for a (node, feature, bin).
*/
private[spark] class VarianceCalculator(stats: Array[Double]) extends ImpurityCalculator(stats) {
require(stats.size == 3,
s"VarianceCalculator requires sufficient statistics array stats to be of length 3," +
s" but was given array of length ${stats.size}.")
/**
* Make a deep copy of this [[ImpurityCalculator]].
*/
def copy: VarianceCalculator = new VarianceCalculator(stats.clone())
/**
* Calculate the impurity from the stored sufficient statistics.
*/
def calculate(): Double = Variance.calculate(stats(0), stats(1), stats(2))
/**
* Number of data points accounted for in the sufficient statistics.
*/
def count: Long = stats(0).toLong
/**
* Prediction which should be made based on the sufficient statistics.
*/
def predict: Double = if (count == 0) {
0
} else {
stats(1) / count
}
override def toString: String = {
s"VarianceAggregator(cnt = ${stats(0)}, sum = ${stats(1)}, sum2 = ${stats(2)})"
}
}
| tophua/spark1.52 | mllib/src/main/scala/org/apache/spark/mllib/tree/impurity/Variance.scala | Scala | apache-2.0 | 4,722 |
package is.hail.expr.ir
import is.hail.HailContext
import is.hail.annotations._
import is.hail.backend.ExecuteContext
import is.hail.backend.spark.SparkBackend
import is.hail.expr.JSONAnnotationImpex
import is.hail.types.physical.{PArray, PCanonicalStruct, PStruct, PType}
import is.hail.types.virtual._
import is.hail.types.{MatrixType, TableType}
import is.hail.io.{BufferSpec, FileWriteMetadata, MatrixWriteCheckpoint}
import is.hail.io.fs.FS
import is.hail.linalg.RowMatrix
import is.hail.rvd.{AbstractRVDSpec, RVD, _}
import is.hail.sparkextras.ContextRDD
import is.hail.utils._
import is.hail.variant._
import org.apache.commons.lang3.StringUtils
import org.apache.spark.SparkContext
import org.apache.spark.sql.Row
import org.json4s.jackson.JsonMethods
import org.json4s.jackson.JsonMethods.parse
case class MatrixValue(
typ: MatrixType,
tv: TableValue) {
val colFieldType = tv.globals.t.fieldType(LowerMatrixIR.colsFieldName).asInstanceOf[PArray]
assert(colFieldType.required)
assert(colFieldType.elementType.required)
lazy val globals: BroadcastRow = {
val prevGlobals = tv.globals
val newT = prevGlobals.t.deleteField(LowerMatrixIR.colsFieldName)
val rvb = new RegionValueBuilder(prevGlobals.value.region)
rvb.start(newT)
rvb.startStruct()
rvb.addFields(prevGlobals.t, prevGlobals.value,
prevGlobals.t.fields.filter(_.name != LowerMatrixIR.colsFieldName).map(_.index).toArray)
rvb.endStruct()
BroadcastRow(tv.ctx, RegionValue(prevGlobals.value.region, rvb.end()), newT)
}
lazy val colValues: BroadcastIndexedSeq = {
val prevGlobals = tv.globals
val field = prevGlobals.t.field(LowerMatrixIR.colsFieldName)
val t = field.typ.asInstanceOf[PArray]
BroadcastIndexedSeq(tv.ctx,
RegionValue(prevGlobals.value.region, prevGlobals.t.loadField(prevGlobals.value.offset, field.index)),
t)
}
val rvd: RVD = tv.rvd
lazy val rvRowPType: PStruct = rvd.typ.rowType
lazy val rvRowType: TStruct = rvRowPType.virtualType
lazy val entriesIdx: Int = rvRowPType.fieldIdx(MatrixType.entriesIdentifier)
lazy val entryArrayPType: PArray = rvRowPType.types(entriesIdx).asInstanceOf[PArray]
lazy val entryArrayType: TArray = rvRowType.types(entriesIdx).asInstanceOf[TArray]
lazy val entryPType: PStruct = entryArrayPType.elementType.asInstanceOf[PStruct]
lazy val entryType: TStruct = entryArrayType.elementType.asInstanceOf[TStruct]
lazy val entriesRVType: TStruct = TStruct(
MatrixType.entriesIdentifier -> TArray(entryType))
require(rvd.typ.key.startsWith(typ.rowKey), s"\\nmat row key: ${ typ.rowKey }\\nrvd key: ${ rvd.typ.key }")
def sparkContext: SparkContext = rvd.sparkContext
def nPartitions: Int = rvd.getNumPartitions
lazy val nCols: Int = colValues.t.loadLength(colValues.value.offset)
def stringSampleIds: IndexedSeq[String] = {
val colKeyTypes = typ.colKeyStruct.types
assert(colKeyTypes.length == 1 && colKeyTypes(0) == TString, colKeyTypes.toSeq)
val querier = typ.colType.query(typ.colKey(0))
colValues.javaValue.map(querier(_).asInstanceOf[String])
}
def requireUniqueSamples(method: String) {
val dups = stringSampleIds.counter().filter(_._2 > 1).toArray
if (dups.nonEmpty)
fatal(s"Method '$method' does not support duplicate column keys. Duplicates:" +
s"\\n @1", dups.sortBy(-_._2).map { case (id, count) => s"""($count) "$id"""" }.truncatable("\\n "))
}
def referenceGenome: ReferenceGenome = typ.referenceGenome
def colsTableValue(ctx: ExecuteContext): TableValue =
TableValue(ctx, typ.colsTableType, globals, colsRVD(ctx))
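  /** Write the column values as a key-less table spec under the given path and return the bytes written. */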
private def writeCols(ctx: ExecuteContext, path: String, bufferSpec: BufferSpec): Long = {
val fs = ctx.fs
val fileData = AbstractRVDSpec.writeSingle(ctx, path + "/rows", colValues.t.elementType.asInstanceOf[PStruct], bufferSpec, colValues.javaValue)
val partitionCounts = fileData.map(_.rowsWritten)
val colsSpec = TableSpecParameters(
FileFormat.version.rep,
is.hail.HAIL_PRETTY_VERSION,
"../references",
typ.colsTableType.copy(key = FastIndexedSeq[String]()),
Map("globals" -> RVDComponentSpec("../globals/rows"),
"rows" -> RVDComponentSpec("rows"),
"partition_counts" -> PartitionCountsComponentSpec(partitionCounts)))
colsSpec.write(fs, path)
using(fs.create(path + "/_SUCCESS"))(out => ())
fileData.map(_.bytesWritten).sum
}
private def writeGlobals(ctx: ExecuteContext, path: String, bufferSpec: BufferSpec): Long = {
val fs = ctx.fs
val fileData = AbstractRVDSpec.writeSingle(ctx, path + "/rows", globals.t, bufferSpec, Array(globals.javaValue))
val partitionCounts = fileData.map(_.rowsWritten)
AbstractRVDSpec.writeSingle(ctx, path + "/globals", PCanonicalStruct.empty(required = true), bufferSpec, Array[Annotation](Row()))
val globalsSpec = TableSpecParameters(
FileFormat.version.rep,
is.hail.HAIL_PRETTY_VERSION,
"../references",
TableType(typ.globalType, FastIndexedSeq(), TStruct.empty),
Map("globals" -> RVDComponentSpec("globals"),
"rows" -> RVDComponentSpec("rows"),
"partition_counts" -> PartitionCountsComponentSpec(partitionCounts)))
globalsSpec.write(fs, path)
using(fs.create(path + "/_SUCCESS"))(out => ())
fileData.map(_.bytesWritten).sum
}
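  /**
   * Write the globals, rows, entries and cols components, the reference genomes and the
   * top-level matrix table spec plus the _SUCCESS marker, then report what was written.
   */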
private def finalizeWrite(
ctx: ExecuteContext,
path: String,
bufferSpec: BufferSpec,
fileData: Array[FileWriteMetadata],
consoleInfo: Boolean
): Unit = {
val fs = ctx.fs
val globalsPath = path + "/globals"
fs.mkDir(globalsPath)
val globalBytesWritten = writeGlobals(ctx, globalsPath, bufferSpec)
val partitionCounts = fileData.map(_.rowsWritten)
val rowsSpec = TableSpecParameters(
FileFormat.version.rep,
is.hail.HAIL_PRETTY_VERSION,
"../references",
typ.rowsTableType,
Map("globals" -> RVDComponentSpec("../globals/rows"),
"rows" -> RVDComponentSpec("rows"),
"partition_counts" -> PartitionCountsComponentSpec(partitionCounts)))
rowsSpec.write(fs, path + "/rows")
using(fs.create(path + "/rows/_SUCCESS"))(out => ())
val entriesSpec = TableSpecParameters(
FileFormat.version.rep,
is.hail.HAIL_PRETTY_VERSION,
"../references",
TableType(entriesRVType, FastIndexedSeq(), typ.globalType),
Map("globals" -> RVDComponentSpec("../globals/rows"),
"rows" -> RVDComponentSpec("rows"),
"partition_counts" -> PartitionCountsComponentSpec(partitionCounts)))
entriesSpec.write(fs, path + "/entries")
using(fs.create(path + "/entries/_SUCCESS"))(out => ())
fs.mkDir(path + "/cols")
val colBytesWritten = writeCols(ctx, path + "/cols", bufferSpec)
val refPath = path + "/references"
fs.mkDir(refPath)
Array(typ.colType, typ.rowType, entryType, typ.globalType).foreach { t =>
ReferenceGenome.exportReferences(fs, refPath, t)
}
val spec = MatrixTableSpecParameters(
FileFormat.version.rep,
is.hail.HAIL_PRETTY_VERSION,
"references",
typ,
Map("globals" -> RVDComponentSpec("globals/rows"),
"cols" -> RVDComponentSpec("cols/rows"),
"rows" -> RVDComponentSpec("rows/rows"),
"entries" -> RVDComponentSpec("entries/rows"),
"partition_counts" -> PartitionCountsComponentSpec(partitionCounts)))
spec.write(fs, path)
writeNativeFileReadMe(fs, path)
using(fs.create(path + "/_SUCCESS"))(_ => ())
val nRows = partitionCounts.sum
val printer: String => Unit = if (consoleInfo) info else log.info
val partitionBytesWritten = fileData.map(_.bytesWritten)
val totalRowsEntriesBytes = partitionBytesWritten.sum
val totalBytesWritten: Long = totalRowsEntriesBytes + colBytesWritten + globalBytesWritten
val (smallestStr, largestStr) = if (fileData.isEmpty) ("N/A", "N/A") else {
val smallestPartition = fileData.minBy(_.bytesWritten)
val largestPartition = fileData.maxBy(_.bytesWritten)
val smallestStr = s"${ smallestPartition.rowsWritten } rows (${ formatSpace(smallestPartition.bytesWritten) })"
val largestStr = s"${ largestPartition.rowsWritten } rows (${ formatSpace(largestPartition.bytesWritten) })"
(smallestStr, largestStr)
}
printer(s"wrote matrix table with $nRows ${ plural(nRows, "row") } " +
s"and $nCols ${ plural(nCols, "column") } " +
s"in ${ partitionCounts.length } ${ plural(partitionCounts.length, "partition") } " +
s"to $path" +
s"\\n Total size: ${ formatSpace(totalBytesWritten) }" +
s"\\n * Rows/entries: ${ formatSpace(totalRowsEntriesBytes) }" +
s"\\n * Columns: ${ formatSpace(colBytesWritten) }" +
s"\\n * Globals: ${ formatSpace(globalBytesWritten) }" +
s"\\n * Smallest partition: $smallestStr" +
s"\\n * Largest partition: $largestStr")
}
def write(ctx: ExecuteContext,
path: String,
overwrite: Boolean,
stageLocally: Boolean,
codecSpecJSON: String,
partitions: String,
partitionsTypeStr: String,
checkpointFile: String): Unit = {
assert(typ.isCanonical)
val fs = ctx.fs
val bufferSpec = BufferSpec.parseOrDefault(codecSpecJSON)
if (overwrite) {
if (checkpointFile != null)
fatal(s"cannot currently use a checkpoint file with overwrite=True")
fs.delete(path, recursive = true)
} else if (fs.exists(path))
if (checkpointFile == null || fs.exists(path + "/_SUCCESS"))
fatal(s"file already exists: $path")
fs.mkDir(path)
val targetPartitioner =
if (partitions != null) {
if (checkpointFile != null)
fatal(s"cannot currently use a checkpoint file with `partitions` argument")
val partitionsType = IRParser.parseType(partitionsTypeStr)
val jv = JsonMethods.parse(partitions)
val rangeBounds = JSONAnnotationImpex.importAnnotation(jv, partitionsType)
.asInstanceOf[IndexedSeq[Interval]]
new RVDPartitioner(typ.rowKey.toArray, typ.rowKeyStruct, rangeBounds)
} else
null
val checkpoint = Option(checkpointFile).map(path => MatrixWriteCheckpoint.read(fs, path, path, rvd.getNumPartitions))
val fileData = rvd.writeRowsSplit(ctx, path, bufferSpec, stageLocally, targetPartitioner, checkpoint)
finalizeWrite(ctx, path, bufferSpec, fileData, consoleInfo = true)
}
def colsRVD(ctx: ExecuteContext): RVD = {
// only used in exportPlink
assert(typ.colKey.isEmpty)
val colPType = PType.canonical(typ.colType).setRequired(true).asInstanceOf[PStruct]
RVD.coerce(ctx,
typ.colsTableType.canonicalRVDType,
ContextRDD.parallelize(colValues.safeJavaValue)
.cmapPartitions { (ctx, it) => it.copyToRegion(ctx.region, colPType) })
}
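  /**
   * Collect the given entry field (which must be float64) into a distributed RowMatrix with
   * one row per matrix-table row and one column per matrix-table column; missing or filtered
   * entries cause a fatal error.
   */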
def toRowMatrix(entryField: String): RowMatrix = {
val partCounts: Array[Long] = rvd.countPerPartition()
val partStarts = partCounts.scanLeft(0L)(_ + _)
assert(partStarts.length == rvd.getNumPartitions + 1)
val partStartsBc = HailContext.backend.broadcast(partStarts)
val localRvRowPType = rvRowPType
val localEntryArrayPType = entryArrayPType
val localEntryPType = entryPType
val fieldType = entryPType.field(entryField).typ
assert(fieldType.virtualType == TFloat64)
val localEntryArrayIdx = entriesIdx
val fieldIdx = entryType.fieldIdx(entryField)
val numColsLocal = nCols
val rows = rvd.mapPartitionsWithIndex { (pi, _, it) =>
var i = partStartsBc.value(pi)
it.map { ptr =>
val data = new Array[Double](numColsLocal)
val entryArrayOffset = localRvRowPType.loadField(ptr, localEntryArrayIdx)
var j = 0
while (j < numColsLocal) {
if (localEntryArrayPType.isElementDefined(entryArrayOffset, j)) {
val entryOffset = localEntryArrayPType.loadElement(entryArrayOffset, j)
if (localEntryPType.isFieldDefined(entryOffset, fieldIdx)) {
val fieldOffset = localEntryPType.loadField(entryOffset, fieldIdx)
data(j) = Region.loadDouble(fieldOffset)
} else
fatal(s"Cannot create RowMatrix: missing value at row $i and col $j")
} else
fatal(s"Cannot create RowMatrix: filtered entry at row $i and col $j")
j += 1
}
val row = (i, data)
i += 1
row
}
}
new RowMatrix(rows, nCols, Some(partStarts.last), Some(partCounts))
}
def typeCheck(): Unit = {
assert(typ.globalType.typeCheck(globals.value))
assert(TArray(typ.colType).typeCheck(colValues.value))
val localRVRowType = rvRowType
assert(rvd.toRows.forall(r => localRVRowType.typeCheck(r)))
}
def toTableValue: TableValue = tv
}
object MatrixValue {
def writeMultiple(
ctx: ExecuteContext,
mvs: IndexedSeq[MatrixValue],
paths: IndexedSeq[String],
overwrite: Boolean,
stageLocally: Boolean,
bufferSpec: BufferSpec
): Unit = {
val first = mvs.head
require(mvs.forall(_.typ == first.typ))
require(mvs.length == paths.length, s"found ${ mvs.length } matrix tables but ${ paths.length } paths")
val fs = ctx.fs
paths.foreach { path =>
if (overwrite)
fs.delete(path, recursive = true)
else if (fs.exists(path))
fatal(s"file already exists: $path")
fs.mkDir(path)
}
val fileData = RVD.writeRowsSplitFiles(ctx, mvs.map(_.rvd), paths, bufferSpec, stageLocally)
for ((mv, path, fd) <- (mvs, paths, fileData).zipped) {
mv.finalizeWrite(ctx, path, bufferSpec, fd, consoleInfo = false)
}
}
def apply(
ctx: ExecuteContext,
typ: MatrixType,
globals: Row,
colValues: IndexedSeq[Row],
rvd: RVD): MatrixValue = {
val globalsType = typ.globalType.appendKey(LowerMatrixIR.colsFieldName, TArray(typ.colType))
val globalsPType = PType.canonical(globalsType).asInstanceOf[PStruct]
val rvb = new RegionValueBuilder(ctx.r)
rvb.start(globalsPType)
rvb.startStruct()
typ.globalType.fields.foreach { f =>
rvb.addAnnotation(f.typ, globals.get(f.index))
}
rvb.addAnnotation(TArray(typ.colType), colValues)
MatrixValue(typ,
TableValue(ctx, TableType(
rowType = rvd.rowType,
key = typ.rowKey,
globalType = globalsType),
BroadcastRow(ctx, RegionValue(ctx.r, rvb.end()), globalsPType),
rvd))
}
}
| hail-is/hail | hail/src/main/scala/is/hail/expr/ir/MatrixValue.scala | Scala | mit | 14,426 |
package cbt
import java.io.File
import java.net.URL
import scala.collection.immutable.Seq
abstract class PublishBuild(context: Context) extends PackageBuild(context){
def name = artifactId
def description: String
def url: URL
def developers: Seq[Developer]
def licenses: Seq[License]
def scmUrl: String
def scmConnection: String
def pomExtra: Seq[scala.xml.Node] = Seq()
// ========== package ==========
/** put additional xml that should go into the POM file in here */
def pom: File = lib.pom(
groupId = groupId,
artifactId = artifactId,
version = version,
name = name,
description = description,
url = url,
developers = developers,
licenses = licenses,
scmUrl = scmUrl,
scmConnection = scmConnection,
dependencies = dependencies,
pomExtra = pomExtra,
jarTarget = jarTarget
)
// ========== publish ==========
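  // releaseFolder follows the standard Maven repository layout: groupId segments as directories, then artifactId/version/.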
final protected def releaseFolder = s"/${groupId.replace(".","/")}/$artifactId/$version/"
def snapshotUrl = new URL("https://oss.sonatype.org/content/repositories/snapshots")
def releaseUrl = new URL("https://oss.sonatype.org/service/local/staging/deploy/maven2")
def publishSnapshot: Unit = lib.publishSnapshot(sourceFiles, pom +: `package`, snapshotUrl ++ releaseFolder )
def publishSigned: Unit = lib.publishSigned(sourceFiles, pom +: `package`, releaseUrl ++ releaseFolder )
}
| tobias-johansson/cbt | stage2/PublishBuild.scala | Scala | bsd-2-clause | 1,384 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions
import java.nio.charset.StandardCharsets
import org.apache.spark.SparkFunSuite
import org.apache.spark.sql.Row
import org.apache.spark.sql.catalyst.util.DateTimeUtils
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.types.CalendarInterval
class LiteralExpressionSuite extends SparkFunSuite with ExpressionEvalHelper {
test("null") {
checkEvaluation(Literal.create(null, BooleanType), null)
checkEvaluation(Literal.create(null, ByteType), null)
checkEvaluation(Literal.create(null, ShortType), null)
checkEvaluation(Literal.create(null, IntegerType), null)
checkEvaluation(Literal.create(null, LongType), null)
checkEvaluation(Literal.create(null, FloatType), null)
checkEvaluation(Literal.create(null, DoubleType), null)
checkEvaluation(Literal.create(null, StringType), null)
checkEvaluation(Literal.create(null, BinaryType), null)
checkEvaluation(Literal.create(null, DecimalType.USER_DEFAULT), null)
checkEvaluation(Literal.create(null, DateType), null)
checkEvaluation(Literal.create(null, TimestampType), null)
checkEvaluation(Literal.create(null, CalendarIntervalType), null)
checkEvaluation(Literal.create(null, ArrayType(ByteType, true)), null)
checkEvaluation(Literal.create(null, MapType(StringType, IntegerType)), null)
checkEvaluation(Literal.create(null, StructType(Seq.empty)), null)
}
test("default") {
checkEvaluation(Literal.default(BooleanType), false)
checkEvaluation(Literal.default(ByteType), 0.toByte)
checkEvaluation(Literal.default(ShortType), 0.toShort)
checkEvaluation(Literal.default(IntegerType), 0)
checkEvaluation(Literal.default(LongType), 0L)
checkEvaluation(Literal.default(FloatType), 0.0f)
checkEvaluation(Literal.default(DoubleType), 0.0)
checkEvaluation(Literal.default(StringType), "")
checkEvaluation(Literal.default(BinaryType), "".getBytes(StandardCharsets.UTF_8))
checkEvaluation(Literal.default(DecimalType.USER_DEFAULT), Decimal(0))
checkEvaluation(Literal.default(DecimalType.SYSTEM_DEFAULT), Decimal(0))
checkEvaluation(Literal.default(DateType), DateTimeUtils.toJavaDate(0))
checkEvaluation(Literal.default(TimestampType), DateTimeUtils.toJavaTimestamp(0L))
checkEvaluation(Literal.default(CalendarIntervalType), new CalendarInterval(0, 0L))
checkEvaluation(Literal.default(ArrayType(StringType)), Array())
checkEvaluation(Literal.default(MapType(IntegerType, StringType)), Map())
checkEvaluation(Literal.default(StructType(StructField("a", StringType) :: Nil)), Row(""))
}
test("boolean literals") {
checkEvaluation(Literal(true), true)
checkEvaluation(Literal(false), false)
}
test("int literals") {
List(0, 1, Int.MinValue, Int.MaxValue).foreach { d =>
checkEvaluation(Literal(d), d)
checkEvaluation(Literal(d.toLong), d.toLong)
checkEvaluation(Literal(d.toShort), d.toShort)
checkEvaluation(Literal(d.toByte), d.toByte)
}
checkEvaluation(Literal(Long.MinValue), Long.MinValue)
checkEvaluation(Literal(Long.MaxValue), Long.MaxValue)
}
test("double literals") {
List(0.0, -0.0, Double.NegativeInfinity, Double.PositiveInfinity).foreach { d =>
checkEvaluation(Literal(d), d)
checkEvaluation(Literal(d.toFloat), d.toFloat)
}
checkEvaluation(Literal(Double.MinValue), Double.MinValue)
checkEvaluation(Literal(Double.MaxValue), Double.MaxValue)
checkEvaluation(Literal(Float.MinValue), Float.MinValue)
checkEvaluation(Literal(Float.MaxValue), Float.MaxValue)
}
test("string literals") {
checkEvaluation(Literal(""), "")
checkEvaluation(Literal("test"), "test")
checkEvaluation(Literal("\\u0000"), "\\u0000")
}
test("sum two literals") {
checkEvaluation(Add(Literal(1), Literal(1)), 2)
}
test("binary literals") {
checkEvaluation(Literal.create(new Array[Byte](0), BinaryType), new Array[Byte](0))
checkEvaluation(Literal.create(new Array[Byte](2), BinaryType), new Array[Byte](2))
}
test("decimal") {
List(-0.0001, 0.0, 0.001, 1.2, 1.1111, 5).foreach { d =>
checkEvaluation(Literal(Decimal(d)), Decimal(d))
checkEvaluation(Literal(Decimal(d.toInt)), Decimal(d.toInt))
checkEvaluation(Literal(Decimal(d.toLong)), Decimal(d.toLong))
checkEvaluation(Literal(Decimal((d * 1000L).toLong, 10, 3)),
Decimal((d * 1000L).toLong, 10, 3))
checkEvaluation(Literal(BigDecimal(d.toString)), Decimal(d))
checkEvaluation(Literal(new java.math.BigDecimal(d.toString)), Decimal(d))
}
}
// TODO(davies): add tests for ArrayType, MapType and StructType
}
| gioenn/xSpark | sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/LiteralExpressionSuite.scala | Scala | apache-2.0 | 5,493 |
package gv
package jleon4
trait PathPackage {
this: TypeClassPackage ⇒
trait Path[T] extends Any with TypeClass.WithTypeParams[T, Path.Ops]
object Path extends TypeClassCompanion[Path] {
trait Ops extends Any {
type Self <: JPath
def self: Self
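      // Appends an extension to the file name, staying in the same directory (e.g. "a/b" becomes "a/b.ext").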
final def addExt(ext: String): JPath = self resolveSibling s"${self.getFileName}.$ext"
}
final object Ops
}
}
| mouchtaris/jleon | src/main/scala-2.12/gv/jleon4/PathPackage.scala | Scala | mit | 402 |
/*start*/ <elem> content </elem> /*end*/;
//Elem | ilinum/intellij-scala | testdata/typeInference/xml/Element.scala | Scala | apache-2.0 | 48 |
/* **************************************************************************
* *
* Copyright (C) 2011-2012 Peter Kossek, Nils Foken, Christian Krause *
* *
* Peter Kossek <[email protected]> *
* Nils Foken <[email protected]> *
* Christian Krause <[email protected]> *
* *
****************************************************************************
* *
* This file is part of 'scalomator'. *
* *
* This project is free software: you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation, either version 3 of the License, or *
* any later version. *
* *
* This project is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License *
* along with this project. If not, see <http://www.gnu.org/licenses/>. *
* *
****************************************************************************/
package scalax.automata
import org.specs2._
class DSLSpec extends Specification { def is =
// -----------------------------------------------------------------------
// fragments
// -----------------------------------------------------------------------
"DSL Specification" ^
p^
""""Does Accept" example""" ! e1 ^
end
// -----------------------------------------------------------------------
// tests
// -----------------------------------------------------------------------
def e1 = {
val dfa = DFA(1, Set(1), Map(
1 -> 0 -> 2,
1 -> 1 -> 1,
2 -> 0 -> 1,
2 -> 1 -> 2
))
does the dfa accept (1,0,0,1) must_== true
}
}
| wookietreiber/scalomator | core/src/test/scala/DSLSpec.scala | Scala | gpl-3.0 | 2,965 |
/**
* Copyright 2017-2020 The OpenZipkin Authors
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package brave.play.module
import java.util.Collections
import akka.actor.CoordinatedShutdown
import brave.Tracing
import brave.play.{ZipkinTraceService, ZipkinTraceServiceLike}
import org.scalatest.AsyncFlatSpec
import play.api.inject.guice.GuiceApplicationBuilder
import zipkin2.reporter.Sender
import zipkin2.reporter.okhttp3.OkHttpSender
class ZipkinModuleSpec extends AsyncFlatSpec {
val injector = new GuiceApplicationBuilder()
.bindings(new ZipkinModule)
.injector()
it should "provide an okhttp sender" in {
val sender = injector.instanceOf[Sender]
assert(sender.isInstanceOf[OkHttpSender])
}
it should "eventually close the sender" in {
// provisioning the sender so we can tell if it is closed on shutdown
val sender = injector.instanceOf[Sender]
// stopping the application should close the sender!
injector.instanceOf[CoordinatedShutdown].run(CoordinatedShutdown.UnknownReason) map { _ =>
val thrown = intercept[Exception] {
sender.sendSpans(Collections.emptyList[Array[Byte]]).execute()
}
assert(thrown.getMessage === "closed")
}
}
it should "provide a tracing component" in instanceOfTracing { tracing =>
assert(Tracing.current() != null)
assert(Tracing.current() == tracing)
}
it should "eventually close the tracing component" in instanceOfTracing { tracing =>
// stopping the application should close the tracing component!
injector.instanceOf[CoordinatedShutdown].run(CoordinatedShutdown.UnknownReason) map { _ =>
assert(Tracing.current() == null)
}
}
private def instanceOfTracing[A](test: Tracing => A): A = {
val tracing = injector.instanceOf[Tracing]
try {
test(tracing)
} finally {
// Ensures there is no active Tracing object
tracing.close()
}
}
it should "provide a zipkin trace service" in {
// TODO: dies due to missing dispatcher
val service = injector.instanceOf[ZipkinTraceServiceLike]
assert(service.isInstanceOf[ZipkinTraceService])
}
}
| bizreach/play-zipkin-tracing | play/src/test/scala/brave/play/module/ZipkinModuleSpec.scala | Scala | apache-2.0 | 2,641 |
import sbt._
class Plugins(info: ProjectInfo) extends PluginDefinition(info) {
val bankSimpleRepo = "BankSimple Repo" at "http://nexus.banksimple.com/content/groups/public"
val sbtIdeaRepo = "sbt-idea-repo" at "http://mpeltonen.github.com/maven/"
val sbtIdeaPlugin = "com.github.mpeltonen" % "sbt-idea-plugin" % "0.2.0"
val assemblySBT = "com.codahale" % "assembly-sbt" % "0.1"
val lessis = "less is repo" at "http://repo.lessis.me"
val ghIssues = "me.lessis" % "sbt-gh-issues" % "0.0.1"
}
| KirinDave/Clipping | project/plugins/plugins.scala | Scala | apache-2.0 | 507 |
package test.scala
import org.specs.Specification
import com.protose.resque._
import com.protose.resque.FancySeq._
object FancySeqSpec extends Specification {
"it joins seqs of strings together" in {
List("1", "2", "3").join must_== "123"
}
"it joins with a delimiter" in {
List("1", "2", "3").join(",") must_== "1,2,3"
}
}
// vim: set ts=4 sw=4 et:
| jamesgolick/scala-resque-worker | src/test/scala/FancySeqSpec.scala | Scala | mit | 384 |
package chrome.webRequest
import chrome.events._
import chrome.webRequest.bindings._
import scala.language.implicitConversions
import scala.scalajs.js
import scala.scalajs.js.UndefOr
trait WebRequestEventSource[T <: js.Function] {
def listen(callback: T,
filter: RequestFilter,
opt_extraInfoSpec: js.UndefOr[js.Array[String]] = js.undefined): Subscription
}
private[webRequest] class SubscriptionImpl[A <: js.Function](event: WebRequestEvent[A], fn: A) extends Subscription {
override def cancel(): Unit = event.removeListener(fn)
}
class WebRequestEventSourceImpl[T <: js.Function](event: WebRequestEvent[T])
extends WebRequestEventSource[T] {
override def listen(callback: T,
filter: RequestFilter,
opt_extraInfoSpec: UndefOr[js.Array[String]] = js.undefined): Subscription = {
event.addListener(callback, filter, opt_extraInfoSpec)
new SubscriptionImpl(event, callback)
}
}
object WebRequestEventSource {
implicit def eventAsEventSource[T <: js.Function](event: WebRequestEvent[T]): WebRequestEventSource[T] =
new WebRequestEventSourceImpl(event)
}
| lucidd/scala-js-chrome | bindings/src/main/scala/chrome/webRequest/BlockingRequestEventSource.scala | Scala | mit | 1,155 |
package uima.ae
import geography.GeographyExtractorForQuestion
import jeqa.types._
import org.apache.uima.UimaContext
import org.apache.uima.analysis_component.JCasAnnotator_ImplBase
import org.apache.uima.analysis_engine.AnalysisEngineProcessException
import org.apache.uima.cas.FSIterator
import org.apache.uima.jcas.JCas
import org.apache.uima.jcas.cas.FSArray
import org.apache.uima.resource.ResourceInitializationException
import question.{QueryGenerator, QuestionFocusAnalyzer}
import text.StringOption
import time.{TimeExtractorForQuestion, TimeTmp}
import util.uima.FSListUtils._
import util.uima.JCasUtils
import util.uima.SeqStringUtils._
import util.uima.SeqUtils._
/**
* <p>
  * Decomposes an Exam into Questions and estimates the Question Format Type of each one.
  * If the Question Format Type is an essay, the analyzer extracts, generates, and estimates
  * <ul>
  * <li>the kind of knowledge source to search</li>
  * <li>the queries used to retrieve the required descriptions</li>
  * <li>conditions such as time and region that narrow down the search results</li>
  * <li>the information used to measure how appropriate each answer candidate is</li>
  * </ul>
* </p>
* @author K.Sakamoto
* Created on 15/10/30
*/
class QuestionAnalyzer extends JCasAnnotator_ImplBase with DocumentAnnotator {
@throws[ResourceInitializationException]
override def initialize(aContext: UimaContext): Unit = {
println(">> Question Analyzer Initializing")
super.initialize(aContext)
}
@throws[AnalysisEngineProcessException]
override def process(aJCas: JCas): Unit = {
println(">> Question Analyzer Processing")
JCasUtils.setAJCasOpt(Option(aJCas))
@SuppressWarnings(Array[String]("rawtypes"))
val itExam: FSIterator[Nothing] = aJCas.getAnnotationIndex(Exam.`type`).iterator(true)
while (itExam.hasNext) {
val exam: Exam = itExam.next
val questionSet: FSArray = exam.getQuestionSet
for (i <- 0 until questionSet.size) {
val question: Question = questionSet.get(i).asInstanceOf[Question]
val document: Document = question.getDocument
if (StringOption(document.getText).nonEmpty) {
annotate(aJCas, document, Nil)
document.setTitle(question.getLabel)
question.setDocument(document)
val sentenceList: Seq[Sentence] = document.getSentenceSet.toSeq.asInstanceOf[Seq[Sentence]]
          // contextual time analysis
val sentenceArray: Array[Sentence] = sentenceList.toArray
for (j <- 1 until sentenceArray.length) {
val sentence: Sentence = sentenceArray(j)
if (Option(sentence.getBeginTime).isEmpty && Option(sentence.getEndTime).isEmpty) {
val previousSentence: Sentence = sentenceArray(j - 1)
sentence.setBeginTime(previousSentence.getBeginTime)
sentence.setEndTime(previousSentence.getEndTime)
}
}
//begin {time limit analysis}
val timeLimit: TimeTmp = TimeExtractorForQuestion.extract(sentenceList)
timeLimit.beginTime match {
case Some(beginTime) =>
val beginTimeLimit: Time = new Time(aJCas)
beginTimeLimit.addToIndexes()
beginTimeLimit.setYear(beginTime)
beginTimeLimit.setTextList(timeLimit.beginTimeTextList.toStringList)
question.setBeginTimeLimit(beginTimeLimit)
case None =>
// Do nothing
}
timeLimit.endTime match {
case Some(endTime) =>
val endTimeLimit: Time = new Time(aJCas)
endTimeLimit.addToIndexes()
endTimeLimit.setYear(endTime)
endTimeLimit.setTextList(timeLimit.endTimeTextList.toStringList)
question.setEndTimeLimit(endTimeLimit)
case None =>
// Do nothing
}
//end {time limit analysis}
//begin {geography limit}
val (areaList, termList): (Seq[String], Seq[String]) = GeographyExtractorForQuestion.extract(sentenceList)
val geographyLimit: Geography = new Geography(aJCas)
geographyLimit.addToIndexes()
geographyLimit.setTermList(termList.toStringList)
geographyLimit.setArea(areaList.toStringList)
question.setGeographyLimit(geographyLimit)
//end {geography limit}
//begin {question focus}
question.setQuestionFocusSet(QuestionFocusAnalyzer.analyze(sentenceList).toStringList)
//end {question focus}
//begin {question format type}
question.setQuestionFormatType(
if (question.getKeywordSet.toSeq.nonEmpty) {
"essayWithKeywords"
} else {
"essayWithoutKeyword"
}
)
//end {question format type}
//begin {answer format type}
question.setAnswerFormatType("essay")
//end {answer format type}
//begin {lexical answer type}
//question.setLexicalAnswerTypeSet(document.getContentWordList)
//end {lexical answer type}
//begin {semantic answer type}
//question.setSemanticAnswerTypeSet(document.getContentWordList)
//end {semantic answer type}
//begin {query}
question.setQuerySet(QueryGenerator.generate(aJCas, question).toFSArray)
//end {query}
}
}
}
}
}
| ktr-skmt/FelisCatusZero | src/main/scala/uima/ae/QuestionAnalyzer.scala | Scala | apache-2.0 | 5,450 |
package com.splicemachine.spark.splicemachine
import org.apache.spark.sql.SparkSession
object ThisVersionSpecificItems {
def beforeAll(spark: SparkSession): Unit = {}
val schema = SparkVersionSpecificItems.schemaWithoutMetadata
}
| splicemachine/spliceengine | splice_spark/src/test/spark2.4/com/splicemachine/spark/splicemachine/ThisVersionSpecificItems.scala | Scala | agpl-3.0 | 238 |
package com.twitter.scalding.spark_backend
import com.stripe.dagon.{FunctionK, Memoize}
import com.twitter.algebird.Semigroup
import com.twitter.scalding.Config
import com.twitter.scalding.typed._
import com.twitter.scalding.typed.functions.{DebugFn, FilterKeysToFilter}
import java.util.{LinkedHashMap => JLinkedHashMap, Map => JMap}
import org.apache.spark.storage.StorageLevel
import scala.collection.mutable.{ArrayBuffer, Map => MMap}
object SparkPlanner {
import SparkMode.SparkConfigMethods
sealed trait PartitionComputer {
def apply(currentNumPartitions: Int): Int
}
final case object IdentityPartitionComputer extends PartitionComputer {
def apply(currentNumPartitions: Int): Int = currentNumPartitions
}
/**
* A PartitionComputer which returns the desired number of partitions given the configured max partition
* count, reducer scaling factor, number of scalding reducers, and current number of partitions. We
* calculate the desired number of partitions in two stages:
   * 1. If the number of scalding reducers is provided, we scale this number by the reducer scaling factor.
   *    If it is <= 0 or missing, we use the current number of partitions.
   * 2. If we have configured a max number of partitions, we cap the result of 1 by this number.
   *    Otherwise, just return the result of 1.
*/
final case class ConfigPartitionComputer(config: Config, scaldingReducers: Option[Int])
extends PartitionComputer {
def apply(currentNumPartitions: Int): Int = {
val maxPartitions = config.getMaxPartitionCount
val getReducerScaling = config.getReducerScaling.getOrElse(1.0d)
val candidate = scaldingReducers match {
case None =>
currentNumPartitions
case Some(i) if i <= 0 =>
// scaldingReducers should be > 0, otherwise we default to the current number of partitions
currentNumPartitions
case Some(red) =>
(getReducerScaling * red).toInt
}
if (candidate > 0) {
maxPartitions match {
case Some(maxP) =>
Math.min(maxP, candidate)
case None =>
candidate
}
} else if (candidate == 0) {
// we probably always want at least 1 partition
1
} else {
throw new IllegalArgumentException(
"Got a negative partition count. Check configured maxPartitionCount or reducerScaling."
)
}
}
}
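  // Illustrative worked example (hypothetical configuration values, not taken from a real job): with
  // reducerScaling = 0.5, scaldingReducers = Some(200) and maxPartitionCount = Some(64), stage 1 gives
  // (0.5 * 200).toInt = 100 and stage 2 caps it at 64. With scaldingReducers = None, the current number
  // of partitions is used instead, subject to the same cap.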
/**
* Convert a TypedPipe to an RDD
*/
def plan(config: Config, srcs: Resolver[TypedSource, SparkSource]): FunctionK[TypedPipe, Op] =
Memoize.functionK(new Memoize.RecursiveK[TypedPipe, Op] {
import TypedPipe._
def toFunction[A] = {
case (cp @ CounterPipe(_), rec) =>
// TODO: counters not yet supported
def go[A](p: CounterPipe[A]): Op[A] =
rec(p.pipe).map(_._1)
go(cp)
case (cp @ CrossPipe(_, _), rec) =>
def go[A, B](cp: CrossPipe[A, B]): Op[(A, B)] =
rec(cp.viaHashJoin)
go(cp)
case (CrossValue(left, EmptyValue), rec) => rec(EmptyTypedPipe)
case (CrossValue(left, LiteralValue(v)), rec) =>
val op = rec(left) // linter:disable:UndesirableTypeInference
op.map((_, v))
case (CrossValue(left, ComputedValue(right)), rec) =>
rec(CrossPipe(left, right))
case (p: DebugPipe[a], rec) =>
// There is really little that can be done here but println
rec[a](p.input.map(DebugFn()))
case (EmptyTypedPipe, rec) =>
Op.Empty
case (fk @ FilterKeys(_, _), rec) =>
def go[K, V](node: FilterKeys[K, V]): Op[(K, V)] = {
val FilterKeys(pipe, fn) = node
rec(pipe).filter(FilterKeysToFilter(fn))
}
go(fk)
case (f @ Filter(_, _), rec) =>
def go[T](f: Filter[T]): Op[T] = {
val Filter(p, fn) = f
rec[T](p).filter(fn)
}
go(f)
case (f @ FlatMapValues(_, _), rec) =>
def go[K, V, U](node: FlatMapValues[K, V, U]) = {
val fn = node.fn
rec(node.input).flatMapValues(fn)
}
go(f)
case (FlatMapped(prev, fn), rec) =>
val op = rec(prev) // linter:disable:UndesirableTypeInference
op.concatMap(fn)
case (ForceToDisk(pipe), rec) =>
val sparkPipe = rec(pipe)
config.getForceToDiskPersistMode.getOrElse(StorageLevel.DISK_ONLY) match {
case StorageLevel.NONE => sparkPipe
case notNone => sparkPipe.persist(notNone)
}
case (Fork(pipe), rec) =>
val sparkPipe = rec(pipe)
          // just let spark do its default thing on Forks.
// unfortunately, that may mean recomputing the upstream
// multiple times, so users may want to override this,
// or be careful about using forceToDisk
config.getForkPersistMode.getOrElse(StorageLevel.NONE) match {
case StorageLevel.NONE => sparkPipe
case notNone => sparkPipe.persist(notNone)
}
case (IterablePipe(iterable), _) =>
Op.FromIterable(iterable)
case (f @ MapValues(_, _), rec) =>
def go[K, V, U](node: MapValues[K, V, U]): Op[(K, U)] =
rec(node.input).mapValues(node.fn)
go(f)
case (Mapped(input, fn), rec) =>
val op = rec(input) // linter:disable:UndesirableTypeInference
op.map(fn)
case (m @ MergedTypedPipe(_, _), rec) =>
// Spark can handle merging several inputs at once,
// but won't otherwise optimize if not given in
// one batch
OptimizationRules.unrollMerge(m) match {
case Nil => rec(EmptyTypedPipe)
case h :: Nil => rec(h)
case h :: rest =>
val pc = ConfigPartitionComputer(config, None)
Op.Merged(pc, rec(h), rest.map(rec(_)))
}
case (SourcePipe(src), _) =>
Op.Source(config, src, srcs(src))
case (slk @ SumByLocalKeys(_, _), rec) =>
def sum[K, V](sblk: SumByLocalKeys[K, V]): Op[(K, V)] = {
// we can use Algebird's SummingCache https://github.com/twitter/algebird/blob/develop/algebird-core/src/main/scala/com/twitter/algebird/SummingCache.scala#L36
// plus mapPartitions to implement this
val SumByLocalKeys(p, sg) = sblk
// TODO set a default in a better place
val defaultCapacity = 10000
val capacity = config.getMapSideAggregationThreshold.getOrElse(defaultCapacity)
rec(p).mapPartitions(CachingSum(capacity, sg))
}
sum(slk)
case (tp: TrappedPipe[a], rec) => rec[a](tp.input)
      // this can be interpreted as catching any exception
// on the map-phase until the next partition, so it can
// be made to work by changing Op to return all
// the values that fail on error
case (wd: WithDescriptionTypedPipe[a], rec) =>
// TODO we could optionally print out the descriptions
// after the future completes
rec[a](wd.input)
case (woc: WithOnComplete[a], rec) =>
// TODO
rec[a](woc.input)
case (hcg @ HashCoGroup(_, _, _), rec) =>
def go[K, V1, V2, W](hcg: HashCoGroup[K, V1, V2, W]): Op[(K, W)] = {
val leftOp = rec(hcg.left)
val rightOp = rec(ReduceStepPipe(HashJoinable.toReduceStep(hcg.right)))
leftOp.hashJoin(rightOp)(hcg.joiner)
}
go(hcg)
case (CoGroupedPipe(cg), rec) =>
planCoGroup(config, cg, rec)
case (ReduceStepPipe(ir @ IdentityReduce(_, _, _, descriptions, _)), rec) =>
def go[K, V1, V2](ir: IdentityReduce[K, V1, V2]): Op[(K, V2)] = {
type OpT[V] = Op[(K, V)]
val op = rec(ir.mapped)
ir.evidence.subst[OpT](op)
}
go(ir)
case (ReduceStepPipe(uir @ UnsortedIdentityReduce(_, _, _, descriptions, _)), rec) =>
def go[K, V1, V2](uir: UnsortedIdentityReduce[K, V1, V2]): Op[(K, V2)] = {
type OpT[V] = Op[(K, V)]
val op = rec(uir.mapped)
uir.evidence.subst[OpT](op)
}
go(uir)
case (ReduceStepPipe(ivsr @ IdentityValueSortedReduce(_, _, _, _, _, _)), rec) =>
def go[K, V1, V2](uir: IdentityValueSortedReduce[K, V1, V2]): Op[(K, V2)] = {
type OpT[V] = Op[(K, V)]
val op = rec(uir.mapped)
val pc = ConfigPartitionComputer(config, uir.reducers)
val sortedOp = op.sorted(pc)(uir.keyOrdering, uir.valueSort)
uir.evidence.subst[OpT](sortedOp)
}
go(ivsr)
case (ReduceStepPipe(ValueSortedReduce(ordK, pipe, ordV, fn, red, _)), rec) =>
val op = rec(pipe)
val pc = ConfigPartitionComputer(config, red)
op.sortedMapGroup(pc)(fn)(ordK, ordV)
case (ReduceStepPipe(IteratorMappedReduce(ordK, pipe, fn, red, _)), rec) =>
val op = rec(pipe)
val pc = ConfigPartitionComputer(config, red)
op.mapGroup(pc)(fn)(ordK)
}
})
case class OnEmptyIterator[A](it: Iterator[A], fn: () => Unit) extends Iterator[A] {
var fnMut: () => Unit = fn
def hasNext = it.hasNext || {
if (fnMut != null) { fnMut(); fnMut = null }
false
}
def next = it.next
}
case class CachingSum[K, V](capacity: Int, semigroup: Semigroup[V])
extends Function1[Iterator[(K, V)], Iterator[(K, V)]] {
def newCache(evicted: MMap[K, V]): JMap[K, V] = new JLinkedHashMap[K, V](capacity + 1, 0.75f, true) {
override protected def removeEldestEntry(eldest: JMap.Entry[K, V]) =
if (super.size > capacity) {
evicted.put(eldest.getKey, eldest.getValue)
true
} else {
false
}
}
def apply(kvs: Iterator[(K, V)]) = {
val evicted = MMap.empty[K, V]
val currentCache = newCache(evicted)
new Iterator[(K, V)] {
var resultIterator: Iterator[(K, V)] = Iterator.empty
def hasNext = kvs.hasNext || resultIterator.hasNext
@annotation.tailrec
def next: (K, V) =
if (resultIterator.hasNext) {
resultIterator.next
} else if (kvs.hasNext) {
val (k, deltav) = kvs.next
val vold = currentCache.get(k)
if (vold == null) {
currentCache.put(k, deltav)
} else {
currentCache.put(k, semigroup.plus(vold, deltav))
}
// let's see if we have anything to evict
if (evicted.nonEmpty) {
resultIterator = OnEmptyIterator(evicted.iterator, () => evicted.clear())
}
next
} else if (currentCache.isEmpty) {
throw new java.util.NoSuchElementException("next called on empty CachingSum Iterator")
} else {
// time to flush the cache
import scala.collection.JavaConverters._
val cacheIter = currentCache.entrySet.iterator.asScala.map(e => (e.getKey, e.getValue))
resultIterator = OnEmptyIterator(cacheIter, () => currentCache.clear())
next
}
}
}
}
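  // Illustrative behaviour sketch (hypothetical input; Semigroup.from is used only for this example):
  // CachingSum(capacity = 2, Semigroup.from[Int](_ + _))(Iterator(("a", 1), ("a", 2), ("b", 3))).toList
  // sums values per key in the LRU cache, emits entries early only if they are evicted, and flushes the
  // rest once the input is exhausted, yielding ("a", 3) and ("b", 3) here.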
private def planHashJoinable[K, V](hj: HashJoinable[K, V], rec: FunctionK[TypedPipe, Op]): Op[(K, V)] =
rec(TypedPipe.ReduceStepPipe(HashJoinable.toReduceStep(hj)))
private def planCoGroup[K, V](
config: Config,
cg: CoGrouped[K, V],
rec: FunctionK[TypedPipe, Op]
): Op[(K, V)] = {
import CoGrouped._
cg match {
case FilterKeys(cg, fn) =>
planCoGroup(config, cg, rec).filter { case (k, _) => fn(k) }
case MapGroup(cg, fn) =>
// don't need to repartition, just mapPartitions
// we know this because cg MUST be a join, there is no non-joined CoGrouped
// so we know the output op planCoGroup(config, cg, rec) is already partitioned
planCoGroup(config, cg, rec)
.mapPartitions { its =>
val grouped = Iterators.groupSequential(its)
grouped.flatMap { case (k, vs) => fn(k, vs).map((k, _)) }
}
case pair @ Pair(_, _, _) =>
// ideally, we do one partitioning of all the data,
// but for now just do the naive thing:
def planSide[A, B](cg: CoGroupable[A, B]): Op[(A, B)] =
cg match {
case hg: HashJoinable[A, B] => planHashJoinable(hg, rec)
case cg: CoGrouped[A, B] => planCoGroup(config, cg, rec)
}
def planPair[A, B, C, D](p: Pair[A, B, C, D]): Op[(A, D)] = {
val eleft: Op[(A, Either[B, C])] = planSide(p.larger).map { case (k, v) => (k, Left(v)) }
val eright: Op[(A, Either[B, C])] = planSide(p.smaller).map { case (k, v) => (k, Right(v)) }
val joinFn = p.fn
val pc = ConfigPartitionComputer(config, p.reducers)
// we repartition in sorted, so no need to repartition in merge
(eleft
.merge(IdentityPartitionComputer, eright))
.sorted(pc)(p.keyOrdering, JoinOrdering())
.mapPartitions { it =>
val grouped = Iterators.groupSequential(it)
grouped.flatMap { case (k, eithers) =>
val kfn: Function2[Iterator[B], Iterable[C], Iterator[D]] = joinFn(k, _, _)
JoinIterator[B, C, D](kfn)(eithers).map((k, _))
}
}
}
planPair(pair)
case WithDescription(cg, d) =>
// TODO handle descriptions in some way
planCoGroup(config, cg, rec)
case WithReducers(cg, r) =>
// TODO handle the number of reducers, maybe by setting the number of partitions
planCoGroup(config, cg, rec)
}
}
// Put the rights first
case class JoinOrdering[A, B]() extends Ordering[Either[A, B]] {
def compare(left: Either[A, B], right: Either[A, B]): Int =
if (left.isLeft && right.isRight) 1
else if (left.isRight && right.isLeft) -1
else 0
}
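  // For example, sorting Seq(Right("a"), Left(1), Right("b"), Left(2)) with JoinOrdering places the
  // Rights first, so JoinIterator below can buffer the (smaller, Right-mapped) side into an Iterable
  // before streaming the Left side through the join function.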
case class JoinIterator[A, B, C](fn: (Iterator[A], Iterable[B]) => Iterator[C])
extends Function1[Iterator[Either[A, B]], Iterator[C]] {
@SuppressWarnings(Array("org.wartremover.warts.EitherProjectionPartial"))
def apply(eitherIter: Iterator[Either[A, B]]) = {
val buffered = eitherIter.buffered
val bs: Iterable[B] = {
@annotation.tailrec
def loop(buf: ArrayBuffer[B]): Iterable[B] =
if (buffered.isEmpty) buf
else if (buffered.head.isLeft) buf
else {
buf += buffered.next.right.get
loop(buf)
}
loop(ArrayBuffer())
}
val iterA: Iterator[A] = buffered.map(_.left.get)
fn(iterA, bs)
}
}
}
| twitter/scalding | scalding-spark/src/main/scala/com/twitter/scalding/spark_backend/SparkBackend.scala | Scala | apache-2.0 | 14,909 |
package com.kubukoz.adventofcode2016
import com.kubukoz.adventofcode2016.Day7._
import org.scalatest.{FlatSpec, Matchers}
class Day7Tests extends FlatSpec with Matchers {
"supportsTLS" should "work properly for case 1" in {
supportsTLS("abba[mnop]qrst") shouldBe true
}
it should "work for case 2" in {
supportsTLS("abcd[bddb]xyyx") shouldBe false
}
it should "work for case 3" in {
supportsTLS("aaaa[qwer]tyui") shouldBe false
}
it should "work for case 4" in {
supportsTLS("ioxxoj[asdfgh]zxcvbn") shouldBe true
}
it should "work for case 5" in {
supportsTLS("asdf[dupa]oxxo") shouldBe true
}
it should "work for case 6" in {
supportsTLS("asdf[dupa]oxxo[yolo]swag") shouldBe true
}
it should "work for case 7" in {
supportsTLS("asdf[ollo]oxxo[yolo]swag") shouldBe false
}
it should "count the input" in {
input.count(supportsTLS) shouldBe 118
}
/*
"supportsSSL" should "work properly for case 1" in {
supportsSSL("aba[bab]xyz") shouldBe true
}
it should "work for case 2" in {
supportsSSL("xyx[xyx]xyx") shouldBe false
}
it should "work for case 3" in {
supportsSSL("aaa[kek]eke") shouldBe true
}
it should "work for case 4" in {
supportsSSL("zazbz[bzb]cdb") shouldBe true
}
it should "work for my case" in {
supportsSSL("asdf[babXaba]dupa") shouldBe false
}
it should "count the input" in {
input.count(supportsSSL) shouldBe 260
}
*/
it should "work for the weird case" in {
supportsSSL("xtugntiubziynpzbju[onxffxfoxibzzzd]wineojjetzitpemflx[jlncrpyrujpoxluwyc]fxvfnhyqsiwndzoh[lkwwatmiesspwcqulnc]cbimtxmazbbzlvjf") shouldBe false
}
}
| kubukoz/advent-of-code-2016 | src/test/scala/com/kubukoz/adventofcode2016/Day7Tests.scala | Scala | apache-2.0 | 1,674 |
/*
* Copyright 2009-2015 DigitalGlobe, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and limitations under the License.
*/
package org.mrgeo.spark
import java.io.{ObjectInput, ObjectOutput, Externalizable}
import org.apache.hadoop.conf.Configuration
import org.mrgeo.spark.job.{JobArguments, MrGeoDriver}
import scala.collection.mutable
object SlopeDriver extends MrGeoDriver with Externalizable {
def slope(input:String, units:String, output:String, conf:Configuration) = {
val args = mutable.Map[String, String]()
val name = "Slope (" + input + ")"
args += SlopeAspectDriver.Input -> input
args += SlopeAspectDriver.Units -> units
args += SlopeAspectDriver.Output -> output
args += SlopeAspectDriver.Type -> SlopeAspectDriver.Slope
run(name, classOf[SlopeAspectDriver].getName, args.toMap, conf)
}
override def writeExternal(out: ObjectOutput): Unit = {}
override def readExternal(in: ObjectInput): Unit = {}
override def setup(job: JobArguments): Boolean = {
true
}
}
| bradh/mrgeo | mrgeo-mapalgebra/mrgeo-mapalgebra-terrain/src/main/scala/org/mrgeo/spark/SlopeDriver.scala | Scala | apache-2.0 | 1,504 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.security.auth
import kafka.common.KafkaException
/**
* ResourceTypes.
*/
sealed trait ResourceType {
def name: String
}
case object Cluster extends ResourceType {
val name = "Cluster"
}
case object Topic extends ResourceType {
val name = "Topic"
}
case object ConsumerGroup extends ResourceType {
val name = "ConsumerGroup"
}
object ResourceType {
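  // For example, fromString("topic") and fromString("TOPIC") both resolve to Topic, since matching is
  // case-insensitive; an unrecognised name raises a KafkaException listing the valid resource types.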
def fromString(resourceType: String): ResourceType = {
val rType = values.find(rType => rType.name.equalsIgnoreCase(resourceType))
rType.getOrElse(throw new KafkaException(resourceType + " not a valid resourceType name. The valid names are " + values.mkString(",")))
}
def values: Seq[ResourceType] = List(Cluster, Topic, ConsumerGroup)
} | usakey/kafka | core/src/main/scala/kafka/security/auth/ResourceType.scala | Scala | apache-2.0 | 1,544 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.dllib.nn.ops
import com.intel.analytics.bigdl.dllib.nn.CMulTable
import com.intel.analytics.bigdl.dllib.tensor.Tensor
import com.intel.analytics.bigdl.dllib.utils.T
import org.scalatest.{FlatSpec, Matchers}
class MultiplySpec extends FlatSpec with Matchers {
"Multiply operation" should "works correctly" in {
import com.intel.analytics.bigdl.numeric.NumericFloat
val input =
T(
Tensor(T(1f, 2f, 3f)),
Tensor(T(2f, 2f, 4f)),
Tensor(T(7f, 3f, 1f))
)
val expectOutput = Tensor(T(14f, 12f, 12f))
val output = CMulTable().forward(input)
output should be(expectOutput)
}
}
| intel-analytics/BigDL | scala/dllib/src/test/scala/com/intel/analytics/bigdl/dllib/nn/ops/MultplySpec.scala | Scala | apache-2.0 | 1,268 |
package command
case class SmsJob() extends Job {
private var id: Int = 0
  private var _sms: Option[Sms] = None

  def setSms(sms: Sms): Unit = {
    _sms = Some(sms)
    id = id + 1
  }
override def run: Unit = {
println(s"Job ID: $id executing sms jobs.")
_sms.foreach(_.sendSms())
}
}
| BBK-PiJ-2015-67/sdp-portfolio | exercises/week10/src/main/scala/command/SmsJob.scala | Scala | unlicense | 287 |
/**
* Copyright 2016, deepsense.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.deepsense.deeplang.doperables.spark.wrappers.estimators
import scala.reflect.runtime.universe._
import org.apache.spark.ml.classification.{GBTClassificationModel => SparkGBTClassificationModel, GBTClassifier => SparkGBTClassifier}
import io.deepsense.commons.utils.Logging
import io.deepsense.deeplang.TypeUtils
import io.deepsense.deeplang.doperables.SparkEstimatorWrapper
import io.deepsense.deeplang.doperables.spark.wrappers.models.{GBTClassificationModel, VanillaGBTClassificationModel}
import io.deepsense.deeplang.doperables.spark.wrappers.params.GBTParams
import io.deepsense.deeplang.doperables.spark.wrappers.params.common.HasClassificationImpurityParam
import io.deepsense.deeplang.doperables.stringindexingwrapper.StringIndexingEstimatorWrapper
import io.deepsense.deeplang.params.Param
import io.deepsense.deeplang.params.choice.Choice
import io.deepsense.deeplang.params.wrappers.spark.ChoiceParamWrapper
class GBTClassifier private (val vanillaGBTClassifier: VanillaGBTClassifier)
extends StringIndexingEstimatorWrapper[
SparkGBTClassificationModel,
SparkGBTClassifier,
VanillaGBTClassificationModel,
GBTClassificationModel](vanillaGBTClassifier) {
def this() = this(new VanillaGBTClassifier())
}
class VanillaGBTClassifier()
extends SparkEstimatorWrapper[
SparkGBTClassificationModel,
SparkGBTClassifier,
VanillaGBTClassificationModel]
with GBTParams
with HasClassificationImpurityParam
with Logging {
import GBTClassifier._
override lazy val maxIterationsDefault = 10.0
val estimator = TypeUtils.instanceOfType(typeTag[SparkGBTClassifier])
val lossType = new ChoiceParamWrapper[SparkGBTClassifier, LossType](
name = "loss function",
description = Some("The loss function which GBT tries to minimize."),
sparkParamGetter = _.lossType)
setDefault(lossType, Logistic())
override val params: Array[Param[_]] = Array(
impurity,
lossType,
maxBins,
maxDepth,
maxIterations,
minInfoGain,
minInstancesPerNode,
seed,
stepSize,
subsamplingRate,
labelColumn,
featuresColumn,
predictionColumn)
override protected def estimatorName: String = classOf[GBTClassifier].getSimpleName
}
object GBTClassifier {
sealed abstract class LossType(override val name: String) extends Choice {
override val params: Array[Param[_]] = Array()
override val choiceOrder: List[Class[_ <: Choice]] = List(
classOf[Logistic]
)
}
case class Logistic() extends LossType("logistic")
}
| deepsense-io/seahorse-workflow-executor | deeplang/src/main/scala/io/deepsense/deeplang/doperables/spark/wrappers/estimators/GBTClassifier.scala | Scala | apache-2.0 | 3,132 |
/* Copyright 2009-2016 EPFL, Lausanne */
package leon.lang
import leon.annotation._
object Set {
@library
def empty[T] = Set[T]()
@ignore
def apply[T](elems: T*) = {
new Set[T](scala.collection.immutable.Set[T](elems : _*))
}
@extern @library
def mkString[A](map: Set[A], infix: String, fA : A => String) = {
map.theSet.map(fA).toList.sorted.mkString(infix)
}
}
@ignore
case class Set[T](val theSet: scala.collection.immutable.Set[T]) {
def +(a: T): Set[T] = new Set[T](theSet + a)
def ++(a: Set[T]): Set[T] = new Set[T](theSet ++ a.theSet)
def -(a: T): Set[T] = new Set[T](theSet - a)
def --(a: Set[T]): Set[T] = new Set[T](theSet -- a.theSet)
def size: BigInt = theSet.size
def contains(a: T): Boolean = theSet.contains(a)
def isEmpty: Boolean = theSet.isEmpty
def subsetOf(b: Set[T]): Boolean = theSet.subsetOf(b.theSet)
def &(a: Set[T]): Set[T] = new Set[T](theSet & a.theSet)
}
| epfl-lara/leon | library/leon/lang/Set.scala | Scala | gpl-3.0 | 933 |
import scala.tools.nsc.doc.model._
import scala.tools.partest.ScaladocModelTest
import language._
object Test extends ScaladocModelTest {
override def resourceFile = "negative-defaults.scala"
override def scaladocSettings = ""
def testModel(root: Package) = {
import access._
val pkg = root._package("test")
val intparam = pkg._object("Test")._method("int").valueParams.head.head
val longparam = pkg._object("Test")._method("long").valueParams.head.head
val floatparam = pkg._object("Test")._method("float").valueParams.head.head
val doubleparam = pkg._object("Test")._method("double").valueParams.head.head
val spacesparam = pkg._object("Test")._method("spaces").valueParams.head.head
println(intparam.defaultValue)
println(longparam.defaultValue)
println(floatparam.defaultValue)
println(doubleparam.defaultValue)
println(spacesparam.defaultValue)
}
}
| lrytz/scala | test/scaladoc/run/t10391.scala | Scala | apache-2.0 | 919 |
/**
* Copyright 2013-2015, AlwaysResolve Project (alwaysresolve.org), MOYD.CO LTD
* This file incorporates work covered by the following copyright and permission notice:
*
* Copyright 2012 silenteh
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package models
import com.fasterxml.jackson.annotation.{JsonIgnoreProperties, JsonProperty}
import configs.ConfigService
import org.slf4j.LoggerFactory
import records.A
import utils.HostnameUtils
import scala.util.Random
@JsonIgnoreProperties(Array("typ"))
case class AddressHost(
@JsonProperty("class") cls: String = null,
@JsonProperty("name") name: String = null,
@JsonProperty("value") ips: Array[WeightedIP] = null,
@JsonProperty("ttl") timetolive: Long
) extends Host("A") {
val logger = LoggerFactory.getLogger("app")
val randomizeRecords = ConfigService.config.getBoolean("randomizeRecords")
def setName(newname: String) = AddressHost(cls, newname, ips, timetolive)
override def equals(other: Any) = other match {
case h: AddressHost => cls == h.cls && name == h.name && h.ips.forall(wip => ips.exists(_.ip == wip.ip))
case _ => false
}
override def toAbsoluteNames(domain: ExtendedDomain) =
new AddressHost(cls, HostnameUtils.absoluteHostName(name, domain.fullName), ips, timetolive)
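  // Packs a dotted-quad IPv4 string into a Long, e.g. ipToLong("1.2.3.4") == 16909060L (0x01020304).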
  private def ipToLong(ip: String) = ip.split("""\.""").reverse.foldRight(0L) { case (part, total) => (total << 8) + part.toLong }
protected def getRData =
if (ips.size == 1) {
logger.debug("Single A")
ips(0).weightIP.map(ip => new A(ipToLong(ip), timetolive))
}
else if (randomizeRecords == true) {
/**
      If there is an array of weighted IPs, pick one of them at random. (It is WRONG to create several
      records with the same name and different weights; a single record with several weighted values is enough.)
*/
logger.debug("Collapsing duplicate weighted As")
val list = ips.map(wip => wip.weightIP.map(ip => new A(ipToLong(ip), timetolive))).flatten.toList
Array[A](Random.shuffle(list).head)
}
else ips.map(wip => wip.weightIP.map(ip => new A(ipToLong(ip), timetolive))).flatten
}
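// Illustrative sketch (hypothetical values): WeightedIP(weight = 3, ip = "10.0.0.1").weightIP expands to
// Array("10.0.0.1", "10.0.0.1", "10.0.0.1"), so a weight-3 address is three times as likely to be chosen
// when randomizeRecords collapses the weighted candidates with Random.shuffle.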
case class WeightedIP(
@JsonProperty("weight") weight: Int = 1,
@JsonProperty("ip") ip: String = null
) {
def weightIP =
if (weight < 1) Array[String]() else Array.tabulate(weight) { i => ip}
} | Moydco/AlwaysResolveDNS | src/main/scala/models/AddressHost.scala | Scala | apache-2.0 | 2,824 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package views.transactor
import forms.DeclarationCapacityForm
import forms.DeclarationCapacityForm._
import org.jsoup.Jsoup
import views.VatRegViewSpec
import views.html.transactor.DeclarationCapacityView
class DeclarationCapacityViewSpec extends VatRegViewSpec {
val form = DeclarationCapacityForm()
val view = app.injector.instanceOf[DeclarationCapacityView]
implicit val doc = Jsoup.parse(view(form).body)
object ExpectedContent {
val heading = "What is your role with the business you are registering for VAT?"
val title = s"$heading - Register for VAT - GOV.UK"
val radio1 = "Accountant"
val radio2 = "Appointed representative"
val radio3 = "Board member"
val radio4 = "Employee authorised to complete the application"
val radio5 = "Other"
val continue = "Save and continue"
}
"The SellOrMoveNip view" must {
"have a back link" in new ViewSetup {
doc.hasBackLink mustBe true
}
"have the correct heading" in new ViewSetup {
doc.heading mustBe Some(ExpectedContent.heading)
}
"have the correct title" in new ViewSetup {
doc.title() mustBe ExpectedContent.title
}
"have the correct radios" in new ViewSetup {
doc.radio(accountant) mustBe Some(ExpectedContent.radio1)
doc.radio(representative) mustBe Some(ExpectedContent.radio2)
doc.radio(boardMember) mustBe Some(ExpectedContent.radio3)
doc.radio(authorisedEmployee) mustBe Some(ExpectedContent.radio4)
doc.radio(other) mustBe Some(ExpectedContent.radio5)
}
"have a primary action" in new ViewSetup {
doc.submitButton mustBe Some(ExpectedContent.continue)
}
}
}
| hmrc/vat-registration-frontend | test/views/transactor/DeclarationCapacityViewSpec.scala | Scala | apache-2.0 | 2,268 |
package blended.updater.config
/**
* A bundle with a start configuration.
 * Used as part of [[Profile]] or [[FeatureConfig]].
*
* @param artifact The artifact (file).
* @param start `true` if the bundle should be auto-started on container start.
* @param startLevel The start level of this bundle.
* @see [[Profile]]
* @see [[FeatureConfig]]
*/
case class BundleConfig(
artifact : Artifact,
start : Boolean,
startLevel : Option[Int]
) {
def url : String = artifact.url
def jarName : Option[String] = artifact.fileName
def sha1Sum : Option[String] = artifact.sha1Sum
override def toString() : String = s"${getClass().getSimpleName()}(artifact=$artifact,start=$start,url=$url,startLevel=$startLevel)"
}
object BundleConfig extends ((Artifact, Boolean, Option[Int]) => BundleConfig) {
def apply(
url : String,
jarName : String = null,
sha1Sum : String = null,
start : Boolean = false,
startLevel : Integer = null
) : BundleConfig =
BundleConfig(
artifact = Artifact(fileName = Option(jarName), url = url, sha1Sum = Option(sha1Sum)),
start = start,
startLevel = if (startLevel != null) Some(startLevel.intValue()) else None
)
}
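// Illustrative usage sketch (hypothetical artifact URL, not taken from a real container profile):
// BundleConfig(url = "mvn:org.example/example-bundle/1.0.0", start = true, startLevel = 4) describes a
// bundle that is auto-started at start level 4, with jarName and sha1Sum left as None.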
| woq-blended/blended | blended.updater.config/shared/src/main/scala/blended/updater/config/BundleConfig.scala | Scala | apache-2.0 | 1,206 |
/*
* Copyright 2014–2017 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.sql
import slamdata.Predef._
import scalaz._
sealed abstract class OrderType extends Product with Serializable
final case object ASC extends OrderType
final case object DESC extends OrderType
object OrderType {
implicit val equal: Equal[OrderType] = Equal.equalRef
implicit val show: Show[OrderType] = Show.showFromToString
}
| drostron/quasar | frontend/src/main/scala/quasar/sql/OrderType.scala | Scala | apache-2.0 | 955 |
package org.jetbrains.plugins.scala.lang.parser.parsing.top
import com.intellij.psi.tree.IElementType
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.parser.{ErrMsg, ScalaElementTypes, ScalaTokenBinders}
import org.jetbrains.plugins.scala.lang.parser.parsing.base.Modifier
import org.jetbrains.plugins.scala.lang.parser.parsing.builder.ScalaPsiBuilder
import org.jetbrains.plugins.scala.lang.parser.parsing.expressions.Annotation
/**
* @author Alexander Podkhalyuzin
* Date: 05.02.2008
*/
/*
* TmplDef ::= {Annotation} {Modifier}
 *           [case] class ClassDef
* | [case] object ObjectDef
* | trait TraitDef
*
*/
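// For example, definitions beginning with `@inline final case class ...`, `case object ...` or
// `sealed trait ...` all match this production: optional annotations, then modifiers, then the
// (optionally case-prefixed) class/object/trait keyword.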
object TmplDef extends TmplDef {
override protected def classDef = ClassDef
override protected def objectDef = ObjectDef
override protected def traitDef = TraitDef
override protected def annotation = Annotation
}
trait TmplDef {
protected def classDef: ClassDef
protected def objectDef: ObjectDef
protected def traitDef: TraitDef
protected def annotation: Annotation
def parse(builder: ScalaPsiBuilder): Boolean = {
val templateMarker = builder.mark()
templateMarker.setCustomEdgeTokenBinders(ScalaTokenBinders.PRECEEDING_COMMENTS_TOKEN, null)
val annotationsMarker = builder.mark()
while (annotation.parse(builder)) {
}
annotationsMarker.done(ScalaElementTypes.ANNOTATIONS)
annotationsMarker.setCustomEdgeTokenBinders(ScalaTokenBinders.DEFAULT_LEFT_EDGE_BINDER, null)
val modifierMarker = builder.mark()
while (Modifier.parse(builder)) {
}
val caseState = isCaseState(builder)
modifierMarker.done(ScalaElementTypes.MODIFIERS)
templateParser(builder.getTokenType, caseState) match {
case Some((parse, elementType)) =>
builder.advanceLexer()
if (parse(builder)) {
templateMarker.done(elementType)
} else {
templateMarker.drop()
}
true
case None =>
templateMarker.rollbackTo()
false
}
}
private def isCaseState(builder: ScalaPsiBuilder) = {
val caseMarker = builder.mark()
val result = builder.getTokenType match {
case ScalaTokenTypes.kCASE =>
builder.advanceLexer() // Ate case
true
case _ => false
}
builder.getTokenType match {
case ScalaTokenTypes.kTRAIT if result =>
caseMarker.rollbackTo()
builder.error(ErrMsg("wrong.case.modifier"))
builder.advanceLexer() // Ate case
case _ => caseMarker.drop()
}
result
}
private def templateParser(tokenType: IElementType, caseState: Boolean) = tokenType match {
case ScalaTokenTypes.kCLASS => Some(classDef.parse _, ScalaElementTypes.CLASS_DEFINITION)
case ScalaTokenTypes.kOBJECT => Some(objectDef.parse _, ScalaElementTypes.OBJECT_DEFINITION)
case ScalaTokenTypes.kTRAIT =>
def parse(builder: ScalaPsiBuilder): Boolean = {
val result = traitDef.parse(builder)
if (caseState) true else result
}
Some(parse _, ScalaElementTypes.TRAIT_DEFINITION)
case _ => None
}
}
| gtache/intellij-lsp | intellij-lsp-dotty/src/org/jetbrains/plugins/scala/lang/parser/parsing/top/TmplDef.scala | Scala | apache-2.0 | 3,126 |
package services
import org.scalamock.scalatest.MockFactory
import org.scalatest.{BeforeAndAfter, Matchers, WordSpecLike}
import play.api.Configuration
class ConfigurationServiceImplTest extends WordSpecLike with BeforeAndAfter with Matchers with MockFactory {
var config: Configuration = _
var service: ConfigurationServiceImpl = _
before {
config = stub[Configuration]
service = new ConfigurationServiceImpl(config)
}
"ConfigurationServiceImplTest" should {
"get salt" in {
val salt = "test salt"
(config.getString _).when("stocks.salt", *).returns(Some(salt))
service.salt shouldEqual salt
}
"get password" in {
val password = "test password"
(config.getString _).when("stocks.password", *).returns(Some(password))
service.password shouldEqual password
}
"get login" in {
val login = "test login"
(config.getString _).when("stocks.login", *).returns(Some(login))
service.login shouldEqual login
}
}
}
| atanana/vue-stocks | test/services/ConfigurationServiceImplTest.scala | Scala | mit | 1,010 |
package nars.logic.language
import nars.storage.Memory
//remove if not needed
import scala.collection.JavaConversions._
object Instance {
/**
* Try to make a new compound from two components. Called by the inference rules.
* <p>
* A {-- B becomes {A} --> B
   * @param subject The first component
   * @param predicate The second component
* @param memory Reference to the memory
* @return A compound generated or null
*/
def make(subject: Term, predicate: Term, memory: Memory): Statement = {
Inheritance.make(SetExt.make(subject, memory), predicate, memory)
}
}
| printedheart/opennars | nars_lab_x/nars_scala/src/main/scala/nars/language/Instance.scala | Scala | agpl-3.0 | 596 |
import java.io._
import scala.xml._
import scala.util.matching.Regex
//IMPROVE ERROR HANDLING EVERYWHERE, ESP. IN XML CREATION!
class PerseusText(
val urn: String,
val filePath: String) {
def xmlText:scala.xml.Elem = {
val plnText = scala.io.Source.fromFile(filePath).mkString.replaceAll("TEI.2", "TEI")
if ("&.*?;".r.findFirstIn(plnText).isEmpty) {
return XML.loadString(plnText)
} else {
//dealing with referenced but undeclared entities, just commenting out for now, but this is lazy and they should be fixed better (but maybe in the next EpiDoc process?)
return XML.loadString("&.*?;".r.replaceAllIn(plnText, "<!--" + "$0" + "-->"))
//change this to log : ("This text has the following problematic undeclared entities:" + "&.*?;".r.findAllIn(plnText).toList.distinct) //we'll need to go back and deal with them later,change the printing to logging
}
    //add error handling to the above, in case the XML still isn't valid
}
def citeSchema: scala.collection.immutable.Seq[scala.xml.NodeSeq] ={
(xmlText \\\\ "refsDecl" \\ "state").map(x=> x \\ "@unit")
}
def refs: List[String] ={
citeSchema.toList.map(x=>x.toString)
}
//Tests and Fixes
//checking and fixing if lines are numbered every 5-do anytime
def lineNumFix:Any ={
//check if all lines have numbers, if not, number the lines, port fix-misc.pl
}
def lineNumTest:Any = {
if (citeSchema.map(x=>x.mkString).contains("line")){
print("Lines required re-numbering")
return lineNumFix
} else{
print("No lines requiring re-numbering")
}
}
//checking and fixing if divs are numbered, e.g. div1, div2, div3 instead of just div
def numDivFix:Any = {
"div\\\\d".r.replaceAllIn(xmlText.mkString, "div")
}
def numDivTest:Any = {
if ("div\\\\d".r.findFirstIn(xmlText.mkString).mkString.contains("div")) {
print("Numbered divs changed to unnumbered divs")
return numDivFix
} else{
print("No numbered divs found")
}
}
//checking and fixing for problematic milestones, changing them to <div>s
def probMilestoneFix:Any ={
//port milestones_to_divs.xsl
}
def probMilestoneTest: Any = {
    if (refs.map(x => (xmlText \\ "milestone").map(m => m.mkString).filter(m => m.contains(x)).toList).flatten.nonEmpty) {
print("Problematic milestones exist")
return probMilestoneFix
} else{
print("No problematic milestones found")
}
//TO-DO: speakers to said (port speakerstosaid.xsl), splitting multiple works from a single file, unicode conversions, q tags that get split, <sp> and <said> tags that get split
}
}
val mTest = new PerseusText("tlg0032.tlg011.perseus-grc1", "milestoneTest.xml")
val nTest = new PerseusText("tlg0001.tlg001.perseus-grc1", "linerenumtest.xml")
//mTest.probMilestoneTest should find problems, but the other tests should not
//nTest.numDivTest should find and fix problems, nTest.lineNumTest should find problems, nTest.probMilestoneTest should find NO problems
| srdee/tei-conversion-tools | scala/tests.scala | Scala | gpl-3.0 | 2,933 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.spark.testsuite.partition
import org.apache.spark.sql.CarbonEnv
import org.apache.spark.sql.test.util.QueryTest
import org.scalatest.BeforeAndAfterAll
import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.metadata.datatype.DataTypes
import org.apache.carbondata.core.metadata.encoder.Encoding
import org.apache.carbondata.core.metadata.schema.partition.PartitionType
import org.apache.carbondata.core.util.CarbonProperties
class TestDDLForPartitionTableWithDefaultProperties extends QueryTest with BeforeAndAfterAll {
override def beforeAll = {
dropTable
}
test("create partition table: hash partition") {
sql(
"""
| CREATE TABLE default.hashTable (empname String, designation String, doj Timestamp,
| workgroupcategory int, workgroupcategoryname String, deptno int, deptname String,
| projectcode int, projectjoindate Timestamp, projectenddate Timestamp,attendance int,
| utilization int,salary int)
| PARTITIONED BY (empno int)
| STORED BY 'org.apache.carbondata.format'
| TBLPROPERTIES('PARTITION_TYPE'='HASH','NUM_PARTITIONS'='3')
""".stripMargin)
val carbonTable = CarbonEnv.getCarbonTable(Some("default"), "hashTable")(sqlContext.sparkSession)
val partitionInfo = carbonTable.getPartitionInfo(carbonTable.getTableName)
assert(partitionInfo != null)
assert(partitionInfo.getColumnSchemaList.get(0).getColumnName.equalsIgnoreCase("empno"))
assert(partitionInfo.getColumnSchemaList.get(0).getDataType == DataTypes.INT)
assert(partitionInfo.getColumnSchemaList.get(0).getEncodingList.size == 0)
assert(partitionInfo.getPartitionType == PartitionType.HASH)
assert(partitionInfo.getNumPartitions == 3)
}
test("create partition table: range partition") {
sql(
"""
| CREATE TABLE default.rangeTable (empno int, empname String, designation String,
| workgroupcategory int, workgroupcategoryname String, deptno int, deptname String,
| projectcode int, projectjoindate Timestamp, projectenddate Timestamp,attendance int,
| utilization int,salary int)
| PARTITIONED BY (doj Timestamp)
| STORED BY 'org.apache.carbondata.format'
| TBLPROPERTIES('PARTITION_TYPE'='RANGE',
| 'RANGE_INFO'='2017-06-11 00:00:02, 2017-06-13 23:59:59','DICTIONARY_INCLUDE'='doj')
""".stripMargin)
val carbonTable = CarbonEnv.getCarbonTable(Some("default"), "rangeTable")(sqlContext.sparkSession)
val partitionInfo = carbonTable.getPartitionInfo(carbonTable.getTableName)
assert(partitionInfo != null)
assert(partitionInfo.getColumnSchemaList.get(0).getColumnName.equalsIgnoreCase("doj"))
assert(partitionInfo.getColumnSchemaList.get(0).getDataType == DataTypes.TIMESTAMP)
assert(partitionInfo.getColumnSchemaList.get(0).getEncodingList.size == 3)
assert(partitionInfo.getColumnSchemaList.get(0).getEncodingList.get(0) == Encoding.DICTIONARY)
assert(partitionInfo.getColumnSchemaList.get(0).getEncodingList.get(1) == Encoding.DIRECT_DICTIONARY)
assert(partitionInfo.getColumnSchemaList.get(0).getEncodingList.get(2) == Encoding.INVERTED_INDEX)
assert(partitionInfo.getPartitionType == PartitionType.RANGE)
assert(partitionInfo.getRangeInfo.size == 2)
assert(partitionInfo.getRangeInfo.get(0).equals("2017-06-11 00:00:02"))
assert(partitionInfo.getRangeInfo.get(1).equals("2017-06-13 23:59:59"))
}
test("create partition table: list partition with timestamp datatype") {
sql(
"""
| CREATE TABLE default.listTable (empno int, empname String, designation String, doj Timestamp,
| workgroupcategoryname String, deptno int, deptname String,
| projectcode int, projectjoindate Timestamp, attendance int,
| utilization int,salary int)
| PARTITIONED BY (projectenddate Timestamp)
| STORED BY 'org.apache.carbondata.format'
| TBLPROPERTIES('PARTITION_TYPE'='LIST',
| 'LIST_INFO'='2017-06-11 00:00:02, 2017-06-13 23:59:59',
| 'DICTIONARY_INCLUDE'='projectenddate')
""".stripMargin)
val carbonTable = CarbonEnv.getCarbonTable(Some("default"), "listTable")(sqlContext.sparkSession)
val partitionInfo = carbonTable.getPartitionInfo(carbonTable.getTableName)
assert(partitionInfo != null)
assert(partitionInfo.getColumnSchemaList.get(0).getColumnName.equalsIgnoreCase("projectenddate"))
assert(partitionInfo.getColumnSchemaList.get(0).getDataType == DataTypes.TIMESTAMP)
assert(partitionInfo.getColumnSchemaList.get(0).getEncodingList.size == 3)
assert(partitionInfo.getColumnSchemaList.get(0).getEncodingList.get(0) == Encoding.DICTIONARY)
assert(partitionInfo.getColumnSchemaList.get(0).getEncodingList.get(1) == Encoding.DIRECT_DICTIONARY)
assert(partitionInfo.getColumnSchemaList.get(0).getEncodingList.get(2) == Encoding.INVERTED_INDEX)
assert(partitionInfo.getPartitionType == PartitionType.LIST)
assert(partitionInfo.getListInfo.size == 2)
assert(partitionInfo.getListInfo.get(0).size == 1)
assert(partitionInfo.getListInfo.get(0).get(0).equals("2017-06-11 00:00:02"))
assert(partitionInfo.getListInfo.get(1).size == 1)
assert(partitionInfo.getListInfo.get(1).get(0).equals("2017-06-13 23:59:59"))
}
test("create partition table: list partition with date datatype") {
CarbonProperties.getInstance()
.addProperty(CarbonCommonConstants.CARBON_DATE_FORMAT, "yyyy-MM-dd")
sql(
"""
| CREATE TABLE default.listTableDate (empno int, empname String, designation String, doj Timestamp,
| workgroupcategoryname String, deptno int, deptname String,
| projectcode int, projectjoindate Timestamp, attendance int,
| utilization int,salary int)
| PARTITIONED BY (projectenddate date)
| STORED BY 'org.apache.carbondata.format'
| TBLPROPERTIES('PARTITION_TYPE'='LIST',
| 'LIST_INFO'='2017-06-11 , 2017-06-13')
""".stripMargin)
val carbonTable = CarbonEnv.getCarbonTable(Some("default"), "listTableDate")(sqlContext.sparkSession)
val partitionInfo = carbonTable.getPartitionInfo(carbonTable.getTableName)
assert(partitionInfo != null)
assert(partitionInfo.getColumnSchemaList.get(0).getColumnName.equalsIgnoreCase("projectenddate"))
assert(partitionInfo.getColumnSchemaList.get(0).getDataType == DataTypes.DATE)
assert(partitionInfo.getColumnSchemaList.get(0).getEncodingList.size == 3)
assert(partitionInfo.getColumnSchemaList.get(0).getEncodingList.get(0) == Encoding.DICTIONARY)
assert(partitionInfo.getColumnSchemaList.get(0).getEncodingList.get(1) == Encoding.DIRECT_DICTIONARY)
assert(partitionInfo.getColumnSchemaList.get(0).getEncodingList.get(2) == Encoding.INVERTED_INDEX)
assert(partitionInfo.getPartitionType == PartitionType.LIST)
assert(partitionInfo.getListInfo.size == 2)
assert(partitionInfo.getListInfo.get(0).size == 1)
assert(partitionInfo.getListInfo.get(0).get(0).equals("2017-06-11"))
assert(partitionInfo.getListInfo.get(1).size == 1)
assert(partitionInfo.getListInfo.get(1).get(0).equals("2017-06-13"))
}
test("test exception when values in list_info can not match partition column type") {
sql("DROP TABLE IF EXISTS test_list_int")
val exception_test_list_int: Exception = intercept[Exception] {
sql(
"""
| CREATE TABLE test_list_int(col1 INT, col2 STRING)
| PARTITIONED BY (col3 INT) STORED BY 'carbondata'
| TBLPROPERTIES('PARTITION_TYPE'='LIST', 'LIST_INFO'='1,2,(abc,efg)')
""".stripMargin)
}
assert(exception_test_list_int.getMessage.contains("Invalid Partition Values"))
}
test("test exception when partition values in rangeTable are in group ") {
sql("DROP TABLE IF EXISTS rangeTable")
val exception_test_list_int: Exception = intercept[Exception] {
sql(
"""
|CREATE TABLE default.rangeTable (empno int, empname String, designation String,
| workgroupcategory int, workgroupcategoryname String, deptno int, deptname String,
| projectcode int, projectjoindate Timestamp, projectenddate Timestamp,attendance int,
| utilization int,salary int)
| PARTITIONED BY (doj Timestamp)
| STORED BY 'org.apache.carbondata.format'
| TBLPROPERTIES('PARTITION_TYPE'='RANGE',
| 'RANGE_INFO'='2017-06-11 00:00:02, (2017-06-13 23:59:59, 2017-09-13 23:45:59)')
""".stripMargin)
}
    assert(exception_test_range.getMessage.contains("Invalid Partition Values"))
}
test("test exception when values in rangeTable does not match partition column type") {
sql("DROP TABLE IF EXISTS rangeTable")
    val exception_test_range: Exception = intercept[Exception] {
sql(
"""
|CREATE TABLE default.rangeTable (empno int, empname String, designation String,
| workgroupcategory int, workgroupcategoryname String, deptno int, deptname String,
| projectcode int, projectjoindate Timestamp, projectenddate Timestamp,attendance int,
| utilization int,salary int)
| PARTITIONED BY (doj Timestamp)
| STORED BY 'org.apache.carbondata.format'
| TBLPROPERTIES('PARTITION_TYPE'='RANGE',
| 'RANGE_INFO'='2017-06-11 00:00:02, abc, 2017-09-13 23:45:59')
""".stripMargin)
}
    assert(exception_test_range.getMessage.contains("Invalid Partition Values"))
}
override def afterAll = {
dropTable
}
def dropTable = {
sql("drop table if exists hashTable")
sql("drop table if exists rangeTable")
sql("drop table if exists listTable")
sql("drop table if exists listTableDate")
}
}
| sgururajshetty/carbondata | integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/partition/TestDDLForPartitionTableWithDefaultProperties.scala | Scala | apache-2.0 | 10,628 |
package service
import model.domain.{Category, CategoryDistribution, StoreProduct}
import scala.concurrent.Future
trait StoreFront {
/**
* Retrieves a map of categories alongside a count of products in each category.
* @return A Map where each entry is a [[Category]] together with the total count of products in that category.
*/
def getCategoryDistribution: Future[CategoryDistribution]
/**
* Requirement 2:
   * Retrieves the set of distinct product titles that belong to the [[Category]] with the given title
   * (a usage sketch of this trait follows its definition).
   * @param title The title of the category whose product titles should be retrieved.
   * @return A future wrapping the set of distinct product titles for that category.
*/
def productTitlesForCategory(title: String): Future[Set[String]]
/**
* Retrieves a product alongside the full set of attributes and categories.
* @param title The title of the product to retrieve from storage.
* @return A future wrapping an Option, [[Some]] if a product with the given title exists, [[None]] otherwise.
*/
def getProduct(title: String): Future[Option[StoreProduct]]
def addProduct(product: StoreProduct): Future[Unit]
/**
* Adds a product to a category. Implementors of this method will check
* that the product title belongs to an existing product.
   * @param title The title of the product to add to a category.
* @param category A category to add the product to.
* @return A future that will succeed if the attribution is successful.
*/
def addProductToCategory(title: String, category: Category): Future[Unit]
}
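/**
 * A minimal usage sketch: it shows how a caller might combine the operations above. The
 * `storeFront` instance is assumed to come from some concrete implementation of [[StoreFront]],
 * and the category title passed in is purely illustrative.
 */
object StoreFrontUsageExample {
  import scala.concurrent.ExecutionContext.Implicits.global

  def describeCategory(storeFront: StoreFront, categoryTitle: String): Future[String] =
    for {
      distribution <- storeFront.getCategoryDistribution
      titles       <- storeFront.productTitlesForCategory(categoryTitle)
    } yield s"'$categoryTitle' has ${titles.size} distinct product titles (full distribution: $distribution)"
}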
| alexflav23/exercises | rps/app/service/StoreFront.scala | Scala | apache-2.0 | 1,531 |
/*
* Copyright (c) 2017-2019 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics
package snowplow.enrich
package common.enrichments.registry
package pii
// Scala
import scala.collection.JavaConverters._
import scala.collection.mutable.MutableList
// Scala libraries
import org.json4s
import org.json4s.{DefaultFormats, Diff, JValue}
import org.json4s.JsonAST._
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods
import org.json4s.jackson.JsonMethods.{compact, parse, render}
import org.json4s.jackson.Serialization.write
import org.json4s.Extraction.decompose
// Java
import org.apache.commons.codec.digest.DigestUtils
// Java libraries
import com.fasterxml.jackson.databind.JsonNode
import com.fasterxml.jackson.databind.node.{ArrayNode, ObjectNode, TextNode}
import com.jayway.jsonpath.{Configuration, JsonPath => JJsonPath}
import com.jayway.jsonpath.MapFunction
// Scalaz
import scalaz._
import Scalaz._
// Iglu
import iglu.client.validation.ProcessingMessageMethods._
import iglu.client.{SchemaCriterion, SchemaKey}
// This project
import common.ValidatedNelMessage
import common.utils.ScalazJson4sUtils.{extract, fieldExists}
import common.outputs.EnrichedEvent
/**
* Companion object. Lets us create a PiiPseudonymizerEnrichment
 * from a JValue. (A sketch of the expected configuration shape follows this object.)
*/
object PiiPseudonymizerEnrichment extends ParseableEnrichment {
implicit val formats = DefaultFormats + new PiiStrategyPseudonymizeSerializer
override val supportedSchema =
SchemaCriterion("com.snowplowanalytics.snowplow.enrichments", "pii_enrichment_config", "jsonschema", 2, 0, 0)
def parse(config: JValue, schemaKey: SchemaKey): ValidatedNelMessage[PiiPseudonymizerEnrichment] = {
for {
conf <- matchesSchema(config, schemaKey)
emitIdentificationEvent = extract[Boolean](conf, "emitEvent").toOption
.getOrElse(false)
piiFields <- extract[List[JObject]](conf, "parameters", "pii").leftMap(_.getMessage)
piiStrategy <- extractStrategy(config)
piiFieldList <- extractFields(piiFields)
} yield PiiPseudonymizerEnrichment(piiFieldList, emitIdentificationEvent, piiStrategy)
}.leftMap(_.toProcessingMessageNel)
private[pii] def getHashFunction(strategyFunction: String): Validation[String, DigestFunction] =
strategyFunction match {
case "MD2" => { DigestUtils.md2Hex(_: Array[Byte]) }.success
case "MD5" => { DigestUtils.md5Hex(_: Array[Byte]) }.success
case "SHA-1" => { DigestUtils.sha1Hex(_: Array[Byte]) }.success
case "SHA-256" => { DigestUtils.sha256Hex(_: Array[Byte]) }.success
case "SHA-384" => { DigestUtils.sha384Hex(_: Array[Byte]) }.success
case "SHA-512" => { DigestUtils.sha512Hex(_: Array[Byte]) }.success
case fName => s"Unknown function $fName".failure
}
private def extractFields(piiFields: List[JObject]): Validation[String, List[PiiField]] =
piiFields.map {
case field: JObject =>
if (fieldExists(field, "pojo"))
extractString(field, "pojo", "field").flatMap(extractPiiScalarField)
else if (fieldExists(field, "json")) extractPiiJsonField(field \\ "json")
else
s"PII Configuration: pii field does not include 'pojo' nor 'json' fields. Got: [${compact(field)}]"
.failure[PiiField]
case json => s"PII Configuration: pii field does not contain an object. Got: [${compact(json)}]".failure[PiiField]
}.sequenceU
private def extractPiiScalarField(fieldName: String): Validation[String, PiiScalar] =
ScalarMutators
.get(fieldName)
.map(PiiScalar(_).success)
.getOrElse(s"The specified pojo field $fieldName is not supported".failure)
private def extractPiiJsonField(jsonField: JValue): Validation[String, PiiJson] = {
val schemaCriterion = extractString(jsonField, "schemaCriterion")
.flatMap(sc => SchemaCriterion.parse(sc).leftMap(_.getMessage))
.toValidationNel
val jsonPath = extractString(jsonField, "jsonPath").toValidationNel
val mutator = extractString(jsonField, "field")
.flatMap(getJsonMutator)
.toValidationNel
val validatedNel = (mutator |@| schemaCriterion |@| jsonPath)(PiiJson.apply)
validatedNel.leftMap(x => s"Unable to extract PII JSON: ${x.list.mkString(",")}")
}
private def getJsonMutator(fieldName: String): Validation[String, Mutator] =
JsonMutators
.get(fieldName)
.map(_.success)
.getOrElse(s"The specified json field $fieldName is not supported".failure)
private def extractString(jValue: JValue, field: String, tail: String*): Validation[String, String] =
extract[String](jValue, field, tail: _*).leftMap(_.getMessage)
private def extractStrategy(config: JValue): Validation[String, PiiStrategyPseudonymize] =
extract[PiiStrategyPseudonymize](config, "parameters", "strategy")
.leftMap(_.getMessage)
private def matchesSchema(config: JValue, schemaKey: SchemaKey): Validation[String, JValue] =
if (supportedSchema.matches(schemaKey))
config.success
else
s"Schema key $schemaKey is not supported. A '${supportedSchema.name}' enrichment must have schema '$supportedSchema'.".failure
}
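// Rough sketch of the configuration JSON consumed by `parse` above, reconstructed only from the
// extraction paths visible in this file; the inner keys of the "strategy" block are handled by
// PiiStrategyPseudonymizeSerializer (not shown here) and all values below are illustrative:
//
//   {
//     "emitEvent": true,
//     "parameters": {
//       "pii": [
//         { "pojo": { "field": "user_id" } },
//         { "json": { "field": "contexts",
//                     "schemaCriterion": "iglu:com.acme/user_context/jsonschema/1-*-*",
//                     "jsonPath": "$.email" } }
//       ],
//       "strategy": { ... }
//     }
//   }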
/**
* Implements a pseudonymization strategy using any algorithm known to DigestFunction
* @param functionName string representation of the function
* @param hashFunction the DigestFunction to apply
* @param salt salt added to the plain string before hashing
*/
final case class PiiStrategyPseudonymize(functionName: String, hashFunction: DigestFunction, salt: String)
extends PiiStrategy {
val TextEncoding = "UTF-8"
override def scramble(clearText: String): String = hash(clearText + salt)
def hash(text: String): String = hashFunction(text.getBytes(TextEncoding))
}
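// Illustrative sketch: it shows that `scramble` hashes the concatenation of the clear text and the
// salt. The hash function choice, salt and input below are made-up example values.
object PiiStrategyPseudonymizeExample {
  def sha256Strategy(salt: String): PiiStrategyPseudonymize =
    PiiStrategyPseudonymize("SHA-256", { DigestUtils.sha256Hex(_: Array[Byte]) }, salt)

  // sha256Strategy("pepper").scramble("jane@example.com") produces the same string as
  // DigestUtils.sha256Hex("jane@example.compepper".getBytes("UTF-8"))
}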
/**
* The PiiPseudonymizerEnrichment runs after all other enrichments to find fields that are configured as PII (personally
 * identifiable information) and apply some anonymization (currently only pseudonymization) to them. Due to the
 * configuration format, a single strategy is currently applied to all of the configured fields and only one
 * strategy is implemented; the enrichment itself, however, supports a strategy per field.
*
* The user may specify two types of fields in the config `pojo` or `json`. A `pojo` field is effectively a scalar field in the
* EnrichedEvent, whereas a `json` is a "context" formatted field and it can either contain a single value in the case of
* unstruct_event, or an array in the case of derived_events and contexts.
*
* @param fieldList a list of configured PiiFields
* @param emitIdentificationEvent whether to emit an identification event
* @param strategy the pseudonymization strategy to use
*/
case class PiiPseudonymizerEnrichment(fieldList: List[PiiField],
emitIdentificationEvent: Boolean,
strategy: PiiStrategy)
extends Enrichment {
implicit val json4sFormats = DefaultFormats +
new PiiModifiedFieldsSerializer +
new PiiStrategyPseudonymizeSerializer
private val UnstructEventSchema =
SchemaKey("com.snowplowanalytics.snowplow", "unstruct_event", "jsonschema", "1-0-0").toSchemaUri
def transformer(event: EnrichedEvent): Unit = {
val modifiedFields: ModifiedFields = fieldList.flatMap(_.transform(event, strategy))
event.pii =
if (emitIdentificationEvent && modifiedFields.nonEmpty)
write(
("schema" -> UnstructEventSchema) ~ ("data" -> decompose(PiiModifiedFields(modifiedFields, strategy)))
)
else null
}
}
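// Note on the flow above: `transformer` mutates the EnrichedEvent in place. Each configured PiiField
// rewrites the value it points at using the strategy and reports which fields it touched; when
// `emitIdentificationEvent` is true and at least one field was modified, that summary is serialised
// into `event.pii` wrapped in an unstruct_event envelope, otherwise `event.pii` is left as null.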
/**
* Specifies a scalar field in POJO and the strategy that should be applied to it.
* @param fieldMutator the field mutator where the strategy will be applied
*/
final case class PiiScalar(fieldMutator: Mutator) extends PiiField {
override def applyStrategy(fieldValue: String, strategy: PiiStrategy): (String, ModifiedFields) =
if (fieldValue != null) {
val modifiedValue = strategy.scramble(fieldValue)
(modifiedValue, List(ScalarModifiedField(fieldMutator.fieldName, fieldValue, modifiedValue)))
} else (null, List())
}
/**
* Specifies a strategy to use, a field mutator where the JSON can be found in the EnrichedEvent POJO, a schema criterion to
* discriminate which contexts to apply this strategy to, and a JSON path within the contexts where this strategy will
* be applied (the path may correspond to multiple fields).
*
* @param fieldMutator the field mutator for the JSON field
* @param schemaCriterion the schema for which the strategy will be applied
* @param jsonPath the path where the strategy will be applied
*/
final case class PiiJson(fieldMutator: Mutator, schemaCriterion: SchemaCriterion, jsonPath: String) extends PiiField {
implicit val json4sFormats = DefaultFormats
override def applyStrategy(fieldValue: String, strategy: PiiStrategy): (String, ModifiedFields) =
if (fieldValue != null) {
      val (parsedAndSubstituted: JValue, modifiedFields: List[JsonModifiedField]) = parse(fieldValue) match {
case JObject(jObject) => {
val jObjectMap: Map[String, JValue] = jObject.toMap
val contextMapped: Map[String, (JValue, List[JsonModifiedField])] =
jObjectMap.map(mapContextTopFields(_, strategy))
(JObject(contextMapped.mapValues(_._1).toList), contextMapped.values.map(_._2).flatten)
}
case x => (x, List.empty[JsonModifiedField])
}
      val compacted = compact(render(parsedAndSubstituted))
(compacted, modifiedFields)
} else (null, List.empty[JsonModifiedField])
/**
* Map context top fields with strategy if they match.
*/
private def mapContextTopFields(tuple: (String, json4s.JValue),
strategy: PiiStrategy): (String, (JValue, List[JsonModifiedField])) = tuple match {
case (k: String, contexts: JValue) if k == "data" =>
(k, contexts match {
case JArray(contexts) =>
val updatedAndModified: List[(JValue, List[JsonModifiedField])] =
contexts.map(getModifiedContext(_, strategy))
(JArray(updatedAndModified.map(_._1)), updatedAndModified.map(_._2).flatten)
case x => getModifiedContext(x, strategy)
})
case (k: String, x: JValue) => (k, (x, List.empty[JsonModifiedField]))
}
/**
* Returns a modified context or unstruct event along with a list of modified fields.
*/
private def getModifiedContext(jv: JValue, strategy: PiiStrategy): (JValue, List[JsonModifiedField]) = jv match {
case JObject(context) => modifyObjectIfSchemaMatches(context, strategy)
case x => (x, List.empty[JsonModifiedField])
}
/**
* Tests whether the schema for this event matches the schema criterion and if it does modifies it.
*/
private def modifyObjectIfSchemaMatches(context: List[(String, json4s.JValue)],
strategy: PiiStrategy): (JObject, List[JsonModifiedField]) = {
val fieldsObj = context.toMap
(for {
schema <- fieldsObj.get("schema")
schemaStr <- schema.extractOpt[String]
parsedSchemaMatches <- SchemaKey.parse(schemaStr).map(schemaCriterion.matches).toOption
data <- fieldsObj.get("data")
if parsedSchemaMatches
updated = jsonPathReplace(data, strategy, schemaStr)
} yield (JObject(fieldsObj.updated("schema", schema).updated("data", updated._1).toList), updated._2))
.getOrElse((JObject(context), List()))
}
/**
   * Replaces a value in the given context data with the result of applying the strategy to that value.
*/
private def jsonPathReplace(jValue: JValue,
strategy: PiiStrategy,
schema: String): (JValue, List[JsonModifiedField]) = {
val objectNode = JsonMethods.mapper.valueToTree[ObjectNode](jValue)
val documentContext = JJsonPath.using(JsonPathConf).parse(objectNode)
val modifiedFields = MutableList[JsonModifiedField]()
val documentContext2 = documentContext.map(
jsonPath,
new ScrambleMapFunction(strategy, modifiedFields, fieldMutator.fieldName, jsonPath, schema))
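    // The map call above mutates the document held by `documentContext` in place (which is why
    // `documentContext`, not `documentContext2`, is read below).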
// make sure it is a structure preserving method, see #3636
val transformedJValue = JsonMethods.fromJsonNode(documentContext.json[JsonNode]())
val Diff(_, erroneouslyAdded, _) = jValue diff transformedJValue
val Diff(_, withoutCruft, _) = erroneouslyAdded diff transformedJValue
(withoutCruft, modifiedFields.toList)
}
}
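// Illustration with hypothetical values: a PiiJson configured with the "contexts" field, a
// schemaCriterion of "iglu:com.acme/user_context/jsonschema/1-*-*" and a jsonPath of "$.email"
// replaces, in every attached context whose schema matches the criterion, the value at $.email
// with strategy.scramble(originalValue), recording a JsonModifiedField for each replacement.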
private final class ScrambleMapFunction(strategy: PiiStrategy,
modifiedFields: MutableList[JsonModifiedField],
fieldName: String,
jsonPath: String,
schema: String)
extends MapFunction {
override def map(currentValue: AnyRef, configuration: Configuration): AnyRef = currentValue match {
case s: String =>
val newValue = strategy.scramble(s)
val _ = modifiedFields += JsonModifiedField(fieldName, s, newValue, jsonPath, schema)
newValue
case a: ArrayNode =>
a.elements.asScala.map {
case t: TextNode =>
val originalValue = t.asText()
val newValue = strategy.scramble(originalValue)
modifiedFields += JsonModifiedField(fieldName, originalValue, newValue, jsonPath, schema)
newValue
case default: AnyRef => default
}
case default: AnyRef => default
}
}
| RetentionGrid/snowplow | 3-enrich/scala-common-enrich/src/main/scala/com.snowplowanalytics.snowplow.enrich/common/enrichments/registry/pii/PiiPseudonymizerEnrichment.scala | Scala | apache-2.0 | 14,263 |
// Generated by ScalaBuff, the Scala Protocol Buffers compiler. DO NOT EDIT!
// source: riak.proto
package com.basho.riak.protobuf
final case class RpbErrorResp(
`errmsg`: com.google.protobuf.ByteString = com.google.protobuf.ByteString.EMPTY,
`errcode`: Int = 0) extends com.google.protobuf.GeneratedMessageLite
with com.google.protobuf.MessageLite.Builder
with net.sandrogrzicic.scalabuff.Message[RpbErrorResp]
with net.sandrogrzicic.scalabuff.Parser[RpbErrorResp] {
def writeTo(output: com.google.protobuf.CodedOutputStream) {
output.writeBytes(1, `errmsg`)
output.writeUInt32(2, `errcode`)
}
def getSerializedSize = {
import com.google.protobuf.CodedOutputStream._
var __size = 0
__size += computeBytesSize(1, `errmsg`)
__size += computeUInt32Size(2, `errcode`)
__size
}
def mergeFrom(in: com.google.protobuf.CodedInputStream, extensionRegistry: com.google.protobuf.ExtensionRegistryLite): RpbErrorResp = {
import com.google.protobuf.ExtensionRegistryLite.{ getEmptyRegistry ⇒ _emptyRegistry }
var __errmsg: com.google.protobuf.ByteString = com.google.protobuf.ByteString.EMPTY
var __errcode: Int = 0
def __newMerged = RpbErrorResp(
__errmsg,
__errcode)
while (true) in.readTag match {
case 0 ⇒ return __newMerged
case 10 ⇒ __errmsg = in.readBytes()
case 16 ⇒ __errcode = in.readUInt32()
case default ⇒ if (!in.skipField(default)) return __newMerged
}
null
}
def mergeFrom(m: RpbErrorResp) = {
RpbErrorResp(
m.`errmsg`,
m.`errcode`)
}
def getDefaultInstanceForType = RpbErrorResp.defaultInstance
def clear = getDefaultInstanceForType
def isInitialized = true
def build = this
def buildPartial = this
def parsePartialFrom(cis: com.google.protobuf.CodedInputStream, er: com.google.protobuf.ExtensionRegistryLite) = mergeFrom(cis, er)
override def getParserForType = this
def newBuilderForType = getDefaultInstanceForType
def toBuilder = this
def toJson(indent: Int = 0): String = "ScalaBuff JSON generation not enabled. Use --generate_json_method to enable."
}
object RpbErrorResp {
@beans.BeanProperty val defaultInstance = new RpbErrorResp()
def parseFrom(data: Array[Byte]): RpbErrorResp = defaultInstance.mergeFrom(data)
def parseFrom(data: Array[Byte], offset: Int, length: Int): RpbErrorResp = defaultInstance.mergeFrom(data, offset, length)
def parseFrom(byteString: com.google.protobuf.ByteString): RpbErrorResp = defaultInstance.mergeFrom(byteString)
def parseFrom(stream: java.io.InputStream): RpbErrorResp = defaultInstance.mergeFrom(stream)
def parseDelimitedFrom(stream: java.io.InputStream): Option[RpbErrorResp] = defaultInstance.mergeDelimitedFromStream(stream)
val ERRMSG_FIELD_NUMBER = 1
val ERRCODE_FIELD_NUMBER = 2
def newBuilder = defaultInstance.newBuilderForType
def newBuilder(prototype: RpbErrorResp) = defaultInstance.mergeFrom(prototype)
}
final case class RpbGetServerInfoResp(
`node`: Option[com.google.protobuf.ByteString] = None,
`serverVersion`: Option[com.google.protobuf.ByteString] = None) extends com.google.protobuf.GeneratedMessageLite
with com.google.protobuf.MessageLite.Builder
with net.sandrogrzicic.scalabuff.Message[RpbGetServerInfoResp]
with net.sandrogrzicic.scalabuff.Parser[RpbGetServerInfoResp] {
def setNode(_f: com.google.protobuf.ByteString) = copy(`node` = Some(_f))
def setServerVersion(_f: com.google.protobuf.ByteString) = copy(`serverVersion` = Some(_f))
def clearNode = copy(`node` = None)
def clearServerVersion = copy(`serverVersion` = None)
def writeTo(output: com.google.protobuf.CodedOutputStream) {
if (`node`.isDefined) output.writeBytes(1, `node`.get)
if (`serverVersion`.isDefined) output.writeBytes(2, `serverVersion`.get)
}
def getSerializedSize = {
import com.google.protobuf.CodedOutputStream._
var __size = 0
if (`node`.isDefined) __size += computeBytesSize(1, `node`.get)
if (`serverVersion`.isDefined) __size += computeBytesSize(2, `serverVersion`.get)
__size
}
def mergeFrom(in: com.google.protobuf.CodedInputStream, extensionRegistry: com.google.protobuf.ExtensionRegistryLite): RpbGetServerInfoResp = {
import com.google.protobuf.ExtensionRegistryLite.{ getEmptyRegistry ⇒ _emptyRegistry }
var __node: Option[com.google.protobuf.ByteString] = `node`
var __serverVersion: Option[com.google.protobuf.ByteString] = `serverVersion`
def __newMerged = RpbGetServerInfoResp(
__node,
__serverVersion)
while (true) in.readTag match {
case 0 ⇒ return __newMerged
case 10 ⇒ __node = Some(in.readBytes())
case 18 ⇒ __serverVersion = Some(in.readBytes())
case default ⇒ if (!in.skipField(default)) return __newMerged
}
null
}
def mergeFrom(m: RpbGetServerInfoResp) = {
RpbGetServerInfoResp(
m.`node`.orElse(`node`),
m.`serverVersion`.orElse(`serverVersion`))
}
def getDefaultInstanceForType = RpbGetServerInfoResp.defaultInstance
def clear = getDefaultInstanceForType
def isInitialized = true
def build = this
def buildPartial = this
def parsePartialFrom(cis: com.google.protobuf.CodedInputStream, er: com.google.protobuf.ExtensionRegistryLite) = mergeFrom(cis, er)
override def getParserForType = this
def newBuilderForType = getDefaultInstanceForType
def toBuilder = this
def toJson(indent: Int = 0): String = "ScalaBuff JSON generation not enabled. Use --generate_json_method to enable."
}
object RpbGetServerInfoResp {
@beans.BeanProperty val defaultInstance = new RpbGetServerInfoResp()
def parseFrom(data: Array[Byte]): RpbGetServerInfoResp = defaultInstance.mergeFrom(data)
def parseFrom(data: Array[Byte], offset: Int, length: Int): RpbGetServerInfoResp = defaultInstance.mergeFrom(data, offset, length)
def parseFrom(byteString: com.google.protobuf.ByteString): RpbGetServerInfoResp = defaultInstance.mergeFrom(byteString)
def parseFrom(stream: java.io.InputStream): RpbGetServerInfoResp = defaultInstance.mergeFrom(stream)
def parseDelimitedFrom(stream: java.io.InputStream): Option[RpbGetServerInfoResp] = defaultInstance.mergeDelimitedFromStream(stream)
val NODE_FIELD_NUMBER = 1
val SERVER_VERSION_FIELD_NUMBER = 2
def newBuilder = defaultInstance.newBuilderForType
def newBuilder(prototype: RpbGetServerInfoResp) = defaultInstance.mergeFrom(prototype)
}
final case class RpbPair(
`key`: com.google.protobuf.ByteString = com.google.protobuf.ByteString.EMPTY,
`value`: Option[com.google.protobuf.ByteString] = None) extends com.google.protobuf.GeneratedMessageLite
with com.google.protobuf.MessageLite.Builder
with net.sandrogrzicic.scalabuff.Message[RpbPair]
with net.sandrogrzicic.scalabuff.Parser[RpbPair] {
def setValue(_f: com.google.protobuf.ByteString) = copy(`value` = Some(_f))
def clearValue = copy(`value` = None)
def writeTo(output: com.google.protobuf.CodedOutputStream) {
output.writeBytes(1, `key`)
if (`value`.isDefined) output.writeBytes(2, `value`.get)
}
def getSerializedSize = {
import com.google.protobuf.CodedOutputStream._
var __size = 0
__size += computeBytesSize(1, `key`)
if (`value`.isDefined) __size += computeBytesSize(2, `value`.get)
__size
}
def mergeFrom(in: com.google.protobuf.CodedInputStream, extensionRegistry: com.google.protobuf.ExtensionRegistryLite): RpbPair = {
import com.google.protobuf.ExtensionRegistryLite.{ getEmptyRegistry ⇒ _emptyRegistry }
var __key: com.google.protobuf.ByteString = com.google.protobuf.ByteString.EMPTY
var __value: Option[com.google.protobuf.ByteString] = `value`
def __newMerged = RpbPair(
__key,
__value)
while (true) in.readTag match {
case 0 ⇒ return __newMerged
case 10 ⇒ __key = in.readBytes()
case 18 ⇒ __value = Some(in.readBytes())
case default ⇒ if (!in.skipField(default)) return __newMerged
}
null
}
def mergeFrom(m: RpbPair) = {
RpbPair(
m.`key`,
m.`value`.orElse(`value`))
}
def getDefaultInstanceForType = RpbPair.defaultInstance
def clear = getDefaultInstanceForType
def isInitialized = true
def build = this
def buildPartial = this
def parsePartialFrom(cis: com.google.protobuf.CodedInputStream, er: com.google.protobuf.ExtensionRegistryLite) = mergeFrom(cis, er)
override def getParserForType = this
def newBuilderForType = getDefaultInstanceForType
def toBuilder = this
def toJson(indent: Int = 0): String = "ScalaBuff JSON generation not enabled. Use --generate_json_method to enable."
}
object RpbPair {
@beans.BeanProperty val defaultInstance = new RpbPair()
def parseFrom(data: Array[Byte]): RpbPair = defaultInstance.mergeFrom(data)
def parseFrom(data: Array[Byte], offset: Int, length: Int): RpbPair = defaultInstance.mergeFrom(data, offset, length)
def parseFrom(byteString: com.google.protobuf.ByteString): RpbPair = defaultInstance.mergeFrom(byteString)
def parseFrom(stream: java.io.InputStream): RpbPair = defaultInstance.mergeFrom(stream)
def parseDelimitedFrom(stream: java.io.InputStream): Option[RpbPair] = defaultInstance.mergeDelimitedFromStream(stream)
val KEY_FIELD_NUMBER = 1
val VALUE_FIELD_NUMBER = 2
def newBuilder = defaultInstance.newBuilderForType
def newBuilder(prototype: RpbPair) = defaultInstance.mergeFrom(prototype)
}
final case class RpbGetBucketReq(
`bucket`: com.google.protobuf.ByteString = com.google.protobuf.ByteString.EMPTY,
`type`: Option[com.google.protobuf.ByteString] = None) extends com.google.protobuf.GeneratedMessageLite
with com.google.protobuf.MessageLite.Builder
with net.sandrogrzicic.scalabuff.Message[RpbGetBucketReq]
with net.sandrogrzicic.scalabuff.Parser[RpbGetBucketReq] {
def setType(_f: com.google.protobuf.ByteString) = copy(`type` = Some(_f))
def clearType = copy(`type` = None)
def writeTo(output: com.google.protobuf.CodedOutputStream) {
output.writeBytes(1, `bucket`)
if (`type`.isDefined) output.writeBytes(2, `type`.get)
}
def getSerializedSize = {
import com.google.protobuf.CodedOutputStream._
var __size = 0
__size += computeBytesSize(1, `bucket`)
if (`type`.isDefined) __size += computeBytesSize(2, `type`.get)
__size
}
def mergeFrom(in: com.google.protobuf.CodedInputStream, extensionRegistry: com.google.protobuf.ExtensionRegistryLite): RpbGetBucketReq = {
import com.google.protobuf.ExtensionRegistryLite.{ getEmptyRegistry ⇒ _emptyRegistry }
var __bucket: com.google.protobuf.ByteString = com.google.protobuf.ByteString.EMPTY
var __type: Option[com.google.protobuf.ByteString] = `type`
def __newMerged = RpbGetBucketReq(
__bucket,
__type)
while (true) in.readTag match {
case 0 ⇒ return __newMerged
case 10 ⇒ __bucket = in.readBytes()
case 18 ⇒ __type = Some(in.readBytes())
case default ⇒ if (!in.skipField(default)) return __newMerged
}
null
}
def mergeFrom(m: RpbGetBucketReq) = {
RpbGetBucketReq(
m.`bucket`,
m.`type`.orElse(`type`))
}
def getDefaultInstanceForType = RpbGetBucketReq.defaultInstance
def clear = getDefaultInstanceForType
def isInitialized = true
def build = this
def buildPartial = this
def parsePartialFrom(cis: com.google.protobuf.CodedInputStream, er: com.google.protobuf.ExtensionRegistryLite) = mergeFrom(cis, er)
override def getParserForType = this
def newBuilderForType = getDefaultInstanceForType
def toBuilder = this
def toJson(indent: Int = 0): String = "ScalaBuff JSON generation not enabled. Use --generate_json_method to enable."
}
object RpbGetBucketReq {
@beans.BeanProperty val defaultInstance = new RpbGetBucketReq()
def parseFrom(data: Array[Byte]): RpbGetBucketReq = defaultInstance.mergeFrom(data)
def parseFrom(data: Array[Byte], offset: Int, length: Int): RpbGetBucketReq = defaultInstance.mergeFrom(data, offset, length)
def parseFrom(byteString: com.google.protobuf.ByteString): RpbGetBucketReq = defaultInstance.mergeFrom(byteString)
def parseFrom(stream: java.io.InputStream): RpbGetBucketReq = defaultInstance.mergeFrom(stream)
def parseDelimitedFrom(stream: java.io.InputStream): Option[RpbGetBucketReq] = defaultInstance.mergeDelimitedFromStream(stream)
val BUCKET_FIELD_NUMBER = 1
val TYPE_FIELD_NUMBER = 2
def newBuilder = defaultInstance.newBuilderForType
def newBuilder(prototype: RpbGetBucketReq) = defaultInstance.mergeFrom(prototype)
}
final case class RpbGetBucketResp(
`props`: RpbBucketProps = RpbBucketProps.defaultInstance) extends com.google.protobuf.GeneratedMessageLite
with com.google.protobuf.MessageLite.Builder
with net.sandrogrzicic.scalabuff.Message[RpbGetBucketResp]
with net.sandrogrzicic.scalabuff.Parser[RpbGetBucketResp] {
def writeTo(output: com.google.protobuf.CodedOutputStream) {
output.writeMessage(1, `props`)
}
def getSerializedSize = {
import com.google.protobuf.CodedOutputStream._
var __size = 0
__size += computeMessageSize(1, `props`)
__size
}
def mergeFrom(in: com.google.protobuf.CodedInputStream, extensionRegistry: com.google.protobuf.ExtensionRegistryLite): RpbGetBucketResp = {
import com.google.protobuf.ExtensionRegistryLite.{ getEmptyRegistry ⇒ _emptyRegistry }
var __props: RpbBucketProps = RpbBucketProps.defaultInstance
def __newMerged = RpbGetBucketResp(
__props)
while (true) in.readTag match {
case 0 ⇒ return __newMerged
case 10 ⇒ __props = readMessage[RpbBucketProps](in, __props, _emptyRegistry)
case default ⇒ if (!in.skipField(default)) return __newMerged
}
null
}
def mergeFrom(m: RpbGetBucketResp) = {
RpbGetBucketResp(
m.`props`)
}
def getDefaultInstanceForType = RpbGetBucketResp.defaultInstance
def clear = getDefaultInstanceForType
def isInitialized = true
def build = this
def buildPartial = this
def parsePartialFrom(cis: com.google.protobuf.CodedInputStream, er: com.google.protobuf.ExtensionRegistryLite) = mergeFrom(cis, er)
override def getParserForType = this
def newBuilderForType = getDefaultInstanceForType
def toBuilder = this
def toJson(indent: Int = 0): String = "ScalaBuff JSON generation not enabled. Use --generate_json_method to enable."
}
object RpbGetBucketResp {
@beans.BeanProperty val defaultInstance = new RpbGetBucketResp()
def parseFrom(data: Array[Byte]): RpbGetBucketResp = defaultInstance.mergeFrom(data)
def parseFrom(data: Array[Byte], offset: Int, length: Int): RpbGetBucketResp = defaultInstance.mergeFrom(data, offset, length)
def parseFrom(byteString: com.google.protobuf.ByteString): RpbGetBucketResp = defaultInstance.mergeFrom(byteString)
def parseFrom(stream: java.io.InputStream): RpbGetBucketResp = defaultInstance.mergeFrom(stream)
def parseDelimitedFrom(stream: java.io.InputStream): Option[RpbGetBucketResp] = defaultInstance.mergeDelimitedFromStream(stream)
val PROPS_FIELD_NUMBER = 1
def newBuilder = defaultInstance.newBuilderForType
def newBuilder(prototype: RpbGetBucketResp) = defaultInstance.mergeFrom(prototype)
}
final case class RpbSetBucketReq(
`bucket`: com.google.protobuf.ByteString = com.google.protobuf.ByteString.EMPTY,
`props`: RpbBucketProps = RpbBucketProps.defaultInstance,
`type`: Option[com.google.protobuf.ByteString] = None) extends com.google.protobuf.GeneratedMessageLite
with com.google.protobuf.MessageLite.Builder
with net.sandrogrzicic.scalabuff.Message[RpbSetBucketReq]
with net.sandrogrzicic.scalabuff.Parser[RpbSetBucketReq] {
def setType(_f: com.google.protobuf.ByteString) = copy(`type` = Some(_f))
def clearType = copy(`type` = None)
def writeTo(output: com.google.protobuf.CodedOutputStream) {
output.writeBytes(1, `bucket`)
output.writeMessage(2, `props`)
if (`type`.isDefined) output.writeBytes(3, `type`.get)
}
def getSerializedSize = {
import com.google.protobuf.CodedOutputStream._
var __size = 0
__size += computeBytesSize(1, `bucket`)
__size += computeMessageSize(2, `props`)
if (`type`.isDefined) __size += computeBytesSize(3, `type`.get)
__size
}
def mergeFrom(in: com.google.protobuf.CodedInputStream, extensionRegistry: com.google.protobuf.ExtensionRegistryLite): RpbSetBucketReq = {
import com.google.protobuf.ExtensionRegistryLite.{ getEmptyRegistry ⇒ _emptyRegistry }
var __bucket: com.google.protobuf.ByteString = com.google.protobuf.ByteString.EMPTY
var __props: RpbBucketProps = RpbBucketProps.defaultInstance
var __type: Option[com.google.protobuf.ByteString] = `type`
def __newMerged = RpbSetBucketReq(
__bucket,
__props,
__type)
while (true) in.readTag match {
case 0 ⇒ return __newMerged
case 10 ⇒ __bucket = in.readBytes()
case 18 ⇒ __props = readMessage[RpbBucketProps](in, __props, _emptyRegistry)
case 26 ⇒ __type = Some(in.readBytes())
case default ⇒ if (!in.skipField(default)) return __newMerged
}
null
}
def mergeFrom(m: RpbSetBucketReq) = {
RpbSetBucketReq(
m.`bucket`,
m.`props`,
m.`type`.orElse(`type`))
}
def getDefaultInstanceForType = RpbSetBucketReq.defaultInstance
def clear = getDefaultInstanceForType
def isInitialized = true
def build = this
def buildPartial = this
def parsePartialFrom(cis: com.google.protobuf.CodedInputStream, er: com.google.protobuf.ExtensionRegistryLite) = mergeFrom(cis, er)
override def getParserForType = this
def newBuilderForType = getDefaultInstanceForType
def toBuilder = this
def toJson(indent: Int = 0): String = "ScalaBuff JSON generation not enabled. Use --generate_json_method to enable."
}
object RpbSetBucketReq {
@beans.BeanProperty val defaultInstance = new RpbSetBucketReq()
def parseFrom(data: Array[Byte]): RpbSetBucketReq = defaultInstance.mergeFrom(data)
def parseFrom(data: Array[Byte], offset: Int, length: Int): RpbSetBucketReq = defaultInstance.mergeFrom(data, offset, length)
def parseFrom(byteString: com.google.protobuf.ByteString): RpbSetBucketReq = defaultInstance.mergeFrom(byteString)
def parseFrom(stream: java.io.InputStream): RpbSetBucketReq = defaultInstance.mergeFrom(stream)
def parseDelimitedFrom(stream: java.io.InputStream): Option[RpbSetBucketReq] = defaultInstance.mergeDelimitedFromStream(stream)
val BUCKET_FIELD_NUMBER = 1
val PROPS_FIELD_NUMBER = 2
val TYPE_FIELD_NUMBER = 3
def newBuilder = defaultInstance.newBuilderForType
def newBuilder(prototype: RpbSetBucketReq) = defaultInstance.mergeFrom(prototype)
}
final case class RpbResetBucketReq(
`bucket`: com.google.protobuf.ByteString = com.google.protobuf.ByteString.EMPTY,
`type`: Option[com.google.protobuf.ByteString] = None) extends com.google.protobuf.GeneratedMessageLite
with com.google.protobuf.MessageLite.Builder
with net.sandrogrzicic.scalabuff.Message[RpbResetBucketReq]
with net.sandrogrzicic.scalabuff.Parser[RpbResetBucketReq] {
def setType(_f: com.google.protobuf.ByteString) = copy(`type` = Some(_f))
def clearType = copy(`type` = None)
def writeTo(output: com.google.protobuf.CodedOutputStream) {
output.writeBytes(1, `bucket`)
if (`type`.isDefined) output.writeBytes(2, `type`.get)
}
def getSerializedSize = {
import com.google.protobuf.CodedOutputStream._
var __size = 0
__size += computeBytesSize(1, `bucket`)
if (`type`.isDefined) __size += computeBytesSize(2, `type`.get)
__size
}
def mergeFrom(in: com.google.protobuf.CodedInputStream, extensionRegistry: com.google.protobuf.ExtensionRegistryLite): RpbResetBucketReq = {
import com.google.protobuf.ExtensionRegistryLite.{ getEmptyRegistry ⇒ _emptyRegistry }
var __bucket: com.google.protobuf.ByteString = com.google.protobuf.ByteString.EMPTY
var __type: Option[com.google.protobuf.ByteString] = `type`
def __newMerged = RpbResetBucketReq(
__bucket,
__type)
while (true) in.readTag match {
case 0 ⇒ return __newMerged
case 10 ⇒ __bucket = in.readBytes()
case 18 ⇒ __type = Some(in.readBytes())
case default ⇒ if (!in.skipField(default)) return __newMerged
}
null
}
def mergeFrom(m: RpbResetBucketReq) = {
RpbResetBucketReq(
m.`bucket`,
m.`type`.orElse(`type`))
}
def getDefaultInstanceForType = RpbResetBucketReq.defaultInstance
def clear = getDefaultInstanceForType
def isInitialized = true
def build = this
def buildPartial = this
def parsePartialFrom(cis: com.google.protobuf.CodedInputStream, er: com.google.protobuf.ExtensionRegistryLite) = mergeFrom(cis, er)
override def getParserForType = this
def newBuilderForType = getDefaultInstanceForType
def toBuilder = this
def toJson(indent: Int = 0): String = "ScalaBuff JSON generation not enabled. Use --generate_json_method to enable."
}
object RpbResetBucketReq {
@beans.BeanProperty val defaultInstance = new RpbResetBucketReq()
def parseFrom(data: Array[Byte]): RpbResetBucketReq = defaultInstance.mergeFrom(data)
def parseFrom(data: Array[Byte], offset: Int, length: Int): RpbResetBucketReq = defaultInstance.mergeFrom(data, offset, length)
def parseFrom(byteString: com.google.protobuf.ByteString): RpbResetBucketReq = defaultInstance.mergeFrom(byteString)
def parseFrom(stream: java.io.InputStream): RpbResetBucketReq = defaultInstance.mergeFrom(stream)
def parseDelimitedFrom(stream: java.io.InputStream): Option[RpbResetBucketReq] = defaultInstance.mergeDelimitedFromStream(stream)
val BUCKET_FIELD_NUMBER = 1
val TYPE_FIELD_NUMBER = 2
def newBuilder = defaultInstance.newBuilderForType
def newBuilder(prototype: RpbResetBucketReq) = defaultInstance.mergeFrom(prototype)
}
final case class RpbGetBucketTypeReq(
`type`: com.google.protobuf.ByteString = com.google.protobuf.ByteString.EMPTY) extends com.google.protobuf.GeneratedMessageLite
with com.google.protobuf.MessageLite.Builder
with net.sandrogrzicic.scalabuff.Message[RpbGetBucketTypeReq]
with net.sandrogrzicic.scalabuff.Parser[RpbGetBucketTypeReq] {
def writeTo(output: com.google.protobuf.CodedOutputStream) {
output.writeBytes(1, `type`)
}
def getSerializedSize = {
import com.google.protobuf.CodedOutputStream._
var __size = 0
__size += computeBytesSize(1, `type`)
__size
}
def mergeFrom(in: com.google.protobuf.CodedInputStream, extensionRegistry: com.google.protobuf.ExtensionRegistryLite): RpbGetBucketTypeReq = {
import com.google.protobuf.ExtensionRegistryLite.{ getEmptyRegistry ⇒ _emptyRegistry }
var __type: com.google.protobuf.ByteString = com.google.protobuf.ByteString.EMPTY
def __newMerged = RpbGetBucketTypeReq(
__type)
while (true) in.readTag match {
case 0 ⇒ return __newMerged
case 10 ⇒ __type = in.readBytes()
case default ⇒ if (!in.skipField(default)) return __newMerged
}
null
}
def mergeFrom(m: RpbGetBucketTypeReq) = {
RpbGetBucketTypeReq(
m.`type`)
}
def getDefaultInstanceForType = RpbGetBucketTypeReq.defaultInstance
def clear = getDefaultInstanceForType
def isInitialized = true
def build = this
def buildPartial = this
def parsePartialFrom(cis: com.google.protobuf.CodedInputStream, er: com.google.protobuf.ExtensionRegistryLite) = mergeFrom(cis, er)
override def getParserForType = this
def newBuilderForType = getDefaultInstanceForType
def toBuilder = this
def toJson(indent: Int = 0): String = "ScalaBuff JSON generation not enabled. Use --generate_json_method to enable."
}
object RpbGetBucketTypeReq {
@beans.BeanProperty val defaultInstance = new RpbGetBucketTypeReq()
def parseFrom(data: Array[Byte]): RpbGetBucketTypeReq = defaultInstance.mergeFrom(data)
def parseFrom(data: Array[Byte], offset: Int, length: Int): RpbGetBucketTypeReq = defaultInstance.mergeFrom(data, offset, length)
def parseFrom(byteString: com.google.protobuf.ByteString): RpbGetBucketTypeReq = defaultInstance.mergeFrom(byteString)
def parseFrom(stream: java.io.InputStream): RpbGetBucketTypeReq = defaultInstance.mergeFrom(stream)
def parseDelimitedFrom(stream: java.io.InputStream): Option[RpbGetBucketTypeReq] = defaultInstance.mergeDelimitedFromStream(stream)
val TYPE_FIELD_NUMBER = 1
def newBuilder = defaultInstance.newBuilderForType
def newBuilder(prototype: RpbGetBucketTypeReq) = defaultInstance.mergeFrom(prototype)
}
final case class RpbSetBucketTypeReq(
`type`: com.google.protobuf.ByteString = com.google.protobuf.ByteString.EMPTY,
`props`: RpbBucketProps = RpbBucketProps.defaultInstance) extends com.google.protobuf.GeneratedMessageLite
with com.google.protobuf.MessageLite.Builder
with net.sandrogrzicic.scalabuff.Message[RpbSetBucketTypeReq]
with net.sandrogrzicic.scalabuff.Parser[RpbSetBucketTypeReq] {
def writeTo(output: com.google.protobuf.CodedOutputStream) {
output.writeBytes(1, `type`)
output.writeMessage(2, `props`)
}
def getSerializedSize = {
import com.google.protobuf.CodedOutputStream._
var __size = 0
__size += computeBytesSize(1, `type`)
__size += computeMessageSize(2, `props`)
__size
}
def mergeFrom(in: com.google.protobuf.CodedInputStream, extensionRegistry: com.google.protobuf.ExtensionRegistryLite): RpbSetBucketTypeReq = {
import com.google.protobuf.ExtensionRegistryLite.{ getEmptyRegistry ⇒ _emptyRegistry }
var __type: com.google.protobuf.ByteString = com.google.protobuf.ByteString.EMPTY
var __props: RpbBucketProps = RpbBucketProps.defaultInstance
def __newMerged = RpbSetBucketTypeReq(
__type,
__props)
while (true) in.readTag match {
case 0 ⇒ return __newMerged
case 10 ⇒ __type = in.readBytes()
case 18 ⇒ __props = readMessage[RpbBucketProps](in, __props, _emptyRegistry)
case default ⇒ if (!in.skipField(default)) return __newMerged
}
null
}
def mergeFrom(m: RpbSetBucketTypeReq) = {
RpbSetBucketTypeReq(
m.`type`,
m.`props`)
}
def getDefaultInstanceForType = RpbSetBucketTypeReq.defaultInstance
def clear = getDefaultInstanceForType
def isInitialized = true
def build = this
def buildPartial = this
def parsePartialFrom(cis: com.google.protobuf.CodedInputStream, er: com.google.protobuf.ExtensionRegistryLite) = mergeFrom(cis, er)
override def getParserForType = this
def newBuilderForType = getDefaultInstanceForType
def toBuilder = this
def toJson(indent: Int = 0): String = "ScalaBuff JSON generation not enabled. Use --generate_json_method to enable."
}
object RpbSetBucketTypeReq {
@beans.BeanProperty val defaultInstance = new RpbSetBucketTypeReq()
def parseFrom(data: Array[Byte]): RpbSetBucketTypeReq = defaultInstance.mergeFrom(data)
def parseFrom(data: Array[Byte], offset: Int, length: Int): RpbSetBucketTypeReq = defaultInstance.mergeFrom(data, offset, length)
def parseFrom(byteString: com.google.protobuf.ByteString): RpbSetBucketTypeReq = defaultInstance.mergeFrom(byteString)
def parseFrom(stream: java.io.InputStream): RpbSetBucketTypeReq = defaultInstance.mergeFrom(stream)
def parseDelimitedFrom(stream: java.io.InputStream): Option[RpbSetBucketTypeReq] = defaultInstance.mergeDelimitedFromStream(stream)
val TYPE_FIELD_NUMBER = 1
val PROPS_FIELD_NUMBER = 2
def newBuilder = defaultInstance.newBuilderForType
def newBuilder(prototype: RpbSetBucketTypeReq) = defaultInstance.mergeFrom(prototype)
}
final case class RpbModFun(
`module`: com.google.protobuf.ByteString = com.google.protobuf.ByteString.EMPTY,
`function`: com.google.protobuf.ByteString = com.google.protobuf.ByteString.EMPTY) extends com.google.protobuf.GeneratedMessageLite
with com.google.protobuf.MessageLite.Builder
with net.sandrogrzicic.scalabuff.Message[RpbModFun]
with net.sandrogrzicic.scalabuff.Parser[RpbModFun] {
def writeTo(output: com.google.protobuf.CodedOutputStream) {
output.writeBytes(1, `module`)
output.writeBytes(2, `function`)
}
def getSerializedSize = {
import com.google.protobuf.CodedOutputStream._
var __size = 0
__size += computeBytesSize(1, `module`)
__size += computeBytesSize(2, `function`)
__size
}
def mergeFrom(in: com.google.protobuf.CodedInputStream, extensionRegistry: com.google.protobuf.ExtensionRegistryLite): RpbModFun = {
import com.google.protobuf.ExtensionRegistryLite.{ getEmptyRegistry ⇒ _emptyRegistry }
var __module: com.google.protobuf.ByteString = com.google.protobuf.ByteString.EMPTY
var __function: com.google.protobuf.ByteString = com.google.protobuf.ByteString.EMPTY
def __newMerged = RpbModFun(
__module,
__function)
while (true) in.readTag match {
case 0 ⇒ return __newMerged
case 10 ⇒ __module = in.readBytes()
case 18 ⇒ __function = in.readBytes()
case default ⇒ if (!in.skipField(default)) return __newMerged
}
null
}
def mergeFrom(m: RpbModFun) = {
RpbModFun(
m.`module`,
m.`function`)
}
def getDefaultInstanceForType = RpbModFun.defaultInstance
def clear = getDefaultInstanceForType
def isInitialized = true
def build = this
def buildPartial = this
def parsePartialFrom(cis: com.google.protobuf.CodedInputStream, er: com.google.protobuf.ExtensionRegistryLite) = mergeFrom(cis, er)
override def getParserForType = this
def newBuilderForType = getDefaultInstanceForType
def toBuilder = this
def toJson(indent: Int = 0): String = "ScalaBuff JSON generation not enabled. Use --generate_json_method to enable."
}
object RpbModFun {
@beans.BeanProperty val defaultInstance = new RpbModFun()
def parseFrom(data: Array[Byte]): RpbModFun = defaultInstance.mergeFrom(data)
def parseFrom(data: Array[Byte], offset: Int, length: Int): RpbModFun = defaultInstance.mergeFrom(data, offset, length)
def parseFrom(byteString: com.google.protobuf.ByteString): RpbModFun = defaultInstance.mergeFrom(byteString)
def parseFrom(stream: java.io.InputStream): RpbModFun = defaultInstance.mergeFrom(stream)
def parseDelimitedFrom(stream: java.io.InputStream): Option[RpbModFun] = defaultInstance.mergeDelimitedFromStream(stream)
val MODULE_FIELD_NUMBER = 1
val FUNCTION_FIELD_NUMBER = 2
def newBuilder = defaultInstance.newBuilderForType
def newBuilder(prototype: RpbModFun) = defaultInstance.mergeFrom(prototype)
}
final case class RpbCommitHook(
`modfun`: Option[RpbModFun] = None,
`name`: Option[com.google.protobuf.ByteString] = None) extends com.google.protobuf.GeneratedMessageLite
with com.google.protobuf.MessageLite.Builder
with net.sandrogrzicic.scalabuff.Message[RpbCommitHook]
with net.sandrogrzicic.scalabuff.Parser[RpbCommitHook] {
def setModfun(_f: RpbModFun) = copy(`modfun` = Some(_f))
def setName(_f: com.google.protobuf.ByteString) = copy(`name` = Some(_f))
def clearModfun = copy(`modfun` = None)
def clearName = copy(`name` = None)
def writeTo(output: com.google.protobuf.CodedOutputStream) {
if (`modfun`.isDefined) output.writeMessage(1, `modfun`.get)
if (`name`.isDefined) output.writeBytes(2, `name`.get)
}
def getSerializedSize = {
import com.google.protobuf.CodedOutputStream._
var __size = 0
if (`modfun`.isDefined) __size += computeMessageSize(1, `modfun`.get)
if (`name`.isDefined) __size += computeBytesSize(2, `name`.get)
__size
}
def mergeFrom(in: com.google.protobuf.CodedInputStream, extensionRegistry: com.google.protobuf.ExtensionRegistryLite): RpbCommitHook = {
import com.google.protobuf.ExtensionRegistryLite.{ getEmptyRegistry ⇒ _emptyRegistry }
var __modfun: Option[RpbModFun] = `modfun`
var __name: Option[com.google.protobuf.ByteString] = `name`
def __newMerged = RpbCommitHook(
__modfun,
__name)
while (true) in.readTag match {
case 0 ⇒ return __newMerged
case 10 ⇒ __modfun = Some(readMessage[RpbModFun](in, __modfun.orElse({
__modfun = RpbModFun.defaultInstance
__modfun
}).get, _emptyRegistry))
case 18 ⇒ __name = Some(in.readBytes())
case default ⇒ if (!in.skipField(default)) return __newMerged
}
null
}
def mergeFrom(m: RpbCommitHook) = {
RpbCommitHook(
m.`modfun`.orElse(`modfun`),
m.`name`.orElse(`name`))
}
def getDefaultInstanceForType = RpbCommitHook.defaultInstance
def clear = getDefaultInstanceForType
def isInitialized = true
def build = this
def buildPartial = this
def parsePartialFrom(cis: com.google.protobuf.CodedInputStream, er: com.google.protobuf.ExtensionRegistryLite) = mergeFrom(cis, er)
override def getParserForType = this
def newBuilderForType = getDefaultInstanceForType
def toBuilder = this
def toJson(indent: Int = 0): String = "ScalaBuff JSON generation not enabled. Use --generate_json_method to enable."
}
object RpbCommitHook {
@beans.BeanProperty val defaultInstance = new RpbCommitHook()
def parseFrom(data: Array[Byte]): RpbCommitHook = defaultInstance.mergeFrom(data)
def parseFrom(data: Array[Byte], offset: Int, length: Int): RpbCommitHook = defaultInstance.mergeFrom(data, offset, length)
def parseFrom(byteString: com.google.protobuf.ByteString): RpbCommitHook = defaultInstance.mergeFrom(byteString)
def parseFrom(stream: java.io.InputStream): RpbCommitHook = defaultInstance.mergeFrom(stream)
def parseDelimitedFrom(stream: java.io.InputStream): Option[RpbCommitHook] = defaultInstance.mergeDelimitedFromStream(stream)
val MODFUN_FIELD_NUMBER = 1
val NAME_FIELD_NUMBER = 2
def newBuilder = defaultInstance.newBuilderForType
def newBuilder(prototype: RpbCommitHook) = defaultInstance.mergeFrom(prototype)
}
final case class RpbBucketProps(
`nVal`: Option[Int] = None,
`allowMult`: Option[Boolean] = None,
`lastWriteWins`: Option[Boolean] = None,
`precommit`: scala.collection.immutable.Seq[RpbCommitHook] = Vector.empty[RpbCommitHook],
`hasPrecommit`: Option[Boolean] = Some(false),
`postcommit`: scala.collection.immutable.Seq[RpbCommitHook] = Vector.empty[RpbCommitHook],
`hasPostcommit`: Option[Boolean] = Some(false),
`chashKeyfun`: Option[RpbModFun] = None,
`linkfun`: Option[RpbModFun] = None,
`oldVclock`: Option[Int] = None,
`youngVclock`: Option[Int] = None,
`bigVclock`: Option[Int] = None,
`smallVclock`: Option[Int] = None,
`pr`: Option[Int] = None,
`r`: Option[Int] = None,
`w`: Option[Int] = None,
`pw`: Option[Int] = None,
`dw`: Option[Int] = None,
`rw`: Option[Int] = None,
`basicQuorum`: Option[Boolean] = None,
`notfoundOk`: Option[Boolean] = None,
`backend`: Option[com.google.protobuf.ByteString] = None,
`search`: Option[Boolean] = None,
`repl`: Option[RpbBucketProps.RpbReplMode.EnumVal] = None,
`searchIndex`: Option[com.google.protobuf.ByteString] = None,
`datatype`: Option[com.google.protobuf.ByteString] = None,
`consistent`: Option[Boolean] = None) extends com.google.protobuf.GeneratedMessageLite
with com.google.protobuf.MessageLite.Builder
with net.sandrogrzicic.scalabuff.Message[RpbBucketProps]
with net.sandrogrzicic.scalabuff.Parser[RpbBucketProps] {
def setNVal(_f: Int) = copy(`nVal` = Some(_f))
def setAllowMult(_f: Boolean) = copy(`allowMult` = Some(_f))
def setLastWriteWins(_f: Boolean) = copy(`lastWriteWins` = Some(_f))
def setPrecommit(_i: Int, _v: RpbCommitHook) = copy(`precommit` = `precommit`.updated(_i, _v))
def addPrecommit(_f: RpbCommitHook) = copy(`precommit` = `precommit` :+ _f)
def addAllPrecommit(_f: RpbCommitHook*) = copy(`precommit` = `precommit` ++ _f)
def addAllPrecommit(_f: TraversableOnce[RpbCommitHook]) = copy(`precommit` = `precommit` ++ _f)
def setHasPrecommit(_f: Boolean) = copy(`hasPrecommit` = Some(_f))
def setPostcommit(_i: Int, _v: RpbCommitHook) = copy(`postcommit` = `postcommit`.updated(_i, _v))
def addPostcommit(_f: RpbCommitHook) = copy(`postcommit` = `postcommit` :+ _f)
def addAllPostcommit(_f: RpbCommitHook*) = copy(`postcommit` = `postcommit` ++ _f)
def addAllPostcommit(_f: TraversableOnce[RpbCommitHook]) = copy(`postcommit` = `postcommit` ++ _f)
def setHasPostcommit(_f: Boolean) = copy(`hasPostcommit` = Some(_f))
def setChashKeyfun(_f: RpbModFun) = copy(`chashKeyfun` = Some(_f))
def setLinkfun(_f: RpbModFun) = copy(`linkfun` = Some(_f))
def setOldVclock(_f: Int) = copy(`oldVclock` = Some(_f))
def setYoungVclock(_f: Int) = copy(`youngVclock` = Some(_f))
def setBigVclock(_f: Int) = copy(`bigVclock` = Some(_f))
def setSmallVclock(_f: Int) = copy(`smallVclock` = Some(_f))
def setPr(_f: Int) = copy(`pr` = Some(_f))
def setR(_f: Int) = copy(`r` = Some(_f))
def setW(_f: Int) = copy(`w` = Some(_f))
def setPw(_f: Int) = copy(`pw` = Some(_f))
def setDw(_f: Int) = copy(`dw` = Some(_f))
def setRw(_f: Int) = copy(`rw` = Some(_f))
def setBasicQuorum(_f: Boolean) = copy(`basicQuorum` = Some(_f))
def setNotfoundOk(_f: Boolean) = copy(`notfoundOk` = Some(_f))
def setBackend(_f: com.google.protobuf.ByteString) = copy(`backend` = Some(_f))
def setSearch(_f: Boolean) = copy(`search` = Some(_f))
def setRepl(_f: RpbBucketProps.RpbReplMode.EnumVal) = copy(`repl` = Some(_f))
def setSearchIndex(_f: com.google.protobuf.ByteString) = copy(`searchIndex` = Some(_f))
def setDatatype(_f: com.google.protobuf.ByteString) = copy(`datatype` = Some(_f))
def setConsistent(_f: Boolean) = copy(`consistent` = Some(_f))
def clearNVal = copy(`nVal` = None)
def clearAllowMult = copy(`allowMult` = None)
def clearLastWriteWins = copy(`lastWriteWins` = None)
def clearPrecommit = copy(`precommit` = Vector.empty[RpbCommitHook])
def clearHasPrecommit = copy(`hasPrecommit` = None)
def clearPostcommit = copy(`postcommit` = Vector.empty[RpbCommitHook])
def clearHasPostcommit = copy(`hasPostcommit` = None)
def clearChashKeyfun = copy(`chashKeyfun` = None)
def clearLinkfun = copy(`linkfun` = None)
def clearOldVclock = copy(`oldVclock` = None)
def clearYoungVclock = copy(`youngVclock` = None)
def clearBigVclock = copy(`bigVclock` = None)
def clearSmallVclock = copy(`smallVclock` = None)
def clearPr = copy(`pr` = None)
def clearR = copy(`r` = None)
def clearW = copy(`w` = None)
def clearPw = copy(`pw` = None)
def clearDw = copy(`dw` = None)
def clearRw = copy(`rw` = None)
def clearBasicQuorum = copy(`basicQuorum` = None)
def clearNotfoundOk = copy(`notfoundOk` = None)
def clearBackend = copy(`backend` = None)
def clearSearch = copy(`search` = None)
def clearRepl = copy(`repl` = None)
def clearSearchIndex = copy(`searchIndex` = None)
def clearDatatype = copy(`datatype` = None)
def clearConsistent = copy(`consistent` = None)
def writeTo(output: com.google.protobuf.CodedOutputStream) {
if (`nVal`.isDefined) output.writeUInt32(1, `nVal`.get)
if (`allowMult`.isDefined) output.writeBool(2, `allowMult`.get)
if (`lastWriteWins`.isDefined) output.writeBool(3, `lastWriteWins`.get)
for (_v ← `precommit`) output.writeMessage(4, _v)
if (`hasPrecommit`.isDefined) output.writeBool(5, `hasPrecommit`.get)
for (_v ← `postcommit`) output.writeMessage(6, _v)
if (`hasPostcommit`.isDefined) output.writeBool(7, `hasPostcommit`.get)
if (`chashKeyfun`.isDefined) output.writeMessage(8, `chashKeyfun`.get)
if (`linkfun`.isDefined) output.writeMessage(9, `linkfun`.get)
if (`oldVclock`.isDefined) output.writeUInt32(10, `oldVclock`.get)
if (`youngVclock`.isDefined) output.writeUInt32(11, `youngVclock`.get)
if (`bigVclock`.isDefined) output.writeUInt32(12, `bigVclock`.get)
if (`smallVclock`.isDefined) output.writeUInt32(13, `smallVclock`.get)
if (`pr`.isDefined) output.writeUInt32(14, `pr`.get)
if (`r`.isDefined) output.writeUInt32(15, `r`.get)
if (`w`.isDefined) output.writeUInt32(16, `w`.get)
if (`pw`.isDefined) output.writeUInt32(17, `pw`.get)
if (`dw`.isDefined) output.writeUInt32(18, `dw`.get)
if (`rw`.isDefined) output.writeUInt32(19, `rw`.get)
if (`basicQuorum`.isDefined) output.writeBool(20, `basicQuorum`.get)
if (`notfoundOk`.isDefined) output.writeBool(21, `notfoundOk`.get)
if (`backend`.isDefined) output.writeBytes(22, `backend`.get)
if (`search`.isDefined) output.writeBool(23, `search`.get)
if (`repl`.isDefined) output.writeEnum(24, `repl`.get)
if (`searchIndex`.isDefined) output.writeBytes(25, `searchIndex`.get)
if (`datatype`.isDefined) output.writeBytes(26, `datatype`.get)
if (`consistent`.isDefined) output.writeBool(27, `consistent`.get)
}
def getSerializedSize = {
import com.google.protobuf.CodedOutputStream._
var __size = 0
if (`nVal`.isDefined) __size += computeUInt32Size(1, `nVal`.get)
if (`allowMult`.isDefined) __size += computeBoolSize(2, `allowMult`.get)
if (`lastWriteWins`.isDefined) __size += computeBoolSize(3, `lastWriteWins`.get)
for (_v ← `precommit`) __size += computeMessageSize(4, _v)
if (`hasPrecommit`.isDefined) __size += computeBoolSize(5, `hasPrecommit`.get)
for (_v ← `postcommit`) __size += computeMessageSize(6, _v)
if (`hasPostcommit`.isDefined) __size += computeBoolSize(7, `hasPostcommit`.get)
if (`chashKeyfun`.isDefined) __size += computeMessageSize(8, `chashKeyfun`.get)
if (`linkfun`.isDefined) __size += computeMessageSize(9, `linkfun`.get)
if (`oldVclock`.isDefined) __size += computeUInt32Size(10, `oldVclock`.get)
if (`youngVclock`.isDefined) __size += computeUInt32Size(11, `youngVclock`.get)
if (`bigVclock`.isDefined) __size += computeUInt32Size(12, `bigVclock`.get)
if (`smallVclock`.isDefined) __size += computeUInt32Size(13, `smallVclock`.get)
if (`pr`.isDefined) __size += computeUInt32Size(14, `pr`.get)
if (`r`.isDefined) __size += computeUInt32Size(15, `r`.get)
if (`w`.isDefined) __size += computeUInt32Size(16, `w`.get)
if (`pw`.isDefined) __size += computeUInt32Size(17, `pw`.get)
if (`dw`.isDefined) __size += computeUInt32Size(18, `dw`.get)
if (`rw`.isDefined) __size += computeUInt32Size(19, `rw`.get)
if (`basicQuorum`.isDefined) __size += computeBoolSize(20, `basicQuorum`.get)
if (`notfoundOk`.isDefined) __size += computeBoolSize(21, `notfoundOk`.get)
if (`backend`.isDefined) __size += computeBytesSize(22, `backend`.get)
if (`search`.isDefined) __size += computeBoolSize(23, `search`.get)
if (`repl`.isDefined) __size += computeEnumSize(24, `repl`.get)
if (`searchIndex`.isDefined) __size += computeBytesSize(25, `searchIndex`.get)
if (`datatype`.isDefined) __size += computeBytesSize(26, `datatype`.get)
if (`consistent`.isDefined) __size += computeBoolSize(27, `consistent`.get)
__size
}
def mergeFrom(in: com.google.protobuf.CodedInputStream, extensionRegistry: com.google.protobuf.ExtensionRegistryLite): RpbBucketProps = {
import com.google.protobuf.ExtensionRegistryLite.{ getEmptyRegistry ⇒ _emptyRegistry }
var __nVal: Option[Int] = `nVal`
var __allowMult: Option[Boolean] = `allowMult`
var __lastWriteWins: Option[Boolean] = `lastWriteWins`
val __precommit: scala.collection.mutable.Buffer[RpbCommitHook] = `precommit`.toBuffer
var __hasPrecommit: Option[Boolean] = `hasPrecommit`
val __postcommit: scala.collection.mutable.Buffer[RpbCommitHook] = `postcommit`.toBuffer
var __hasPostcommit: Option[Boolean] = `hasPostcommit`
var __chashKeyfun: Option[RpbModFun] = `chashKeyfun`
var __linkfun: Option[RpbModFun] = `linkfun`
var __oldVclock: Option[Int] = `oldVclock`
var __youngVclock: Option[Int] = `youngVclock`
var __bigVclock: Option[Int] = `bigVclock`
var __smallVclock: Option[Int] = `smallVclock`
var __pr: Option[Int] = `pr`
var __r: Option[Int] = `r`
var __w: Option[Int] = `w`
var __pw: Option[Int] = `pw`
var __dw: Option[Int] = `dw`
var __rw: Option[Int] = `rw`
var __basicQuorum: Option[Boolean] = `basicQuorum`
var __notfoundOk: Option[Boolean] = `notfoundOk`
var __backend: Option[com.google.protobuf.ByteString] = `backend`
var __search: Option[Boolean] = `search`
var __repl: Option[RpbBucketProps.RpbReplMode.EnumVal] = `repl`
var __searchIndex: Option[com.google.protobuf.ByteString] = `searchIndex`
var __datatype: Option[com.google.protobuf.ByteString] = `datatype`
var __consistent: Option[Boolean] = `consistent`
def __newMerged = RpbBucketProps(
__nVal,
__allowMult,
__lastWriteWins,
Vector(__precommit: _*),
__hasPrecommit,
Vector(__postcommit: _*),
__hasPostcommit,
__chashKeyfun,
__linkfun,
__oldVclock,
__youngVclock,
__bigVclock,
__smallVclock,
__pr,
__r,
__w,
__pw,
__dw,
__rw,
__basicQuorum,
__notfoundOk,
__backend,
__search,
__repl,
__searchIndex,
__datatype,
__consistent)
while (true) in.readTag match {
case 0 ⇒ return __newMerged
case 8 ⇒ __nVal = Some(in.readUInt32())
case 16 ⇒ __allowMult = Some(in.readBool())
case 24 ⇒ __lastWriteWins = Some(in.readBool())
case 34 ⇒ __precommit += readMessage[RpbCommitHook](in, RpbCommitHook.defaultInstance, _emptyRegistry)
case 40 ⇒ __hasPrecommit = Some(in.readBool())
case 50 ⇒ __postcommit += readMessage[RpbCommitHook](in, RpbCommitHook.defaultInstance, _emptyRegistry)
case 56 ⇒ __hasPostcommit = Some(in.readBool())
case 66 ⇒ __chashKeyfun = Some(readMessage[RpbModFun](in, __chashKeyfun.orElse({
__chashKeyfun = RpbModFun.defaultInstance
__chashKeyfun
}).get, _emptyRegistry))
case 74 ⇒ __linkfun = Some(readMessage[RpbModFun](in, __linkfun.orElse({
__linkfun = RpbModFun.defaultInstance
__linkfun
}).get, _emptyRegistry))
case 80 ⇒ __oldVclock = Some(in.readUInt32())
case 88 ⇒ __youngVclock = Some(in.readUInt32())
case 96 ⇒ __bigVclock = Some(in.readUInt32())
case 104 ⇒ __smallVclock = Some(in.readUInt32())
case 112 ⇒ __pr = Some(in.readUInt32())
case 120 ⇒ __r = Some(in.readUInt32())
case 128 ⇒ __w = Some(in.readUInt32())
case 136 ⇒ __pw = Some(in.readUInt32())
case 144 ⇒ __dw = Some(in.readUInt32())
case 152 ⇒ __rw = Some(in.readUInt32())
case 160 ⇒ __basicQuorum = Some(in.readBool())
case 168 ⇒ __notfoundOk = Some(in.readBool())
case 178 ⇒ __backend = Some(in.readBytes())
case 184 ⇒ __search = Some(in.readBool())
case 192 ⇒ __repl = Some(RpbBucketProps.RpbReplMode.valueOf(in.readEnum()))
case 202 ⇒ __searchIndex = Some(in.readBytes())
case 210 ⇒ __datatype = Some(in.readBytes())
case 216 ⇒ __consistent = Some(in.readBool())
case default ⇒ if (!in.skipField(default)) return __newMerged
}
null
}
def mergeFrom(m: RpbBucketProps) = {
RpbBucketProps(
m.`nVal`.orElse(`nVal`),
m.`allowMult`.orElse(`allowMult`),
m.`lastWriteWins`.orElse(`lastWriteWins`),
`precommit` ++ m.`precommit`,
m.`hasPrecommit`.orElse(`hasPrecommit`),
`postcommit` ++ m.`postcommit`,
m.`hasPostcommit`.orElse(`hasPostcommit`),
m.`chashKeyfun`.orElse(`chashKeyfun`),
m.`linkfun`.orElse(`linkfun`),
m.`oldVclock`.orElse(`oldVclock`),
m.`youngVclock`.orElse(`youngVclock`),
m.`bigVclock`.orElse(`bigVclock`),
m.`smallVclock`.orElse(`smallVclock`),
m.`pr`.orElse(`pr`),
m.`r`.orElse(`r`),
m.`w`.orElse(`w`),
m.`pw`.orElse(`pw`),
m.`dw`.orElse(`dw`),
m.`rw`.orElse(`rw`),
m.`basicQuorum`.orElse(`basicQuorum`),
m.`notfoundOk`.orElse(`notfoundOk`),
m.`backend`.orElse(`backend`),
m.`search`.orElse(`search`),
m.`repl`.orElse(`repl`),
m.`searchIndex`.orElse(`searchIndex`),
m.`datatype`.orElse(`datatype`),
m.`consistent`.orElse(`consistent`))
}
def getDefaultInstanceForType = RpbBucketProps.defaultInstance
def clear = getDefaultInstanceForType
def isInitialized = true
def build = this
def buildPartial = this
def parsePartialFrom(cis: com.google.protobuf.CodedInputStream, er: com.google.protobuf.ExtensionRegistryLite) = mergeFrom(cis, er)
override def getParserForType = this
def newBuilderForType = getDefaultInstanceForType
def toBuilder = this
def toJson(indent: Int = 0): String = "ScalaBuff JSON generation not enabled. Use --generate_json_method to enable."
}
object RpbBucketProps {
@beans.BeanProperty val defaultInstance = new RpbBucketProps()
def parseFrom(data: Array[Byte]): RpbBucketProps = defaultInstance.mergeFrom(data)
def parseFrom(data: Array[Byte], offset: Int, length: Int): RpbBucketProps = defaultInstance.mergeFrom(data, offset, length)
def parseFrom(byteString: com.google.protobuf.ByteString): RpbBucketProps = defaultInstance.mergeFrom(byteString)
def parseFrom(stream: java.io.InputStream): RpbBucketProps = defaultInstance.mergeFrom(stream)
def parseDelimitedFrom(stream: java.io.InputStream): Option[RpbBucketProps] = defaultInstance.mergeDelimitedFromStream(stream)
val N_VAL_FIELD_NUMBER = 1
val ALLOW_MULT_FIELD_NUMBER = 2
val LAST_WRITE_WINS_FIELD_NUMBER = 3
val PRECOMMIT_FIELD_NUMBER = 4
val HAS_PRECOMMIT_FIELD_NUMBER = 5
val POSTCOMMIT_FIELD_NUMBER = 6
val HAS_POSTCOMMIT_FIELD_NUMBER = 7
val CHASH_KEYFUN_FIELD_NUMBER = 8
val LINKFUN_FIELD_NUMBER = 9
val OLD_VCLOCK_FIELD_NUMBER = 10
val YOUNG_VCLOCK_FIELD_NUMBER = 11
val BIG_VCLOCK_FIELD_NUMBER = 12
val SMALL_VCLOCK_FIELD_NUMBER = 13
val PR_FIELD_NUMBER = 14
val R_FIELD_NUMBER = 15
val W_FIELD_NUMBER = 16
val PW_FIELD_NUMBER = 17
val DW_FIELD_NUMBER = 18
val RW_FIELD_NUMBER = 19
val BASIC_QUORUM_FIELD_NUMBER = 20
val NOTFOUND_OK_FIELD_NUMBER = 21
val BACKEND_FIELD_NUMBER = 22
val SEARCH_FIELD_NUMBER = 23
val REPL_FIELD_NUMBER = 24
val SEARCH_INDEX_FIELD_NUMBER = 25
val DATATYPE_FIELD_NUMBER = 26
val CONSISTENT_FIELD_NUMBER = 27
def newBuilder = defaultInstance.newBuilderForType
def newBuilder(prototype: RpbBucketProps) = defaultInstance.mergeFrom(prototype)
object RpbReplMode extends net.sandrogrzicic.scalabuff.Enum {
sealed trait EnumVal extends Value
val _UNINITIALIZED = new EnumVal { val name = "UNINITIALIZED ENUM VALUE"; val id = -1 }
val FALSE = new EnumVal { val name = "FALSE"; val id = 0 }
val REALTIME = new EnumVal { val name = "REALTIME"; val id = 1 }
val FULLSYNC = new EnumVal { val name = "FULLSYNC"; val id = 2 }
val TRUE = new EnumVal { val name = "TRUE"; val id = 3 }
val FALSE_VALUE = 0
val REALTIME_VALUE = 1
val FULLSYNC_VALUE = 2
val TRUE_VALUE = 3
def valueOf(id: Int) = id match {
case 0 ⇒ FALSE
case 1 ⇒ REALTIME
case 2 ⇒ FULLSYNC
case 3 ⇒ TRUE
case _default ⇒ throw new net.sandrogrzicic.scalabuff.UnknownEnumException(_default)
}
val internalGetValueMap = new com.google.protobuf.Internal.EnumLiteMap[EnumVal] {
def findValueByNumber(id: Int): EnumVal = valueOf(id)
}
}
}
final case class RpbAuthReq(
`user`: com.google.protobuf.ByteString = com.google.protobuf.ByteString.EMPTY,
`password`: com.google.protobuf.ByteString = com.google.protobuf.ByteString.EMPTY) extends com.google.protobuf.GeneratedMessageLite
with com.google.protobuf.MessageLite.Builder
with net.sandrogrzicic.scalabuff.Message[RpbAuthReq]
with net.sandrogrzicic.scalabuff.Parser[RpbAuthReq] {
def writeTo(output: com.google.protobuf.CodedOutputStream) {
output.writeBytes(1, `user`)
output.writeBytes(2, `password`)
}
def getSerializedSize = {
import com.google.protobuf.CodedOutputStream._
var __size = 0
__size += computeBytesSize(1, `user`)
__size += computeBytesSize(2, `password`)
__size
}
def mergeFrom(in: com.google.protobuf.CodedInputStream, extensionRegistry: com.google.protobuf.ExtensionRegistryLite): RpbAuthReq = {
import com.google.protobuf.ExtensionRegistryLite.{ getEmptyRegistry ⇒ _emptyRegistry }
var __user: com.google.protobuf.ByteString = com.google.protobuf.ByteString.EMPTY
var __password: com.google.protobuf.ByteString = com.google.protobuf.ByteString.EMPTY
def __newMerged = RpbAuthReq(
__user,
__password)
while (true) in.readTag match {
case 0 ⇒ return __newMerged
case 10 ⇒ __user = in.readBytes()
case 18 ⇒ __password = in.readBytes()
case default ⇒ if (!in.skipField(default)) return __newMerged
}
null
}
def mergeFrom(m: RpbAuthReq) = {
RpbAuthReq(
m.`user`,
m.`password`)
}
def getDefaultInstanceForType = RpbAuthReq.defaultInstance
def clear = getDefaultInstanceForType
def isInitialized = true
def build = this
def buildPartial = this
def parsePartialFrom(cis: com.google.protobuf.CodedInputStream, er: com.google.protobuf.ExtensionRegistryLite) = mergeFrom(cis, er)
override def getParserForType = this
def newBuilderForType = getDefaultInstanceForType
def toBuilder = this
def toJson(indent: Int = 0): String = "ScalaBuff JSON generation not enabled. Use --generate_json_method to enable."
}
object RpbAuthReq {
@beans.BeanProperty val defaultInstance = new RpbAuthReq()
def parseFrom(data: Array[Byte]): RpbAuthReq = defaultInstance.mergeFrom(data)
def parseFrom(data: Array[Byte], offset: Int, length: Int): RpbAuthReq = defaultInstance.mergeFrom(data, offset, length)
def parseFrom(byteString: com.google.protobuf.ByteString): RpbAuthReq = defaultInstance.mergeFrom(byteString)
def parseFrom(stream: java.io.InputStream): RpbAuthReq = defaultInstance.mergeFrom(stream)
def parseDelimitedFrom(stream: java.io.InputStream): Option[RpbAuthReq] = defaultInstance.mergeDelimitedFromStream(stream)
val USER_FIELD_NUMBER = 1
val PASSWORD_FIELD_NUMBER = 2
def newBuilder = defaultInstance.newBuilderForType
def newBuilder(prototype: RpbAuthReq) = defaultInstance.mergeFrom(prototype)
}
object RiakPB {
def registerAllExtensions(registry: com.google.protobuf.ExtensionRegistryLite) {
}
}
| gideondk/Raiku | src/main/scala/com/basho/riak/protobuf/RiakPB.scala | Scala | apache-2.0 | 54,688 |
package com.sksamuel.scapegoat.inspections.math
import com.sksamuel.scapegoat._
/**
* @author
* Stephen Samuel
*/
class BigDecimalScaleWithoutRoundingMode
extends Inspection(
text = "BigDecimal `setScale()` without rounding mode",
defaultLevel = Levels.Warning,
description =
"Checks for use of `setScale()` on a BigDecimal without setting the rounding mode can throw an exception.",
explanation =
"When using `setScale()` on a BigDecimal without setting the rounding mode, this can throw an exception " +
"if rounding is required. Did you mean to call `setScale(s, RoundingMode.XYZ)`?"
) {
def inspector(context: InspectionContext): Inspector =
new Inspector(context) {
override def postTyperTraverser: context.Traverser =
new context.Traverser {
import context.global._
private def isBigDecimal(t: Tree) =
t.tpe <:< typeOf[BigDecimal] || t.tpe <:< typeOf[java.math.BigDecimal]
override def inspect(tree: Tree): Unit = {
tree match {
case Apply(Select(lhs, TermName("setScale")), List(_)) if isBigDecimal(lhs) =>
context.warn(tree.pos, self)
case _ => continue(tree)
}
}
}
}
}
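
// A minimal sketch (not part of the original inspection) of code this rule flags and
// of the suggested fix; the literal values are illustrative only:
//
//   import scala.math.BigDecimal.RoundingMode
//   BigDecimal("2.345").setScale(2)                        // flagged: throws ArithmeticException if rounding is needed
//   BigDecimal("2.345").setScale(2, RoundingMode.HALF_UP)  // preferred: rounding mode made explicit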
| sksamuel/scapegoat | src/main/scala/com/sksamuel/scapegoat/inspections/math/BigDecimalScaleWithoutRoundingMode.scala | Scala | apache-2.0 | 1,297 |
package de.htwg.zeta.server.routing
import com.google.inject.Provider
import de.htwg.zeta.server.controller.ActivateAccountController
import de.htwg.zeta.server.controller.ApplicationController
import de.htwg.zeta.server.controller.ChangePasswordController
import de.htwg.zeta.server.controller.DynamicFileController
import de.htwg.zeta.server.controller.ForgotPasswordController
import de.htwg.zeta.server.controller.MetaModelController
import de.htwg.zeta.server.controller.ModelController
import de.htwg.zeta.server.controller.ResetPasswordController
import de.htwg.zeta.server.controller.SignInController
import de.htwg.zeta.server.controller.SignUpController
import de.htwg.zeta.server.controller.WebSocketController
import de.htwg.zeta.server.controller.codeEditor.CodeEditorController
import de.htwg.zeta.server.controller.generatorControlForwader.GeneratorControlController
import de.htwg.zeta.server.controller.restApi.BondedTaskRestApi
import de.htwg.zeta.server.controller.restApi.DslRestApi
import de.htwg.zeta.server.controller.restApi.EventDrivenTaskRestApi
import de.htwg.zeta.server.controller.restApi.FileRestApi
import de.htwg.zeta.server.controller.restApi.FilterRestApi
import de.htwg.zeta.server.controller.restApi.GeneratorImageRestApi
import de.htwg.zeta.server.controller.restApi.GeneratorRestApi
import de.htwg.zeta.server.controller.restApi.GraphicalDslRestApi
import de.htwg.zeta.server.controller.restApi.MetaModelReleaseRestApi
import de.htwg.zeta.server.controller.restApi.ModelRestApi
import de.htwg.zeta.server.controller.restApi.TimedTaskRestApi
import de.htwg.zeta.server.controller.restApi.v2
import de.htwg.zeta.server.controller.webpage.WebpageController
import javax.inject.Inject
import org.webjars.play.WebJarAssets
/**
*/
class WebControllerContainer @Inject() private(
val backendController: Provider[GeneratorControlController],
val applicationController: Provider[ApplicationController],
val signUpController: Provider[SignUpController],
val signInController: Provider[SignInController],
val forgotPasswordController: Provider[ForgotPasswordController],
val resetPasswordController: Provider[ResetPasswordController],
val changePasswordController: Provider[ChangePasswordController],
val activateAccountController: Provider[ActivateAccountController],
val webpageController: Provider[WebpageController],
val metaModelController: Provider[MetaModelController],
val modelController: Provider[ModelController],
val codeEditorController: Provider[CodeEditorController],
val webJarAssets: Provider[WebJarAssets],
val dynamicFileController: Provider[DynamicFileController],
val metaModelRestApi: Provider[GraphicalDslRestApi],
val metaModelRestApiV2: Provider[v2.GraphicalDslRestApi],
val modelRestApi: Provider[ModelRestApi],
val generatorImageRestApi: Provider[GeneratorImageRestApi],
val generatorRestApi: Provider[GeneratorRestApi],
val filterRestApi: Provider[FilterRestApi],
val metaModelReleaseRestApi: Provider[MetaModelReleaseRestApi],
val bondedTaskRestApi: Provider[BondedTaskRestApi],
val eventDrivenTaskRestApi: Provider[EventDrivenTaskRestApi],
val timedTaskRestApi: Provider[TimedTaskRestApi],
val fileRestApi: Provider[FileRestApi],
val dslRestApi: Provider[DslRestApi],
val webSocket: Provider[WebSocketController]
)
| Zeta-Project/zeta | api/server/app/de/htwg/zeta/server/routing/WebControllerContainer.scala | Scala | bsd-2-clause | 3,383 |
package polyfunic
/**
Represents a sequent in a proof tree.
*/
sealed trait SequentLike {
val lhs : List[ScalaType]
val rhs : ScalaType
}
/**
Represents a sequent with two contexts (lefts - Gamma, rights - Delta) and a primary formula on the left hand side.
*/
case class Context( lefts : List[ScalaType], focus : ScalaType, rights : List[ScalaType], rhs : ScalaType ) extends SequentLike {
override def toString : String = {
val lhs : List[String] = lefts.map(_.toString) ++ (("[" + focus.toString + "]") :: rights.map(_.toString))
(lhs mkString ",") + " |- " + rhs
}
override val lhs = lefts ++ (focus :: rights)
def furtherChoices : BT[Context] = BT.unit(this) interleave { rights match {
case Nil => Failure()
case (newFocus :: newRights) => {
Context( lefts :+ focus, newFocus, newRights, rhs ).furtherChoices
}}
}
def add( xs : ScalaType* ) : Context = Context( xs.toList ++ lefts, focus, rights, rhs )
def replace( x : ScalaType ) : Context = Context( lefts, focus, rights, x )
def unfocus : Sequent = Sequent( lefts ++ rights, rhs )
}
/**
Represents a sequent not divided into contexts on the left hand side.
*/
case class Sequent( lhs : List[ScalaType], rhs : ScalaType ) extends SequentLike {
override def toString : String = {
(lhs mkString ",") + " |- " + rhs
}
def add( xs : List[ScalaType] ) : Sequent = Sequent( xs ++ lhs, rhs )
def replace( rhs : ScalaType ) : Sequent = Sequent( lhs, rhs )
/**
Represents all possible choices of contexts on the left hand side.
*/
def choices : BT[Context] = lhs match {
case Nil => Failure()
case (focus :: rights) => Context(Nil,focus,rights,rhs).furtherChoices
}
}
| JerrySwan/PolyFunic-pub | Sequent.scala | Scala | bsd-3-clause | 1,731 |
/*
* Copyright (C) 2009-2017 Lightbend Inc. <https://www.lightbend.com>
*/
package scalaguide.async.scalaasync
import javax.inject.Inject
import scala.concurrent._
import akka.actor._
import play.api._
import play.api.mvc._
import play.api.test._
class ScalaAsyncSpec extends PlaySpecification {
def samples(implicit app: Application): ScalaAsyncSamples = app.injector.instanceOf[ScalaAsyncSamples]
"scala async" should {
"allow returning a future" in new WithApplication() {
contentAsString(samples.futureResult) must startWith("PI value computed: 3.14")
}
"allow dispatching an intensive computation" in new WithApplication() {
await(samples.intensiveComp) must_== 10
}
"allow returning an async result" in new WithApplication() {
contentAsString(samples.asyncResult()(FakeRequest())) must_== "Got result: 10"
}
"allow timing out a future" in new WithApplication() {
status(samples.timeout(1200)(FakeRequest())) must_== INTERNAL_SERVER_ERROR
status(samples.timeout(10)(FakeRequest())) must_== OK
}
}
}
//#my-execution-context
import play.api.libs.concurrent.CustomExecutionContext
trait MyExecutionContext extends ExecutionContext
class MyExecutionContextImpl @Inject()(system: ActorSystem)
extends CustomExecutionContext(system, "my.executor") with MyExecutionContext
class HomeController @Inject()(myExecutionContext: MyExecutionContext) extends Controller {
def index = Action.async {
Future {
// Call some blocking API
Ok("result of blocking call")
}(myExecutionContext)
}
}
//#my-execution-context
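
// A hypothetical `application.conf` entry that the "my.executor" lookup above could resolve to
// (standard Akka dispatcher settings; the executor type and pool size are assumptions):
//
//   my.executor {
//     type = Dispatcher
//     executor = "thread-pool-executor"
//     thread-pool-executor {
//       fixed-pool-size = 16
//     }
//   }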
class ScalaAsyncSamples @Inject() (implicit actorSystem: ActorSystem, ec: ExecutionContext) extends Controller {
def futureResult = {
def computePIAsynchronously() = Future.successful(3.14)
//#future-result
val futurePIValue: Future[Double] = computePIAsynchronously()
val futureResult: Future[Result] = futurePIValue.map { pi =>
Ok("PI value computed: " + pi)
}
//#future-result
futureResult
}
def intensiveComputation() = 10
def intensiveComp = {
//#intensive-computation
val futureInt: Future[Int] = scala.concurrent.Future {
intensiveComputation()
}
//#intensive-computation
futureInt
}
def asyncResult = {
//#async-result
def index = Action.async {
val futureInt = scala.concurrent.Future { intensiveComputation() }
futureInt.map(i => Ok("Got result: " + i))
}
//#async-result
index
}
def timeout(t: Long) = {
def intensiveComputation() = Future {
Thread.sleep(t)
10
}
//#timeout
import scala.concurrent.duration._
import play.api.libs.concurrent.Futures._
def index = Action.async {
// futures instance implicit here
intensiveComputation().withTimeout(1.seconds).map { i =>
Ok("Got result: " + i)
}.recover {
case e: TimeoutException =>
InternalServerError("timeout")
}
}
//#timeout
index
}
}
| wsargent/playframework | documentation/manual/working/scalaGuide/main/async/code/ScalaAsync.scala | Scala | apache-2.0 | 3,031 |
package com.twitter.scalding.reducer_estimation
import cascading.flow.{ FlowStep, Flow, FlowStepStrategy }
import com.twitter.algebird.Monoid
import com.twitter.scalding.{ StringUtility, Config }
import org.apache.hadoop.mapred.JobConf
import java.util.{ List => JList }
import scala.collection.JavaConverters._
import scala.util.Try
object EstimatorConfig {
/** Output param: what the Reducer Estimator recommended, regardless of if it was used. */
val estimatedNumReducers = "scalding.reducer.estimator.result"
/** Output param: what the original job config was. */
val originalNumReducers = "scalding.reducer.estimator.original.mapred.reduce.tasks"
/** Maximum number of history items to use for reducer estimation. */
val maxHistoryKey = "scalding.reducer.estimator.max.history"
def getMaxHistory(conf: JobConf): Int = conf.getInt(maxHistoryKey, 1)
}
case class FlowStrategyInfo(
flow: Flow[JobConf],
predecessorSteps: Seq[FlowStep[JobConf]],
step: FlowStep[JobConf])
class ReducerEstimator {
/**
* Estimate how many reducers should be used. Called for each FlowStep before
* it is scheduled. Custom reducer estimators should override this rather than
* apply() directly.
*
* @param info Holds information about the overall flow (.flow),
* previously-run steps (.predecessorSteps),
* and the current step (.step).
* @return Number of reducers recommended by the estimator, or None to keep the default.
*/
def estimateReducers(info: FlowStrategyInfo): Option[Int] = None
}
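
/**
 * A minimal sketch (not part of the original file) of a custom estimator that always
 * recommends a fixed number of reducers. The constant is purely illustrative; a real
 * estimator would derive the value from `info`, e.g. from input sizes or prior runs.
 */
class FixedReducerEstimator extends ReducerEstimator {
  override def estimateReducers(info: FlowStrategyInfo): Option[Int] = Some(16)
}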
case class FallbackEstimator(first: ReducerEstimator, fallback: ReducerEstimator) extends ReducerEstimator {
override def estimateReducers(info: FlowStrategyInfo): Option[Int] =
first.estimateReducers(info) orElse fallback.estimateReducers(info)
}
object ReducerEstimatorStepStrategy extends FlowStepStrategy[JobConf] {
implicit val estimatorMonoid: Monoid[ReducerEstimator] = new Monoid[ReducerEstimator] {
override def zero: ReducerEstimator = new ReducerEstimator
override def plus(l: ReducerEstimator, r: ReducerEstimator): ReducerEstimator =
FallbackEstimator(l, r)
}
/**
* Make reducer estimate, possibly overriding explicitly-set numReducers,
* and save useful info (such as the default & estimate) in JobConf for
* later consumption.
*
* Called by Cascading at the start of each job step.
*/
final override def apply(flow: Flow[JobConf],
preds: JList[FlowStep[JobConf]],
step: FlowStep[JobConf]): Unit = {
val conf = step.getConfig
val flowNumReducers = flow.getConfig.get(Config.HadoopNumReducers)
val stepNumReducers = conf.get(Config.HadoopNumReducers)
// assuming that if the step's reducers is different than the default for the flow,
// it was probably set by `withReducers` explicitly. This isn't necessarily true --
// Cascading may have changed it for its own reasons.
// TODO: disambiguate this by setting something in JobConf when `withReducers` is called
// (will be addressed by https://github.com/twitter/scalding/pull/973)
val setExplicitly = flowNumReducers != stepNumReducers
// log in JobConf what was explicitly set by 'withReducers'
if (setExplicitly) conf.set(EstimatorConfig.originalNumReducers, stepNumReducers)
// whether we should override explicitly-specified numReducers
val overrideExplicit = conf.getBoolean(Config.ReducerEstimatorOverride, false)
Option(conf.get(Config.ReducerEstimators)).map { clsNames =>
val clsLoader = Thread.currentThread.getContextClassLoader
val estimators = StringUtility.fastSplit(clsNames, ",")
.map(clsLoader.loadClass(_).newInstance.asInstanceOf[ReducerEstimator])
val combinedEstimator = Monoid.sum(estimators)
// try to make estimate
val info = FlowStrategyInfo(flow, preds.asScala, step)
// if still None, make it '-1' to make it simpler to log
val numReducers = combinedEstimator.estimateReducers(info)
// save the estimate in the JobConf which should be saved by hRaven
conf.setInt(EstimatorConfig.estimatedNumReducers, numReducers.getOrElse(-1))
// set number of reducers
if (!setExplicitly || overrideExplicit) {
numReducers.foreach(conf.setNumReduceTasks)
}
}
}
}
/**
* Info about a prior FlowStep, provided by implementers of HistoryService
*/
sealed trait FlowStepHistory {
/** Size of input to mappers (in bytes) */
def mapperBytes: Long
/** Size of input to reducers (in bytes) */
def reducerBytes: Long
}
object FlowStepHistory {
def apply(m: Long, r: Long) = new FlowStepHistory {
override def mapperBytes: Long = m
override def reducerBytes: Long = r
}
}
/**
* Provider of information about prior runs.
*/
trait HistoryService {
/**
* Retrieve history for matching FlowSteps, up to `max`
*/
def fetchHistory(f: FlowStep[JobConf], max: Int): Try[Seq[FlowStepHistory]]
}
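
/**
 * A minimal sketch (not part of the original file) of a HistoryService with no data to
 * offer; a real implementation would query a job-history store such as hRaven.
 */
class EmptyHistoryService extends HistoryService {
  def fetchHistory(f: FlowStep[JobConf], max: Int): Try[Seq[FlowStepHistory]] =
    scala.util.Success(Seq.empty[FlowStepHistory])
}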
| nvoron23/scalding | scalding-core/src/main/scala/com/twitter/scalding/reducer_estimation/Common.scala | Scala | apache-2.0 | 4,973 |
import scala.concurrent.stm._
object HelloWorld {
def main(args: Array[String]) {
val x = Ref("hello world!")
println(x.single())
}
}
| djspiewak/scala-stm | dep_tests/sbt/src/main/scala/HelloWorld.scala | Scala | bsd-3-clause | 147 |
package org.sisioh.aws4s.sqs.model
import com.amazonaws.services.sqs.model._
import org.sisioh.aws4s.PimpedType
import scala.collection.JavaConverters._
object DeleteMessageBatchRequestFactory {
def create(): DeleteMessageBatchRequest = new DeleteMessageBatchRequest()
def create(queueUrl: String): DeleteMessageBatchRequest =
new DeleteMessageBatchRequest(queueUrl)
}
class RichDeleteMessageBatchRequest(val underlying: DeleteMessageBatchRequest)
extends AnyVal
with PimpedType[DeleteMessageBatchRequest] {
def queueUrlOpt: Option[String] = Option(underlying.getQueueUrl)
def queueUrlOpt_=(value: Option[String]): Unit =
underlying.setQueueUrl(value.orNull)
def withQueueUrlOpt(value: Option[String]): DeleteMessageBatchRequest =
underlying.withQueueUrl(value.orNull)
// ---
def entries: Seq[DeleteMessageBatchRequestEntry] =
underlying.getEntries.asScala.toVector
def entries_=(value: Seq[DeleteMessageBatchRequestEntry]): Unit =
underlying.setEntries(value.asJava)
def withEntries(value: Seq[DeleteMessageBatchRequestEntry]): DeleteMessageBatchRequest =
underlying.withEntries(value.asJava)
}
| sisioh/aws4s | aws4s-sqs/src/main/scala/org/sisioh/aws4s/sqs/model/RichDeleteMessageBatchRequest.scala | Scala | mit | 1,159 |
package example
import org.scalatest.FunSuite
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
/**
* This class implements a ScalaTest test suite for the methods in object
* `Lists` that need to be implemented as part of this assignment. A test
* suite is simply a collection of individual tests for some specific
* component of a program.
*
* A test suite is created by defining a class which extends the type
* `org.scalatest.FunSuite`. When running ScalaTest, it will automatically
* find this class and execute all of its tests.
*
* Adding the `@RunWith` annotation enables the test suite to be executed
* inside eclipse using the built-in JUnit test runner.
*
* You have two options for running this test suite:
*
* - Start the sbt console and run the "test" command
   *  - Right-click this file in eclipse and choose "Run As" - "JUnit Test"
*/
@RunWith(classOf[JUnitRunner])
class ListsSuite extends FunSuite {
/**
* Tests are written using the `test` operator which takes two arguments:
*
* - A description of the test. This description has to be unique, no two
* tests can have the same description.
* - The test body, a piece of Scala code that implements the test
*
* The most common way to implement a test body is using the method `assert`
* which tests that its argument evaluates to `true`. So one of the simplest
* successful tests is the following:
*/
test("one plus one is two")(assert(1 + 1 == 2))
/**
* In Scala, it is allowed to pass an argument to a method using the block
* syntax, i.e. `{ argument }` instead of parentheses `(argument)`.
*
* This allows tests to be written in a more readable manner:
*/
test("one plus one is three?") {
    assert(1 + 1 != 3) // Fixed: the original template asserted `1 + 1 == 3`, which failed.
}
/**
* One problem with the previous (failing) test is that ScalaTest will
* only tell you that a test failed, but it will not tell you what was
* the reason for the failure. The output looks like this:
*
* {{{
* [info] - one plus one is three? *** FAILED ***
* }}}
*
* This situation can be improved by using a special equality operator
* `===` instead of `==` (this is only possible in ScalaTest). So if you
* run the next test, ScalaTest will show the following output:
*
* {{{
* [info] - details why one plus one is not three *** FAILED ***
* [info] 2 did not equal 3 (ListsSuite.scala:67)
* }}}
*
   * We recommend always using the `===` equality operator when writing tests.
*/
test("details why one plus one is not three") {
    assert(1 + 1 === 2) // Fixed: `===` reports the compared values when the assertion fails.
}
/**
   * In order to test the exceptional behavior of a method, ScalaTest offers
* the `intercept` operation.
*
* In the following example, we test the fact that the method `intNotZero`
* throws an `IllegalArgumentException` if its argument is `0`.
*/
test("intNotZero throws an exception if its argument is 0") {
intercept[IllegalArgumentException] {
intNotZero(0)
}
}
def intNotZero(x: Int): Int = {
if (x == 0) throw new IllegalArgumentException("zero is not allowed")
else x
}
/**
* Now we finally write some tests for the list functions that have to be
* implemented for this assignment. We fist import all members of the
* `List` object.
*/
import Lists._
/**
* We only provide two very basic tests for you. Write more tests to make
* sure your `sum` and `max` methods work as expected.
*
* In particular, write tests for corner cases: negative numbers, zeros,
* empty lists, lists with repeated elements, etc.
*
   * It is allowed to have multiple `assert` statements inside one test;
   * however, it is recommended to write an individual `test` statement for
* every tested aspect of a method.
*/
test("sum of a few numbers") {
assert(sum(List(1,2,0)) === 3)
}
test("max of a few numbers") {
assert(max(List(3, 7, 2)) === 7)
}
test("sum of negtive numbers") {
assert(sum(List(-1,-1,-2)) === -4)
}
test("sum of zeros") {
    assert(sum(List(0,0,0)) === 0)
}
test("sum of empty list") {
    assert(sum(List()) === 0)
}
test("max of empty list") {
intercept[NoSuchElementException] {
max(List())
}
}
test("max of repeat numbers"){
assert(max(List(-1,-1,-1)) == -1)
}
test("max of negtive numbers"){
assert(max(List(-1,-2,-3)) == -1)
}
}
| RominYue/sourceCode | Functional-Programming-Principles-in-Scala/week0/example/src/test/scala/example/ListsSuite.scala | Scala | gpl-3.0 | 4,551 |
// Copyright (C) 2020 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.gerrit.scenarios
import java.io.{File, IOException}
import com.github.barbasa.gatling.git.GitRequestSession
import com.github.barbasa.gatling.git.protocol.GitProtocol
import com.github.barbasa.gatling.git.request.builder.GitRequestBuilder
import io.gatling.core.Predef._
import org.apache.commons.io.FileUtils
import org.eclipse.jgit.hooks.CommitMsgHook
class GitSimulation extends GerritSimulation {
implicit val postMessageHook: Option[String] = Some(s"hooks/${CommitMsgHook.NAME}")
protected val gitRequest = new GitRequestBuilder(GitRequestSession("${cmd}", "${url}"))
protected val gitProtocol: GitProtocol = GitProtocol()
after {
Thread.sleep(5000)
val path = conf.tmpBasePath
try {
FileUtils.deleteDirectory(new File(path))
} catch {
case e: IOException =>
System.err.println("Unable to delete temporary directory " + path)
e.printStackTrace()
}
}
}
| WANdisco/gerrit | e2e-tests/src/test/scala/com/google/gerrit/scenarios/GitSimulation.scala | Scala | apache-2.0 | 1,548 |
/*
* Copyright 2007-2011 WorldWide Conferencing, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.liftweb
package actor
import java.util.concurrent._
/**
* Rules for dealing with thread pools, both in lift-actor and
* in lift-util
*/
object ThreadPoolRules {
/**
* When threads are created in the thread factories, should
* they null the context class loader. By default false,
   * but if you set it to true, Tomcat complains less about stuff.
* Must be set in the first line of Boot.scala
*/
@volatile var nullContextClassLoader: Boolean = false
}
/**
* LAPinger is for scheduling LiftActors to be pinged with an arbitrary message at some point
* in the future.
*/
object LAPinger {
  /** The underlying <code>java.util.concurrent.ScheduledExecutorService</code> */
private var service = Executors.newSingleThreadScheduledExecutor(TF)
/**
* Re-create the underlying <code>SingleThreadScheduledExecutor</code>
*/
def restart: Unit = synchronized {
if ((service eq null) || service.isShutdown)
service = Executors.newSingleThreadScheduledExecutor(TF)
}
/**
* Shut down the underlying <code>SingleThreadScheduledExecutor</code>
*/
def shutdown: Unit = synchronized {
service.shutdown
}
/**
* Schedules the sending of a message to occur after the specified delay.
*
* @param to The LiftActor to send the message to.
* @param msg The message to send.
* @param delay The number of milliseconds to delay before sending msg
* @return a <code>ScheduledFuture</code> which sends the <code>msg</code> to
   * the <code>to</code> Actor after the specified <code>delay</code> in milliseconds.
*/
def schedule[T](to: SpecializedLiftActor[T], msg: T, delay: Long): ScheduledFuture[Unit] = {
val r = new Callable[Unit] {
def call: Unit = {
to ! msg
}
}
try {
service.schedule(r, delay, TimeUnit.MILLISECONDS)
} catch {
case e: RejectedExecutionException => throw PingerException(msg + " could not be scheduled on " + to, e)
}
}
}
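
// Usage sketch (the actor and message below are assumptions, not part of this file):
//
//   object Wakeup
//   class Worker extends LiftActor {
//     protected def messageHandler = { case Wakeup => /* do periodic work */ }
//   }
//   val worker = new Worker
//   val future = LAPinger.schedule(worker, Wakeup, 5000L) // ping `worker` in 5 seconds
//   future.cancel(false)                                  // the returned ScheduledFuture can be cancelled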
/**
* Exception thrown if a ping can't be scheduled.
*/
case class PingerException(msg: String, e: Throwable) extends RuntimeException(msg, e)
private object TF extends ThreadFactory {
val threadFactory = Executors.defaultThreadFactory()
def newThread(r: Runnable): Thread = {
val d: Thread = threadFactory.newThread(r)
d setName "ActorPinger"
d setDaemon true
if (ThreadPoolRules.nullContextClassLoader) {
d setContextClassLoader null
}
d
}
}
| lift/framework | core/actor/src/main/scala/net/liftweb/actor/LAPinger.scala | Scala | apache-2.0 | 3,066 |
package org.hqjpa.generator
import java.util.regex.Pattern
import java.util.regex.Matcher
/**
* Companion object for related class.<br/>
* <br/>
* Static members are thread safe, intance members are not.
*/
object JpaEntityMetadataParser {
/** Model of JPA entity meta-data. */
class Model {
/** Package line. */
var packageLine : String = "";
/** Import statements. */
var imports : Seq[String] = Vector();
/** JPA entity meta-data class. */
val clasz : ClassModel = new ClassModel();
}
/**
* Model of JPA entity meta-data class.
*/
class ClassModel {
/** Name of meta-data class. */
var className : String = "";
/** Name of related entity. */
var entityName : String = "";
/** Annotations. */
var annotations : Seq[String] = Vector();
/** Fields. */
var fields : Seq[FieldModel] = Vector();
}
/**
* Model of JPA entity meta-data class field.
*/
class FieldModel {
/** Modifiers. */
var modifiers : String = "";
/** Name of base type. */
var tyype : String = "";
/** Names of type arguments. */
var typeArgs : Seq[String] = Vector();
/** Field name. */
var name : String = "";
}
}
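
/*
 * Example (hypothetical) of the kind of generated JPA metamodel source this parser expects
 * as `fileText`; package, class and field names are illustrative only:
 *
 *   package com.example.model;
 *
 *   import javax.persistence.metamodel.SingularAttribute;
 *   import javax.persistence.metamodel.StaticMetamodel;
 *
 *   @StaticMetamodel(User.class)
 *   public abstract class User_ {
 *       public static volatile SingularAttribute<User, String> name;
 *   }
 *
 * Parsing it yields a Model with className "User_", entityName "User" and a single field
 * whose base type is "SingularAttribute", type arguments "User" and "String", name "name".
 */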
/**
* Parser for JPA entity meta-data classes.<br/>
* <br/>
* Static members are thread safe, instance members are not.
*
* @param fileText Text of the file containing the JPA entity meta-data class.
*/
class JpaEntityMetadataParser(val fileText : String) {
import JpaEntityMetadataParser._
def run() : Model = {
val model = new Model();
//make modifiable copy of given file text
var text = fileText;
//extract and subtract package name
{
val regex = Pattern.compile(""";""");
model.packageLine = extractIncluding(regex, text).get.trim();
text = subtractIncluding(regex, text)
}
//extract and subtract import statements
{
val regex = Pattern.compile("""import[^;]+;""");
//extract import statements one by one
var tryAgain = true;
while( tryAgain ) {
extractIncluding(regex, text) match {
//success, register statement, subtract from source text, try again
case Some(imporrt) => {
model.imports :+= imporrt.trim();
text = subtractIncluding(regex, text);
}
//failure, stop trying
case None => tryAgain = false;
}
}
}
//extract and subtract class annotations
{
val regex = Pattern.compile("""@[^\\(]+\\([^\\)]+\\)""");
//extract class annotations one by one
var tryAgain = true;
while( tryAgain ) {
extractIncluding(regex, text) match {
//success, register annotation, subtract from source text, try again
case Some(annotation) => {
model.clasz.annotations :+= annotation.trim();
text = subtractIncluding(regex, text);
}
//failure, stop trying
case None => tryAgain = false;
}
}
}
//extract and subtract class name
{
val regex = Pattern.compile("""public abstract class ([^\\{]*) \\{""");
//find class name, subtract the line from text
val matcher = extractNext(regex, text).get;
text = subtractIncluding(regex, text);
//extract class name and related entity class name
model.clasz.className = matcher.group(1);
model.clasz.entityName = model.clasz.className.dropRight(1);
}
//extract fields
{
val regex = Pattern.compile("""(public static volatile) ([^<]+)<([^,]+),([^>]+)> ([^;]+);""");
//split text into lines (up to one field per line)
val lines = text.split("\\n").map { line => line.trim() };
//run through lines
lines.foreach { line =>
//ignore empty lines and closing brace of the class
if( line != "" && line != "}" ) {
val matcher = extractNext(regex, line).get;
//extract field model
val field = new FieldModel();
field.modifiers = matcher.group(1).trim();
field.tyype = matcher.group(2).trim();
field.typeArgs = Vector(matcher.group(3).trim(), matcher.group(4).trim());
field.name = matcher.group(5).trim();
//add field to class
model.clasz.fields :+= field;
}
}
}
//
return model;
}
/**
* Find first match of given regular expression in given string and subtracts from given
* string everything until the end of regular expression match, inclusive.
* @param regex Regular expression to match.
* @param src String to match against.
* @return Resulting string or original string of regular expression did not match.
*/
private def subtractIncluding(regex : Pattern, src : String) : String = {
val matcher = regex.matcher(src);
//search for regular expression
if( matcher.find() ) {
			//drop everything up to the end of the expression match from the given string
val subtractTo = matcher.end + 1;
val newSrc = src.slice(subtractTo, src.length());
//
return newSrc;
}
//regular expression did not match, return given string unmodified
return src;
}
/**
* Matches given expression against given string.
* @param regex Regular expression to match.
* @param src String to match against.
* @return Some matcher on success, None on mismatch.
*/
private def extractNext(regex : Pattern, src : String) : Option[Matcher] = {
val matcher = regex.matcher(src);
if( matcher.find() ) {
return Some(matcher);
}
return None;
}
/**
* Matches given expression against given string. Extracts substring from
* the start of given string until the end of the match, inclusive.
* @param regex Regular expression to match.
* @param src String to match against.
* @return Some string extracted or None on mismatch.
*/
private def extractIncluding(regex : Pattern, src : String) : Option[String] = {
val matcher = regex.matcher(src);
//search for regular expression
if( matcher.find() ) {
//take everything until the end of expression match from given string
val extractUntil = matcher.end + 1;
val result = src.slice(0, extractUntil);
//
return Some(result);
}
//regular expression did not match, return given string unmodified
return None;
}
} | vejobrolis/hqjpa | hqjpa/src/org/hqjpa/generator/JpaEntityMetadataParser.scala | Scala | lgpl-3.0 | 6,084 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.typeutils
import org.apache.flink.api.common.typeinfo.BasicTypeInfo._
import org.apache.flink.api.common.typeinfo._
import org.apache.flink.api.java.typeutils.{MapTypeInfo, ObjectArrayTypeInfo}
import org.apache.flink.table.planner.validate._
import org.apache.flink.table.runtime.typeutils.{BigDecimalTypeInfo, DecimalDataTypeInfo}
import org.apache.flink.table.typeutils.TimeIntervalTypeInfo
object TypeInfoCheckUtils {
def isNumeric(dataType: TypeInformation[_]): Boolean = dataType match {
case _: NumericTypeInfo[_] => true
case BIG_DEC_TYPE_INFO | _: BigDecimalTypeInfo | _: DecimalDataTypeInfo => true
case _ => false
}
def isTemporal(dataType: TypeInformation[_]): Boolean =
isTimePoint(dataType) || isTimeInterval(dataType)
def isTimePoint(dataType: TypeInformation[_]): Boolean =
dataType.isInstanceOf[SqlTimeTypeInfo[_]] || dataType.isInstanceOf[LocalTimeTypeInfo[_]]
def isTimeInterval(dataType: TypeInformation[_]): Boolean =
dataType.isInstanceOf[TimeIntervalTypeInfo[_]]
def isArray(dataType: TypeInformation[_]): Boolean = dataType match {
case _: ObjectArrayTypeInfo[_, _] |
_: BasicArrayTypeInfo[_, _] |
_: PrimitiveArrayTypeInfo[_] => true
case _ => false
}
def isMap(dataType: TypeInformation[_]): Boolean =
dataType.isInstanceOf[MapTypeInfo[_, _]]
def assertNumericExpr(
dataType: TypeInformation[_],
caller: String)
: ValidationResult = dataType match {
case _: NumericTypeInfo[_] =>
ValidationSuccess
case BIG_DEC_TYPE_INFO | _: BigDecimalTypeInfo | _: DecimalDataTypeInfo =>
ValidationSuccess
case _ =>
ValidationFailure(s"$caller requires numeric types, get $dataType here")
}
def assertOrderableExpr(dataType: TypeInformation[_], caller: String): ValidationResult = {
if (dataType.isSortKeyType) {
ValidationSuccess
} else {
ValidationFailure(s"$caller requires orderable types, get $dataType here")
}
}
/**
* Checks if one class can be assigned to a variable of another class.
*
* Adopted from o.a.commons.lang.ClassUtils#isAssignable(java.lang.Class[], java.lang.Class[])
* but without null checks.
*/
def isAssignable(classArray: Array[Class[_]], toClassArray: Array[Class[_]]): Boolean = {
if (classArray.length != toClassArray.length) {
return false
}
var i = 0
while (i < classArray.length) {
if (!isAssignable(classArray(i), toClassArray(i))) {
return false
}
i += 1
}
true
}
/**
* Checks if one class can be assigned to a variable of another class.
*
* Adopted from o.a.commons.lang.ClassUtils#isAssignable(java.lang.Class, java.lang.Class) but
* without null checks.
*/
def isAssignable(cls: Class[_], toClass: Class[_]): Boolean = {
if (cls.equals(toClass)) {
return true
}
if (cls.isPrimitive) {
if (!toClass.isPrimitive) {
return false
}
if (java.lang.Integer.TYPE.equals(cls)) {
return java.lang.Long.TYPE.equals(toClass) ||
java.lang.Float.TYPE.equals(toClass) ||
java.lang.Double.TYPE.equals(toClass)
}
if (java.lang.Long.TYPE.equals(cls)) {
return java.lang.Float.TYPE.equals(toClass) ||
java.lang.Double.TYPE.equals(toClass)
}
if (java.lang.Boolean.TYPE.equals(cls)) {
return false
}
if (java.lang.Double.TYPE.equals(cls)) {
return false
}
if (java.lang.Float.TYPE.equals(cls)) {
return java.lang.Double.TYPE.equals(toClass)
}
if (java.lang.Character.TYPE.equals(cls)) {
return java.lang.Integer.TYPE.equals(toClass) ||
java.lang.Long.TYPE.equals(toClass) ||
java.lang.Float.TYPE.equals(toClass) ||
java.lang.Double.TYPE.equals(toClass)
}
if (java.lang.Short.TYPE.equals(cls)) {
return java.lang.Integer.TYPE.equals(toClass) ||
java.lang.Long.TYPE.equals(toClass) ||
java.lang.Float.TYPE.equals(toClass) ||
java.lang.Double.TYPE.equals(toClass)
}
if (java.lang.Byte.TYPE.equals(cls)) {
return java.lang.Short.TYPE.equals(toClass) ||
java.lang.Integer.TYPE.equals(toClass) ||
java.lang.Long.TYPE.equals(toClass) ||
java.lang.Float.TYPE.equals(toClass) ||
java.lang.Double.TYPE.equals(toClass)
}
// should never get here
return false
}
toClass.isAssignableFrom(cls)
}
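
  // Illustrative results for the method above (they follow the widening rules it encodes):
  //   isAssignable(java.lang.Integer.TYPE, java.lang.Double.TYPE)  // true: int widens to double
  //   isAssignable(java.lang.Long.TYPE, java.lang.Integer.TYPE)    // false: long does not narrow to int
  //   isAssignable(classOf[String], classOf[Object])               // true: plain reference assignability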
}
| lincoln-lil/flink | flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/typeutils/TypeInfoCheckUtils.scala | Scala | apache-2.0 | 5,397 |
// scalac: -Ypickle-java
class Test
| scala/scala | test/files/pos/java-import-static-from-subclass/Test.scala | Scala | apache-2.0 | 36 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.k8s
import io.fabric8.kubernetes.api.model.{Container, ContainerBuilder, Pod, PodBuilder}
/**
* Bootstraps a driver or executor container or an init-container with needed secrets mounted.
*/
private[spark] class MountSecretsBootstrap(secretNamesToMountPaths: Map[String, String]) {
/**
* Add new secret volumes for the secrets specified in secretNamesToMountPaths into the given pod.
*
* @param pod the pod into which the secret volumes are being added.
* @return the updated pod with the secret volumes added.
*/
def addSecretVolumes(pod: Pod): Pod = {
var podBuilder = new PodBuilder(pod)
secretNamesToMountPaths.keys.foreach { name =>
podBuilder = podBuilder
.editOrNewSpec()
.addNewVolume()
.withName(secretVolumeName(name))
.withNewSecret()
.withSecretName(name)
.endSecret()
.endVolume()
.endSpec()
}
podBuilder.build()
}
/**
* Mounts Kubernetes secret volumes of the secrets specified in secretNamesToMountPaths into the
* given container.
*
* @param container the container into which the secret volumes are being mounted.
* @return the updated container with the secrets mounted.
*/
def mountSecrets(container: Container): Container = {
var containerBuilder = new ContainerBuilder(container)
secretNamesToMountPaths.foreach { case (name, path) =>
containerBuilder = containerBuilder
.addNewVolumeMount()
.withName(secretVolumeName(name))
.withMountPath(path)
.endVolumeMount()
}
containerBuilder.build()
}
private def secretVolumeName(secretName: String): String = {
secretName + "-volume"
}
}
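
// Usage sketch (the secret name and mount path are assumptions, not Spark defaults):
//
//   val bootstrap = new MountSecretsBootstrap(Map("spark-secret" -> "/etc/secrets"))
//   val podWithVolume       = bootstrap.addSecretVolumes(pod)   // adds a "spark-secret-volume" volume to the pod spec
//   val containerWithMounts = bootstrap.mountSecrets(container) // mounts that volume at /etc/secrets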
| ioana-delaney/spark | resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/MountSecretsBootstrap.scala | Scala | apache-2.0 | 2,572 |
package com.dslplatform.sbt
import sbt._
import Keys._
import com.dslplatform.compiler.client.parameters.{Settings, Targets, TempPath}
import sbt.Def.Initialize
import sbt.complete.Parsers
import sbt.plugins.JvmPlugin
import scala.collection.mutable.ArrayBuffer
import scala.util.Try
object SbtDslPlatformPlugin extends AutoPlugin {
object autoImport {
val dslLibrary = inputKey[Seq[Any]]("Compile DSL into a compiled jar ready for usage.")
val dslSource = inputKey[Seq[File]]("Compile DSL into generated source ready for usage.")
val dslResource = inputKey[Seq[File]]("Scan code and create META-INF/services files for plugins.")
    val dslMigrate = inputKey[Unit]("Create an SQL migration file based on the difference between the DSL in the project and in the target database.")
val dslExecute = inputKey[Unit]("Execute custom DSL compiler command")
    val dslLibraries = settingKey[Map[Targets.Option, File]]("Compile libraries to specified outputs")
    val dslSources = settingKey[Map[Targets.Option, File]]("Generate sources to specified folders")
val dslCompiler = settingKey[String]("Path to custom dsl-compiler.exe or port to running instance (requires .NET/Mono)")
val dslServerMode = settingKey[Boolean]("Talk with DSL compiler in server mode (will be faster)")
val dslServerPort = settingKey[Option[Int]]("Use a specific port to talk with DSL compiler in server mode")
val dslPostgres = settingKey[String]("JDBC-like connection string to the Postgres database")
val dslOracle = settingKey[String]("JDBC-like connection string to the Oracle database")
val dslApplyMigration = settingKey[Boolean]("Apply SQL migration directly to the database")
val dslNamespace = settingKey[String]("Root namespace for target language")
val dslSettings = settingKey[Seq[Settings.Option]]("Additional compilation settings")
val dslDslPath = settingKey[Seq[File]]("Path to DSL folder(s)")
val dslResourcePath = settingKey[Option[File]]("Path to META-INF/services folder")
val dslDependencies = settingKey[Map[Targets.Option, File]]("Library compilation requires various dependencies. Customize default paths to dependencies")
val dslSqlPath = settingKey[File]("Output folder for SQL scripts")
val dslLatest = settingKey[Boolean]("Check for latest versions (dsl-compiler, libraries, etc...)")
val dslForce = settingKey[Boolean]("Force actions without prompt (destructive migrations, missing folders, etc...)")
val dslPlugins = settingKey[Option[File]]("Path to additional DSL plugins")
val dslDownload = settingKey[Option[String]]("Download URL for a custom DSL compiler")
}
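
  // Hypothetical build.sbt sketch showing how these keys might be wired up in a project
  // (the target, paths and connection string are assumptions, and `Targets` must be imported):
  //
  //   enablePlugins(SbtDslPlatformPlugin)
  //   dslDslPath        := Seq(baseDirectory.value / "dsl")
  //   dslSources        := Map(Targets.Option.REVENJ_SCALA -> (sourceManaged in Compile).value)
  //   dslPostgres       := "localhost:5432/mydb?user=postgres&password=postgres"
  //   dslApplyMigration := false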
import autoImport._
lazy val DslPlatform = config("dsl-platform") extend(Compile)
override def requires: Plugins = JvmPlugin
override def projectConfigurations: Seq[Configuration] = Seq(DslPlatform)
private def findTarget(logger: Logger, name: String): Targets.Option = {
Targets.Option.values().find(it => it.toString.equals(name)) match {
case Some(t) => t
case _ =>
logger.error(s"Unable to find target: $name")
logger.error("List of known targets: ")
Targets.Option.values() foreach { it => logger.error(it.toString) }
throw new RuntimeException(s"Unable to find target: $name")
}
}
private lazy val dslDefaultSettings = Seq(
dslLibraries := Map.empty,
dslSources := Map.empty,
dslCompiler := "",
dslServerMode := false,
dslServerPort := Some(55662),
dslPostgres := "",
dslOracle := "",
dslApplyMigration := false,
dslNamespace := "",
dslSettings := Nil,
dslDslPath := Seq(baseDirectory.value / "dsl"),
dslDependencies := Map.empty,
dslResourcePath := None,
dslSqlPath := baseDirectory.value / "sql",
dslLatest := true,
dslForce := false,
dslPlugins := Some(baseDirectory.value),
dslDownload := None
)
private lazy val dslTasks = Seq(
dslLibrary in Compile <<= dslLibraryInputTask(Compile),
dslLibrary in Test <<= dslLibraryInputTask(Test),
dslSource <<= dslSourceTask,
dslResource in Compile <<= dslResourceTask(Compile),
dslResource in Test <<= dslResourceTask(Test),
dslMigrate <<= dslMigrateTask,
dslExecute <<= dslExecuteTask
)
private lazy val dslCompilationSettings = inConfig(DslPlatform)(
Defaults.compileInputsSettings ++
Defaults.compileAnalysisSettings ++
Defaults.packageTaskSettings(packageBin, Defaults.packageBinMappings) ++
Seq(
sourceDirectories := Nil,
unmanagedSources := Nil,
sourceGenerators += dslSourcesForLibrary.taskValue,
resourceGenerators := Seq(dslResourceForLibrary.taskValue),
managedSources := Defaults.generate(sourceGenerators in DslPlatform).value,
managedResources := Defaults.generate(resourceGenerators in DslPlatform).value,
sources := (managedSources in DslPlatform).value,
resources := (managedResources in DslPlatform).value,
manipulateBytecode := compileIncremental.value,
compileIncremental := (Defaults.compileIncrementalTask tag (Tags.Compile, Tags.CPU)).value,
compileIncSetup := Defaults.compileIncSetupTask.value,
compile := Defaults.compileTask.value,
classDirectory := crossTarget.value / (configuration.value.name + "-classes"),
compileAnalysisFilename := (compileAnalysisFilename in Compile).value,
dependencyClasspath := Classpaths.concat(managedClasspath in Compile, unmanagedClasspath in Compile).value,
copyResources := Defaults.copyResourcesTask.value,
products := Classpaths.makeProducts.value,
packageOptions := dslPackageOptions.value,
artifactPath in packageBin := artifactPathSetting(artifact in packageBin in DslPlatform).value,
exportJars := true,
exportedProducts := Classpaths.exportProductsTask.value
)
) ++ Seq(
dependencyClasspath in Compile ++= (products in DslPlatform).value.classpath,
exportedProducts in Compile ++= (exportedProducts in DslPlatform).value,
unmanagedSourceDirectories in Compile ++= dslDslPath.value
)
  // When running `compile` from the command line interface, SBT is calling dsl-platform:compile, apparently because
  // it takes the latest definition of the task if no config is provided (like when using `compile:compile`).
private lazy val commandLineOrderingWorkaroundSettings = Seq(
compile := (compile in Compile).value
)
override lazy val projectSettings = dslDefaultSettings ++ dslTasks ++ dslCompilationSettings ++ Seq(
onLoad := {
if (dslServerMode.value) {
Actions.setupServerMode(dslCompiler.value, None, dslDownload.value, dslServerPort.value)
}
onLoad.value
}
) ++ commandLineOrderingWorkaroundSettings
private def dslPackageOptions = Def.task { Seq(
Package.addSpecManifestAttributes(name.value, version.value, organizationName.value),
Package.addImplManifestAttributes(name.value, version.value, homepage.value, organization.value, organizationName.value)
)}
private def artifactPathSetting(art: SettingKey[Artifact]) =
(crossTarget, projectID, art, scalaVersion in artifactName, scalaBinaryVersion in artifactName, artifactName) {
(t, module, a, sv, sbv, toString) => {
val dslArtifact = a.copy(name = a.name + "-dsl", classifier = None)
t / Artifact.artifactName(ScalaVersion(sv, sbv), module, dslArtifact) asFile
}
}
private def dslLibraryInputTask(config: Configuration): Initialize[InputTask[Seq[Any]]] = Def.inputTaskDyn {
import sbt.complete.Parsers.spaceDelimited
val args = spaceDelimited("<arg>").parsed
dslLibraryTask(config, args)
}
private def dslLibraryTask(config: Configuration, args: Seq[String]): Initialize[Task[Seq[Any]]] = Def.taskDyn {
val log = streams.value.log
def compileLibrary(dslTarget: Targets.Option, targetPath: File, targetDeps: Option[File]): Initialize[Task[Any]] = {
if(dslTarget == Targets.Option.REVENJ_SCALA) {
Def.task {
val compiledLibrary = (packageBin in DslPlatform).value
if(targetPath.getName.toLowerCase.endsWith(".jar")) {
IO.copyFile(compiledLibrary, targetPath)
log.info(s"Generated library for target $dslTarget in $targetPath")
} else {
val targetFile = targetPath / compiledLibrary.getName
IO.copyFile(compiledLibrary, targetFile)
log.info(s"Generated library for target $dslTarget in directory $targetFile")
}
}
} else {
Def.task {
Actions.compileLibrary(
streams.value.log,
dslTarget,
targetPath,
dslDslPath.value,
dslPlugins.value,
dslCompiler.value,
dslServerMode.value,
dslDownload.value,
dslServerPort.value,
dslNamespace.value,
dslSettings.value,
targetDeps,
Classpaths.concat(managedClasspath in Compile, unmanagedClasspath in Compile).value,
dslLatest.value)
log.info(s"Generated library for target $dslTarget in $targetPath")
}
}
}
if (args.isEmpty) {
if (dslLibraries.value.isEmpty) throw new RuntimeException(
"""|dslLibraries is empty.
|Either define dslLibraries in build.sbt or provide target argument (eg. revenj.scala).
|Usage example: dslLibrary revenj.scala path_to_jar""".stripMargin)
val allTargets = dslLibraries.value collect { case (targetArg, targetOutput) =>
val targetDeps = dslDependencies.value.get(targetArg)
compileLibrary(targetArg, targetOutput, targetDeps)
}
joinTasks(allTargets.toSeq)
} else if (args.length > 2) {
throw new RuntimeException("Too many arguments. Usage example: dslLibrary revenj.scala path_to_jar")
} else {
val targetArg = findTarget(streams.value.log, args.head)
val predefinedOutput = dslLibraries.value.get(targetArg)
if (args.length == 1 && predefinedOutput.isEmpty) {
throw new RuntimeException(
s"""|dslLibraries does not contain definition for $targetArg.
|Either define it in dslLibraries or provide explicit output path.
|Example: dslLibrary revenj.scala path_to_jar""".stripMargin)
}
val targetOutput = if (args.length == 2) new File(args.last) else predefinedOutput.get
val targetDeps = dslDependencies.value.get(targetArg)
joinTasks(Seq(compileLibrary(targetArg, targetOutput, targetDeps)))
}
}
def joinTasks(tasks: Seq[sbt.Def.Initialize[Task[Any]]]): sbt.Def.Initialize[Task[Seq[Any]]] = {
tasks.joinWith(_.join)
}
private def dslTempFolder = Def.setting {
target.value / "dsl-temp"
}
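  // Writes a fingerprint of the compiler executable and the relevant settings so that cached
  // source generation is invalidated whenever any of them change.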
private def createCompilerSettingsFingerprint: Initialize[Task[File]] = Def.task {
def parsePort(in: String): Option[Int] = Try(Integer.parseInt(in)).filter(_ > 0).toOption
val fallBackCompiler = {
val workingDirectoryCompiler = new File("dsl-compiler.exe")
if(workingDirectoryCompiler.exists()) {
workingDirectoryCompiler
} else {
val logger = streams.value.log
val tempPath = TempPath.getTempRootPath(new DslContext(Some(logger)))
new File(tempPath, "dsl-compiler.exe")
}
}
val file = parsePort(dslCompiler.value)
.map(_ => fallBackCompiler)
.getOrElse(
if(dslCompiler.value.isEmpty) {
fallBackCompiler
} else {
val customCompilerPath = new File(dslCompiler.value)
if(!customCompilerPath.exists()) {
throw new RuntimeException(s"Unable to find the specified dslCompiler path: ${customCompilerPath.getAbsolutePath}")
}
customCompilerPath
}
)
val fingerprintFile = dslTempFolder.value / "dsl-fingerprint.txt"
val settings = {
dslSettings.value.map(_.name).sorted.mkString("\\n") + "\\n" +
(if (file.exists()) file.lastModified.toString else "") + "\\n" +
dslCompiler + "\\n" +
dslDownload.value.getOrElse("")
}
IO.write(fingerprintFile, settings)
fingerprintFile
}
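  // Generates revenj.scala sources from the DSL, cached on the hashes of all *.dsl files plus the
  // settings fingerprint, so sources are regenerated only when their inputs change.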
private def dslSourcesForLibrary = Def.task {
def generateSource(inChanges: ChangeReport[File], outChanges: ChangeReport[File]): Set[File] = {
val buffer = new ArrayBuffer[File]()
buffer ++= Actions.generateSource(
streams.value.log,
Targets.Option.REVENJ_SCALA,
dslTempFolder.value / Targets.Option.REVENJ_SCALA.name(),
dslDslPath.value,
dslPlugins.value,
dslCompiler.value,
dslServerMode.value,
dslDownload.value,
dslServerPort.value,
dslNamespace.value,
dslSettings.value,
dslLatest.value)
buffer.toSet
}
val allDslFiles = (dslDslPath.value ** "*.dsl").get :+ createCompilerSettingsFingerprint.value
val dslSourceCache = target.value / "dsl-source-cache"
val cachedGenerator = FileFunction.cached(dslSourceCache)(inStyle = FilesInfo.hash, outStyle = FilesInfo.hash)(generateSource)
cachedGenerator(allDslFiles.toSet).toSeq
}
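  // Emits the META-INF/services entry registering the generated Boot class as a SystemAspect.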
private def dslResourceForLibrary = Def.task {
val file = resourceManaged.value / "META-INF" / "services" / "net.revenj.extensibility.SystemAspect"
IO.write(file, if (dslNamespace.value.isEmpty) "Boot" else dslNamespace.value + ".Boot", charset = IO.utf8)
Seq(file)
}
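  // Generates sources for the configured (or explicitly requested) targets into their output folders.
  // Example invocation from the sbt shell (path is illustrative): dslSource revenj.scala src/generated/scala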
private def dslSourceTask = Def.inputTask {
val args = Parsers.spaceDelimited("<arg>").parsed
def generate(dslTarget: Targets.Option, targetPath: File): Seq[File] = {
Actions.generateSource(
streams.value.log,
dslTarget,
targetPath,
dslDslPath.value,
dslPlugins.value,
dslCompiler.value,
dslServerMode.value,
dslDownload.value,
dslServerPort.value,
dslNamespace.value,
dslSettings.value,
dslLatest.value)
}
val buffer = new ArrayBuffer[File]()
if (args.isEmpty) {
if (dslSources.value.isEmpty) throw new RuntimeException(
"""|dslSources is empty.
|Either define dslSources in build.sbt or provide target argument (eg. revenj.scala).
|Usage example: dslSource revenj.scala path_to_folder""".stripMargin)
dslSources.value foreach { case (targetArg, targetOutput) =>
buffer ++= generate(targetArg, targetOutput)
}
} else if (args.length > 2) {
throw new RuntimeException("Too many arguments. Usage example: dslSource revenj.scala path_to_target_source_folder")
} else {
val targetArg = findTarget(streams.value.log, args.head)
val predefinedOutput = dslSources.value.get(targetArg)
if (args.length == 1 && predefinedOutput.isEmpty) {
throw new RuntimeException(
s"""|dslSources does not contain definition for $targetArg.
|Either define it in dslSources or provide explicit output path.
              |Example: dslSource revenj.scala path_to_folder""".stripMargin)
}
val targetOutput = if (args.length == 2) new File(args.last) else predefinedOutput.get
buffer ++= generate(targetArg, targetOutput)
}
buffer.toSeq
}
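  // Generates META-INF/services resources for the configured (or explicitly requested) targets.
  // Example invocation from the sbt shell: dslResource revenj.scala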
private def dslResourceTask(config: Configuration) = Def.inputTask {
val args = Parsers.spaceDelimited("<arg>").parsed
def generate(dslTarget: Targets.Option, targetPath: Option[File]): Seq[File] = {
streams.value.log(s"creating resources in $config")
Actions.generateResources(
streams.value.log,
dslTarget,
targetPath.getOrElse((resourceDirectory in config).value / "META-INF" / "services"),
Seq((target in config).value),
(dependencyClasspath in config).value)
}
val buffer = new ArrayBuffer[File]()
if (args.isEmpty) {
if (dslSources.value.isEmpty && dslLibraries.value.isEmpty) throw new RuntimeException(
"""|Both dslSources and dslLibraries is empty.
|Either define dslSources/dslLibraries in build.sbt or provide target argument (eg. revenj.scala).
|Usage example: dslResource revenj.scala""".stripMargin)
(dslSources.value.keys ++ dslLibraries.value.keys).toSet[Targets.Option] foreach { target =>
buffer ++= generate(target, dslResourcePath.value)
}
} else if (args.length > 2) {
throw new RuntimeException("Too many arguments. Usage example: dslResource revenj.scala path_to_meta_inf_services_folder")
} else {
val targetArg = findTarget(streams.value.log, args.head)
if (args.length == 1 &&
dslResourcePath.value.isEmpty &&
targetArg != Targets.Option.REVENJ_SCALA && targetArg != Targets.Option.REVENJ_SCALA_POSTGRES &&
targetArg != Targets.Option.REVENJ_JAVA && targetArg != Targets.Option.REVENJ_JAVA_POSTGRES &&
targetArg != Targets.Option.REVENJ_SPRING) {
throw new RuntimeException(
s"""Missing path argument for dslResource on $targetArg.
              |Only a few targets use the default META-INF/services path.
              |All other targets require an explicit output path.
|Example: dslResource revenj.scala path_to_folder""".stripMargin)
}
val targetOutput = if (args.length == 2) Some(new File(args.last)) else dslResourcePath.value
buffer ++= generate(targetArg, targetOutput)
}
buffer.toSeq
}
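  // Creates an SQL migration (and applies it when dslApplyMigration is set) for the database
  // described by the DSL, using the dslPostgres and/or dslOracle JDBC connection strings.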
private def dslMigrateTask = Def.inputTask {
def migrate(pg: Boolean, jdbc: String): Unit = {
Actions.dbMigration(
streams.value.log,
jdbc,
pg,
dslSqlPath.value,
dslDslPath.value,
dslPlugins.value,
dslCompiler.value,
dslServerMode.value,
dslDownload.value,
dslServerPort.value,
dslApplyMigration.value,
dslForce.value,
dslLatest.value)
}
if (dslPostgres.value.nonEmpty) {
migrate(pg = true, dslPostgres.value)
}
if (dslOracle.value.nonEmpty) {
migrate(pg = false, dslOracle.value)
} else if (dslPostgres.value.isEmpty) {
streams.value.log.error("Jdbc connection string not defined for Postgres or Oracle")
}
}
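  // Passes the space-delimited arguments straight through to the DSL Platform compiler.
  // Example invocation from the sbt shell: dslExecute <raw compiler arguments>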
private def dslExecuteTask = Def.inputTask {
val args = Parsers.spaceDelimited("<arg>").parsed
Actions.execute(
streams.value.log,
dslDslPath.value,
dslPlugins.value,
dslCompiler.value,
dslServerMode.value,
dslDownload.value,
dslServerPort.value,
args)
}
}
| hperadin/dsl-compiler-client | SbtPlugin/src/main/scala/com/dslplatform/sbt/SbtDslPlatformPlugin.scala | Scala | bsd-3-clause | 18,903 |
package com.example.http4s.blaze.demo.server.endpoints
import cats.effect.{Async, Timer}
import cats.implicits._
import java.util.concurrent.TimeUnit
import org.http4s.{ApiVersion => _, _}
import org.http4s.dsl.Http4sDsl
import scala.concurrent.duration.FiniteDuration
import scala.util.Random
class TimeoutHttpEndpoint[F[_]](implicit F: Async[F], timer: Timer[F]) extends Http4sDsl[F] {
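  // Replies to GET /<ApiVersion>/timeout after a random delay of 0, 1 or 2 seconds.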
val service: HttpRoutes[F] = HttpRoutes.of {
case GET -> Root / ApiVersion / "timeout" =>
val randomDuration = FiniteDuration(Random.nextInt(3) * 1000L, TimeUnit.MILLISECONDS)
timer.sleep(randomDuration) *> Ok("delayed response")
}
}
| aeons/http4s | examples/blaze/src/main/scala/com/example/http4s/blaze/demo/server/endpoints/TimeoutHttpEndpoint.scala | Scala | apache-2.0 | 646 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.spark.testsuite.secondaryindex
import org.apache.spark.sql.Row
import org.apache.spark.sql.test.util.QueryTest
import org.scalatest.BeforeAndAfterAll
import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.util.CarbonProperties
class TestIndexModelWithUnsafeColumnPage extends QueryTest with BeforeAndAfterAll {
override def beforeAll {
drop()
CarbonProperties.getInstance()
.addProperty(CarbonCommonConstants.ENABLE_UNSAFE_COLUMN_PAGE, "true")
sql("drop table if exists testSecondryIndex")
sql("create table testSecondryIndex( a string,b string,c string) STORED AS carbondata")
sql("insert into testSecondryIndex select 'babu','a','6'")
sql("create index testSecondryIndex_IndexTable on table testSecondryIndex(b) AS 'carbondata'")
}
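  // The secondary index table should contain one row for the single row loaded into the main table.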
test("Test secondry index data count") {
checkAnswer(sql("select count(*) from testSecondryIndex_IndexTable"), Seq(Row(1)))
}
override def afterAll {
drop()
}
private def drop(): Unit = {
sql("drop index if exists testSecondryIndex_IndexTable on testSecondryIndex")
sql("drop table if exists testSecondryIndex")
}
}
| zzcclp/carbondata | index/secondary-index/src/test/scala/org/apache/carbondata/spark/testsuite/secondaryindex/TestIndexModelWithUnsafeColumnPage.scala | Scala | apache-2.0 | 1,996 |
class Foo {
def h: Int = 20
final def f: Int = h
final def init: Int = f
}
class Bar extends Foo {
private var m = 10
override def h: Int = m
}
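// Qux's constructor calls init (inherited from Foo), which reaches the overridden h and reads
// the field `a` before it has been initialized; that is the initialization error flagged below.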
class Qux extends Bar {
init
override def h = a
private val a = 30 // error
} | som-snytt/dotty | tests/init/neg/override36.scala | Scala | apache-2.0 | 242 |
/*
* (c) Copyright 2016 Hewlett Packard Enterprise Development LP
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cogx.platform.opencl
import cogx.compiler.parser.op.RestoredOpcode
import cogx.platform.checkpoint.ObjectRestorer
import cogx.platform.types.{AbstractKernel, VirtualFieldRegister, FieldType}
import cogx.runtime.ComputeGraphRestorerState
/**
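 * An OpenCL device kernel reconstructed from a checkpoint: its kernel source, work group
 * parameters and field types are restored rather than generated by the compiler.
 *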
* @author Dick Carter
*/
private[cogx]
class OpenCLRestoredDeviceKernel(opcode: RestoredOpcode,
val kernelCode: String,
val simpleKernelName: String,
val workGroup: WorkGroupParameters,
in: Array[VirtualFieldRegister],
resultTypes: Array[FieldType]) extends OpenCLDeviceKernel(opcode, in, resultTypes) {
/** Useful name for debugging. */
override def toString: String = {
simpleKernelName + "_" + id + "() => " + fieldTypesString +
" '" + name + "'" + ", inputs = " + inputFieldTypesString
}
  /** Create a clone of this kernel that uses a new set of virtual field registers as inputs.
    * Useful for breaking a large circuit apart into smaller subcircuits. */
def copyWithNewInputs(inputs: Array[VirtualFieldRegister]): AbstractKernel = {
new OpenCLRestoredDeviceKernel(opcode, kernelCode, simpleKernelName, workGroup, inputs, resultTypes)
}
}
object OpenCLRestoredDeviceKernel {
  /** Create an OpenCLRestoredDeviceKernel instance using the provided ObjectRestorer.
   * @param restorer The restorer through which to read the new object state.
   * @return The created OpenCLRestoredDeviceKernel based on the read information.
*/
def restore(restorer: ObjectRestorer, kernelId: Int, inputRegisters: Array[VirtualFieldRegister],
resultTypes: Array[FieldType]): OpenCLRestoredDeviceKernel = {
val kernelCodes = restorer.asInstanceOf[ComputeGraphRestorerState].kernelCodes
val workGroupParameters = restorer.readRestorable("workGroupParameters",
WorkGroupParameters).asInstanceOf[WorkGroupParameters]
val kernelCodeIndex = restorer.readInt("kernelCodeIndex")
val kernelCode = kernelCodes(kernelCodeIndex).codeAsRun
val simpleKernelName = kernelCodes(kernelCodeIndex).nameWithIdRemoved
new OpenCLRestoredDeviceKernel(RestoredOpcode(kernelId.toString), kernelCode, simpleKernelName,
workGroupParameters, inputRegisters, resultTypes)
}
} | hpe-cct/cct-core | src/main/scala/cogx/platform/opencl/OpenCLRestoredDeviceKernel.scala | Scala | apache-2.0 | 2,921 |